/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "util.h"
#include "hashmap.h"
#include "macro.h"
#include "siphash24.h"

#define INITIAL_N_BUCKETS 31
struct hashmap_entry {
        const void *key;
        void *value;
        struct hashmap_entry *bucket_next, *bucket_previous;
        struct hashmap_entry *iterate_next, *iterate_previous;
};
struct Hashmap {
        hash_func_t hash_func;
        compare_func_t compare_func;

        struct hashmap_entry *iterate_list_head, *iterate_list_tail;

        struct hashmap_entry **buckets;
        unsigned n_buckets, n_entries;

        uint8_t hash_key[HASH_KEY_SIZE];
        bool from_pool;
};
struct pool {
        struct pool *next;
        unsigned n_tiles;
        unsigned n_used;
};

static struct pool *first_hashmap_pool = NULL;
static void *first_hashmap_tile = NULL;

static struct pool *first_entry_pool = NULL;
static void *first_entry_tile = NULL;
static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size, unsigned at_least) {
        unsigned i;

        /* When a tile is released we add it to the free list and
         * simply place the next pointer at its offset 0. */

        assert(tile_size >= sizeof(void*));

        /* Reuse a released tile from the free list, if there is one */
        if (*first_tile) {
                void *r;

                r = *first_tile;
                *first_tile = * (void**) (*first_tile);
                return r;
        }

        /* No pool yet, or the current pool is exhausted: allocate a
         * new pool, at least twice as big as the previous one */
        if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
                unsigned n;
                size_t size;
                struct pool *p;

                n = *first_pool ? (*first_pool)->n_tiles : 0;
                n = MAX(at_least, n * 2);
                size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
                n = (size - ALIGN(sizeof(struct pool))) / tile_size;

                p = malloc(size);
                if (!p)
                        return NULL;

                p->next = *first_pool;
                p->n_tiles = n;
                p->n_used = 0;

                *first_pool = p;
        }

        i = (*first_pool)->n_used++;

        return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
}
static void deallocate_tile(void **first_tile, void *p) {
        * (void**) p = *first_tile;
        *first_tile = p;
}
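/* A minimal sketch of how the tile allocator above is driven;
 * example_pool, example_tile and example() are hypothetical names, not
 * part of this file. Compiled out via #if 0. */
#if 0
static struct pool *example_pool = NULL;
static void *example_tile = NULL;

static void example(void) {
        void *a, *b;

        /* Hand out two 32-byte tiles; the backing pool is created on
         * first use and doubled whenever it is exhausted. */
        a = allocate_tile(&example_pool, &example_tile, 32, 8);
        b = allocate_tile(&example_pool, &example_tile, 32, 8);

        /* Releasing a tile pushes it onto the free list: the next
         * pointer is stored in the tile's first word, so the very
         * next allocation returns the same address. */
        deallocate_tile(&example_tile, a);
        assert(allocate_tile(&example_pool, &example_tile, 32, 8) == a);

        deallocate_tile(&example_tile, b);
}
#endif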
static void drop_pool(struct pool *p) {
        while (p) {
                struct pool *n;

                n = p->next;
                free(p);
                p = n;
        }
}

__attribute__((destructor)) static void cleanup_pool(void) {
        /* Be nice to valgrind */

        drop_pool(first_hashmap_pool);
        drop_pool(first_entry_pool);
}
unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, strlen(p), hash_key);
        return (unsigned long) u;
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
        return (unsigned long) u;
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
        return (unsigned long) u;
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;

        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}
static unsigned bucket_hash(Hashmap *h, const void *p) {
        return (unsigned) (h->hash_func(p, h->hash_key) % h->n_buckets);
}
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level that triggers a resize. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) {
        bool b;
        Hashmap *h;
        size_t size;

        b = is_main_thread();

        size = ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS * sizeof(struct hashmap_entry*);

        /* The pools are not locked, hence allocate from them only on
         * the main thread; otherwise fall back to plain malloc() */
        if (b) {
                h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size, 8);
                if (!h)
                        return NULL;

                memset(h, 0, size);
        } else {
                h = malloc0(size);
                if (!h)
                        return NULL;
        }

        h->hash_func = hash_func ? hash_func : trivial_hash_func;
        h->compare_func = compare_func ? compare_func : trivial_compare_func;

        h->n_buckets = INITIAL_N_BUCKETS;
        h->n_entries = 0;
        h->iterate_list_head = h->iterate_list_tail = NULL;

        h->buckets = (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)));

        h->from_pool = b;

        get_hash_key(h->hash_key, true);

        return h;
}
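/* A minimal usage sketch; example_usage() and its literals are
 * illustrative, not part of this API. The map stores the key and value
 * pointers as-is, nothing is copied. Compiled out via #if 0. */
#if 0
static int example_usage(void) {
        Hashmap *m;

        m = hashmap_new(string_hash_func, string_compare_func);
        if (!m)
                return -ENOMEM;

        hashmap_put(m, "foo", (void*) "bar");
        assert(strcmp(hashmap_get(m, "foo"), "bar") == 0);

        /* Frees the map only, not the keys or values it points to */
        hashmap_free(m);
        return 0;
}
#endif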
int hashmap_ensure_allocated(Hashmap **h, hash_func_t hash_func, compare_func_t compare_func) {
        Hashmap *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_new(hash_func, compare_func);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}
static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
        assert(h);
        assert(e);

        /* Insert into hash table */
        e->bucket_next = h->buckets[hash];
        e->bucket_previous = NULL;
        if (h->buckets[hash])
                h->buckets[hash]->bucket_previous = e;
        h->buckets[hash] = e;

        /* Insert into iteration list */
        e->iterate_previous = h->iterate_list_tail;
        e->iterate_next = NULL;
        if (h->iterate_list_tail) {
                assert(h->iterate_list_head);
                h->iterate_list_tail->iterate_next = e;
        } else {
                assert(!h->iterate_list_head);
                h->iterate_list_head = e;
        }
        h->iterate_list_tail = e;

        h->n_entries++;
        assert(h->n_entries >= 1);
}
static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
        assert(h);
        assert(e);

        /* Remove from iteration list */
        if (e->iterate_next)
                e->iterate_next->iterate_previous = e->iterate_previous;
        else
                h->iterate_list_tail = e->iterate_previous;

        if (e->iterate_previous)
                e->iterate_previous->iterate_next = e->iterate_next;
        else
                h->iterate_list_head = e->iterate_next;

        /* Remove from hash table bucket list */
        if (e->bucket_next)
                e->bucket_next->bucket_previous = e->bucket_previous;

        if (e->bucket_previous)
                e->bucket_previous->bucket_next = e->bucket_next;
        else
                h->buckets[hash] = e->bucket_next;

        assert(h->n_entries >= 1);
        h->n_entries--;
}
static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
        unsigned hash;

        assert(h);
        assert(e);

        hash = bucket_hash(h, e->key);
        unlink_entry(h, e, hash);

        if (h->from_pool)
                deallocate_tile(&first_entry_tile, e);
        else
                free(e);
}
void hashmap_free(Hashmap *h) {

        /* Free the hashmap, but nothing in it */

        if (!h)
                return;

        hashmap_clear(h);

        /* The bucket array only needs separate freeing if it was
         * reallocated by resize_buckets(); initially it lives in the
         * same allocation as the Hashmap itself */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
                free(h->buckets);

        if (h->from_pool)
                deallocate_tile(&first_hashmap_tile, h);
        else
                free(h);
}
void hashmap_free_free(Hashmap *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (!h)
                return;

        hashmap_clear_free(h);
        hashmap_free(h);
}

void hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (!h)
                return;

        hashmap_clear_free_free(h);
        hashmap_free(h);
}
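/* Illustrative sketch of the ownership rules of the three destructors,
 * assuming both keys and values were allocated with strdup();
 * example_destructors() is a hypothetical caller. Compiled out. */
#if 0
static void example_destructors(void) {
        Hashmap *m;

        m = hashmap_new(string_hash_func, string_compare_func);
        if (!m)
                return;

        hashmap_put(m, strdup("key"), strdup("value"));

        /* Pick exactly one, depending on what the map owns:
         *   hashmap_free(m)           frees neither keys nor values,
         *   hashmap_free_free(m)      additionally frees the values,
         *   hashmap_free_free_free(m) frees both keys and values. */
        hashmap_free_free_free(m);
}
#endif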
void hashmap_clear(Hashmap *h) {
        if (!h)
                return;

        while (h->iterate_list_head)
                remove_entry(h, h->iterate_list_head);
}
void hashmap_clear_free(Hashmap *h) {
        void *p;

        if (!h)
                return;

        while ((p = hashmap_steal_first(h)))
                free(p);
}
void hashmap_clear_free_free(Hashmap *h) {
        if (!h)
                return;

        while (h->iterate_list_head) {
                void *a, *b;

                a = h->iterate_list_head->value;
                b = (void*) h->iterate_list_head->key;
                remove_entry(h, h->iterate_list_head);
                free(a);
                free(b);
        }
}
static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
        struct hashmap_entry *e;

        assert(h);
        assert(hash < h->n_buckets);

        for (e = h->buckets[hash]; e; e = e->bucket_next)
                if (h->compare_func(e->key, key) == 0)
                        return e;

        return NULL;
}
static bool resize_buckets(Hashmap *h) {
        struct hashmap_entry **n, *i;
        unsigned m;
        uint8_t nkey[HASH_KEY_SIZE];

        assert(h);

        /* Only resize when we cross the 3/4 fill level */
        if (_likely_(h->n_entries*4 < h->n_buckets*3))
                return false;

        /* Increase by four */
        m = (h->n_entries+1)*4-1;

        /* If we hit OOM we simply risk packed hashmaps... */
        n = new0(struct hashmap_entry*, m);
        if (!n)
                return false;

        /* Let's use a different randomized hash key for the
         * extension, so that people cannot guess what we are using
         * here. */
        get_hash_key(nkey, false);

        for (i = h->iterate_list_head; i; i = i->iterate_next) {
                unsigned long old_bucket, new_bucket;

                old_bucket = h->hash_func(i->key, h->hash_key) % h->n_buckets;

                /* First, drop from old bucket table */
                if (i->bucket_next)
                        i->bucket_next->bucket_previous = i->bucket_previous;

                if (i->bucket_previous)
                        i->bucket_previous->bucket_next = i->bucket_next;
                else
                        h->buckets[old_bucket] = i->bucket_next;

                /* Then, add to new bucket table */
                new_bucket = h->hash_func(i->key, nkey) % m;

                i->bucket_next = n[new_bucket];
                i->bucket_previous = NULL;
                if (n[new_bucket])
                        n[new_bucket]->bucket_previous = i;
                n[new_bucket] = i;
        }

        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
                free(h->buckets);

        h->buckets = n;
        h->n_buckets = m;

        memcpy(h->hash_key, nkey, HASH_KEY_SIZE);

        return true;
}
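/* Worked example of the policy above: with the initial 31 buckets the
 * 3/4 fill check first triggers while the 25th entry is being added,
 * i.e. when 24 entries are already stored (24*4 = 96 >= 93 = 31*3).
 * The table then grows to (24+1)*4-1 = 99 buckets and every entry is
 * rehashed with the freshly generated key. */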
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        unsigned hash;

        assert(h);

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (e) {
                if (e->value == value)
                        return 0;

                return -EEXIST;
        }

        if (resize_buckets(h))
                hash = bucket_hash(h, key);

        if (h->from_pool)
                e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry), 64U);
        else
                e = new(struct hashmap_entry, 1);

        if (!e)
                return -ENOMEM;

        e->key = key;
        e->value = value;

        link_entry(h, e, hash);

        return 1;
}
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        unsigned hash;

        assert(h);

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (e) {
                e->key = key;
                e->value = value;
                return 0;
        }

        return hashmap_put(h, key, value);
}
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        unsigned hash;

        assert(h);

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e)
                return -ENOENT;

        e->value = value;
        return 0;
}
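/* Illustrative sketch of how the three insertion calls differ for an
 * existing key; assumes a map created with string_hash_func and
 * string_compare_func, and example_insertion() is a hypothetical
 * caller. Compiled out. */
#if 0
static void example_insertion(Hashmap *m) {
        void *v1 = (void*) "a", *v2 = (void*) "b";

        assert(hashmap_put(m, "key", v1) == 1);        /* newly stored */
        assert(hashmap_put(m, "key", v1) == 0);        /* same value: no-op */
        assert(hashmap_put(m, "key", v2) == -EEXIST);  /* other value: refused */
        assert(hashmap_replace(m, "key", v2) == 0);    /* value overwritten */
        assert(hashmap_update(m, "absent", v2) == -ENOENT); /* never inserts */
}
#endif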
void* hashmap_get(Hashmap *h, const void *key) {
        unsigned hash;
        struct hashmap_entry *e;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        return e->value;
}
void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
        unsigned hash;
        struct hashmap_entry *e;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        if (key2)
                *key2 = (void*) e->key;

        return e->value;
}
bool hashmap_contains(Hashmap *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return !!hash_scan(h, hash, key);
}
void* hashmap_remove(Hashmap *h, const void *key) {
        struct hashmap_entry *e;
        unsigned hash;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        data = e->value;
        remove_entry(h, e);

        return data;
}
void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct hashmap_entry *e;
        unsigned hash;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        data = e->value;
        if (rkey)
                *rkey = (void*) e->key;

        remove_entry(h, e);

        return data;
}
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e;
        unsigned old_hash, new_hash;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);
        if (!e)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (hash_scan(h, new_hash, new_key))
                return -EEXIST;

        unlink_entry(h, e, old_hash);

        e->key = new_key;
        e->value = value;

        link_entry(h, e, new_hash);

        return 0;
}
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e, *k;
        unsigned old_hash, new_hash;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);
        if (!e)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        k = hash_scan(h, new_hash, new_key);
        if (k && k != e)
                remove_entry(h, k);

        unlink_entry(h, e, old_hash);

        e->key = new_key;
        e->value = value;

        link_entry(h, e, new_hash);

        return 0;
}
void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        unsigned hash;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);

        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        if (e->value != value)
                return NULL;

        remove_entry(h, e);

        return value;
}
void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;

        assert(i);

        if (!h)
                goto at_end;

        if (*i == ITERATOR_LAST)
                goto at_end;

        if (*i == ITERATOR_FIRST && !h->iterate_list_head)
                goto at_end;

        e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;

        if (e->iterate_next)
                *i = (Iterator) e->iterate_next;
        else
                *i = ITERATOR_LAST;

        if (key)
                *key = e->key;

        return e->value;

at_end:
        *i = ITERATOR_LAST;

        if (key)
                *key = NULL;

        return NULL;
}
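/* A minimal iteration sketch; example_count() is a hypothetical
 * caller. Entries are visited in insertion order, because iteration
 * follows the iterate_list_* links rather than the bucket chains.
 * Compiled out. */
#if 0
static unsigned example_count(Hashmap *m) {
        Iterator it = ITERATOR_FIRST;
        const void *key;
        unsigned n = 0;

        while (hashmap_iterate(m, &it, &key))
                n++;

        return n;
}
#endif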
void *hashmap_iterate_backwards(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;

        assert(i);

        if (!h)
                goto at_beginning;

        if (*i == ITERATOR_FIRST)
                goto at_beginning;

        if (*i == ITERATOR_LAST && !h->iterate_list_tail)
                goto at_beginning;

        e = *i == ITERATOR_LAST ? h->iterate_list_tail : (struct hashmap_entry*) *i;

        if (e->iterate_previous)
                *i = (Iterator) e->iterate_previous;
        else
                *i = ITERATOR_FIRST;

        if (key)
                *key = e->key;

        return e->value;

at_beginning:
        *i = ITERATOR_FIRST;

        if (key)
                *key = NULL;

        return NULL;
}
void *hashmap_iterate_skip(Hashmap *h, const void *key, Iterator *i) {
        unsigned hash;
        struct hashmap_entry *e;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);

        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        *i = (Iterator) e;

        return e->value;
}
void* hashmap_first(Hashmap *h) {

        if (!h)
                return NULL;

        if (!h->iterate_list_head)
                return NULL;

        return h->iterate_list_head->value;
}
void* hashmap_first_key(Hashmap *h) {

        if (!h)
                return NULL;

        if (!h->iterate_list_head)
                return NULL;

        return (void*) h->iterate_list_head->key;
}
void* hashmap_last(Hashmap *h) {

        if (!h)
                return NULL;

        if (!h->iterate_list_tail)
                return NULL;

        return h->iterate_list_tail->value;
}
void* hashmap_steal_first(Hashmap *h) {
        void *data;

        if (!h)
                return NULL;

        if (!h->iterate_list_head)
                return NULL;

        data = h->iterate_list_head->value;
        remove_entry(h, h->iterate_list_head);

        return data;
}
void* hashmap_steal_first_key(Hashmap *h) {
        void *key;

        if (!h)
                return NULL;

        if (!h->iterate_list_head)
                return NULL;

        key = (void*) h->iterate_list_head->key;
        remove_entry(h, h->iterate_list_head);

        return key;
}
unsigned hashmap_size(Hashmap *h) {

        if (!h)
                return 0;

        return h->n_entries;
}

unsigned hashmap_buckets(Hashmap *h) {

        if (!h)
                return 0;

        return h->n_buckets;
}

bool hashmap_isempty(Hashmap *h) {

        if (!h)
                return true;

        return h->n_entries == 0;
}
int hashmap_merge(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e;

        assert(h);

        if (!other)
                return 0;

        for (e = other->iterate_list_head; e; e = e->iterate_next) {
                int r;

                r = hashmap_put(h, e->key, e->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}
void hashmap_move(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e, *n;

        assert(h);

        /* The same as hashmap_merge(), but every new item from other
         * is moved to h. This function is guaranteed to succeed. */

        if (!other)
                return;

        for (e = other->iterate_list_head; e; e = n) {
                unsigned h_hash, other_hash;

                n = e->iterate_next;

                h_hash = bucket_hash(h, e->key);
                if (hash_scan(h, h_hash, e->key))
                        continue;

                other_hash = bucket_hash(other, e->key);
                unlink_entry(other, e, other_hash);
                link_entry(h, e, h_hash);
        }
}
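/* Illustrative contrast between the two calls above;
 * example_merge_move() is a hypothetical caller. Compiled out. */
#if 0
static int example_merge_move(Hashmap *a, Hashmap *b) {
        int r;

        /* Copies entry references from b into a; may fail with -ENOMEM,
         * and b is left untouched either way. */
        r = hashmap_merge(a, b);
        if (r < 0)
                return r;

        /* Relinks b's entries into a without allocating, hence it
         * cannot fail; entries whose key already exists in a simply
         * stay in b. */
        hashmap_move(a, b);
        return 0;
}
#endif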
int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
        unsigned h_hash, other_hash;
        struct hashmap_entry *e;

        assert(h);

        if (!other)
                return -ENOENT;

        h_hash = bucket_hash(h, key);
        if (hash_scan(h, h_hash, key))
                return -EEXIST;

        other_hash = bucket_hash(other, key);
        e = hash_scan(other, other_hash, key);
        if (!e)
                return -ENOENT;

        unlink_entry(other, e, other_hash);
        link_entry(h, e, h_hash);

        return 0;
}
Hashmap *hashmap_copy(Hashmap *h) {
        Hashmap *copy;

        assert(h);

        copy = hashmap_new(h->hash_func, h->compare_func);
        if (!copy)
                return NULL;

        if (hashmap_merge(copy, h) < 0) {
                hashmap_free(copy);
                return NULL;
        }

        return copy;
}
char **hashmap_get_strv(Hashmap *h) {
        char **sv;
        Iterator it;
        char *item;
        int n;

        assert(h);

        sv = new(char*, h->n_entries+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH(item, h, it)
                sv[n++] = item;
        sv[n] = NULL;

        return sv;
}
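/* A minimal sketch, assuming the stored values are strings owned by
 * the caller; example_values() is a hypothetical name. Compiled out. */
#if 0
static void example_values(Hashmap *m) {
        char **sv;

        sv = hashmap_get_strv(m);
        if (!sv)
                return;

        /* sv is a NULL-terminated array of the stored value pointers
         * in iteration order; only the array itself must be freed. */
        free(sv);
}
#endif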
void *hashmap_next(Hashmap *h, const void *key) {
        unsigned hash;
        struct hashmap_entry *e;

        assert(key);

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        e = e->iterate_next;
        if (!e)
                return NULL;

        return e->value;
}