1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
30 #include "siphash24.h"
32 #define INITIAL_N_BUCKETS 31
/* One entry in the table. Each entry lives on two doubly-linked lists at
 * once: a per-bucket chain and a global insertion-order iteration list.
 * NOTE(review): the key/value members are elided in this extract. */
struct hashmap_entry {
        /* Chain of entries sharing one bucket. */
        struct hashmap_entry *bucket_next, *bucket_previous;
        /* Global list of all entries in insertion order. */
        struct hashmap_entry *iterate_next, *iterate_previous;

/* ---- Hashmap head (its opening 'struct Hashmap {' line is elided in
 * this view; the following members belong to it, not to hashmap_entry) */
        hash_func_t hash_func;          /* keyed hash over a key */
        compare_func_t compare_func;    /* equality/ordering over keys */

        /* Ends of the insertion-order iteration list. */
        struct hashmap_entry *iterate_list_head, *iterate_list_tail;

        struct hashmap_entry ** buckets;  /* array of n_buckets chain heads */
        unsigned n_buckets, n_entries;

        uint8_t hash_key[HASH_KEY_SIZE];  /* siphash24 key for this table */
/* Intrusive free lists of fixed-size "tiles" carved out of page-aligned
 * pools. One pool/tile pair serves Hashmap heads, the other serves
 * hashmap_entry objects. A freed tile stores the free-list link in its
 * own first word (see allocate_tile()/deallocate_tile()). */
static struct pool *first_hashmap_pool = NULL;
static void *first_hashmap_tile = NULL;

static struct pool *first_entry_pool = NULL;
static void *first_entry_tile = NULL;
/* Hand out one tile of tile_size bytes: first from the intrusive free
 * list, otherwise from the newest pool, growing the pool chain (roughly
 * doubling, at least 'at_least' tiles) when the current one is full.
 * NOTE(review): several statements (local declarations, the free-list
 * non-empty check, the pool malloc and its OOM handling) are elided in
 * this view. */
static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size, unsigned at_least) {

        /* When a tile is released we add it to the list and simply
         * place the next pointer at its offset 0. */

        /* A free tile must be large enough to hold the list link. */
        assert(tile_size >= sizeof(void*));

        /* Pop the free-list head: the link is stored inside the tile. */
        *first_tile = * (void**) (*first_tile);

        if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {

                /* Double the capacity each growth, minimum 'at_least'. */
                n = *first_pool ? (*first_pool)->n_tiles : 0;
                n = MAX(at_least, n * 2);
                /* Round the pool to whole pages, then recompute how many
                 * tiles actually fit in that size. */
                size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
                n = (size - ALIGN(sizeof(struct pool))) / tile_size;

                /* Chain the fresh pool in front of the old ones. */
                p->next = *first_pool;

        i = (*first_pool)->n_used++;

        /* Tiles start right after the (aligned) pool header. */
        return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
/* Return tile p to the free list: its first word becomes the link to
 * the previous head (the head update itself is elided in this view). */
static void deallocate_tile(void **first_tile, void *p) {
        * (void**) p = *first_tile;

/* Free an entire pool chain (body elided in this view). */
static void drop_pool(struct pool *p) {

/* Run at process exit so leak checkers see the pools released. */
__attribute__((destructor)) static void cleanup_pool(void) {
        /* Be nice to valgrind */

        drop_pool(first_hashmap_pool);
        drop_pool(first_entry_pool);
/* Hash a NUL-terminated string with siphash24 under the given key.
 * NOTE(review): the declaration of the 64-bit accumulator 'u' is elided
 * in this view. */
unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        siphash24((uint8_t*) &u, p, strlen(p), hash_key);
        /* Truncates the 64-bit hash on 32-bit unsigned long platforms. */
        return (unsigned long) u;

/* Compare two string keys (body elided in this view; presumably
 * strcmp() — verify against upstream). */
int string_compare_func(const void *a, const void *b) {

/* Hash the pointer VALUE itself (note '&p'), not the pointee; pairs
 * with trivial_compare_func for identity-keyed maps. */
unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
        return (unsigned long) u;
/* Order keys by raw pointer value, returning a strcmp()-style
 * negative/zero/positive result. Default comparator for maps keyed by
 * object identity (see trivial_hash_func). */
int trivial_compare_func(const void *a, const void *b) {
        if (a < b)
                return -1;
        if (a > b)
                return 1;
        return 0;
}
/* Hash the uint64_t stored at p with siphash24. NOTE(review): the
 * declaration of the 64-bit accumulator 'u' is elided in this view. */
unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
        return (unsigned long) u;
/* Three-way comparison of two uint64_t values passed by pointer;
 * returns -1, 0 or 1 like strcmp(). */
int uint64_compare_func(const void *_a, const void *_b) {
        const uint64_t x = *(const uint64_t*) _a;
        const uint64_t y = *(const uint64_t*) _b;

        if (x == y)
                return 0;
        return x < y ? -1 : 1;
}
/* Only when dev_t is not 8 bytes do we need dedicated helpers;
 * otherwise the uint64_t ones are reused elsewhere. */
#if SIZEOF_DEV_T != 8
/* Hash the dev_t stored at p. NOTE(review): the declaration of the
 * 64-bit accumulator 'u' is elided in this view. */
unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
        return (unsigned long) u;
/* Three-way comparison of two dev_t values passed by pointer;
 * returns -1, 0 or 1 like strcmp(). */
int devt_compare_func(const void *_a, const void *_b) {
        const dev_t x = *(const dev_t*) _a;
        const dev_t y = *(const dev_t*) _b;

        if (x == y)
                return 0;
        return x < y ? -1 : 1;
}
/* Map a key to its bucket index in [0, n_buckets). */
static unsigned bucket_hash(Hashmap *h, const void *p) {
        return (unsigned) (h->hash_func(p, h->hash_key) % h->n_buckets);

/* Fill hash_key with a siphash24 key, regenerating it only when forced
 * (reuse_is_ok == false) or on first use. */
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level that makes flooding attacks a concern
         * (NOTE: tail of this comment elided in this view). */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;

        memcpy(hash_key, current, sizeof(current));
/* Allocate a fresh Hashmap with INITIAL_N_BUCKETS inline buckets.
 * On the main thread the head comes from the tile pool, otherwise from
 * the heap (the branch on 'b' and OOM handling are elided in this
 * view). NULL hash/compare functions fall back to the trivial
 * (pointer-identity) pair. */
Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) {

        b = is_main_thread();

        /* Head and initial bucket array share a single allocation. */
        size = ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS * sizeof(struct hashmap_entry*);

        h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size, 8);

        h->hash_func = hash_func ? hash_func : trivial_hash_func;
        h->compare_func = compare_func ? compare_func : trivial_compare_func;

        h->n_buckets = INITIAL_N_BUCKETS;

        h->iterate_list_head = h->iterate_list_tail = NULL;

        /* The inline bucket array lives right after the aligned head. */
        h->buckets = (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)));

        /* Reusing the cached key is fine for a fresh, empty table. */
        get_hash_key(h->hash_key, true);

/* Lazily create *h if it is still NULL; returns 0 (or (elided) -ENOMEM
 * on allocation failure). */
int hashmap_ensure_allocated(Hashmap **h, hash_func_t hash_func, compare_func_t compare_func) {

        q = hashmap_new(hash_func, compare_func);
/* Link entry e into bucket 'hash' (front of chain) and append it to the
 * global iteration list. NOTE(review): the leading asserts, the else
 * branch header of the tail check, and the n_entries increment are
 * elided in this view. */
static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {

        /* Insert into hash table */
        e->bucket_next = h->buckets[hash];
        e->bucket_previous = NULL;
        if (h->buckets[hash])
                h->buckets[hash]->bucket_previous = e;
        h->buckets[hash] = e;

        /* Insert into iteration list */
        e->iterate_previous = h->iterate_list_tail;
        e->iterate_next = NULL;
        if (h->iterate_list_tail) {
                assert(h->iterate_list_head);
                h->iterate_list_tail->iterate_next = e;
        /* (elided else: list was empty, e becomes the head) */
                assert(!h->iterate_list_head);
                h->iterate_list_head = e;

        h->iterate_list_tail = e;

        assert(h->n_entries >= 1);
/* Detach e from both the iteration list and its bucket chain.
 * NOTE(review): the 'if (...)' headers of several branches and the
 * n_entries decrement are elided in this view; the comments below mark
 * which branch each surviving statement belongs to. */
static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {

        /* Remove from iteration list */
        e->iterate_next->iterate_previous = e->iterate_previous;   /* if e has a successor */
        h->iterate_list_tail = e->iterate_previous;                /* else e was the tail */

        if (e->iterate_previous)
                e->iterate_previous->iterate_next = e->iterate_next;
        h->iterate_list_head = e->iterate_next;                    /* else e was the head */

        /* Remove from hash table bucket list */
        e->bucket_next->bucket_previous = e->bucket_previous;      /* if e has a bucket successor */

        if (e->bucket_previous)
                e->bucket_previous->bucket_next = e->bucket_next;
        h->buckets[hash] = e->bucket_next;                         /* else e headed the bucket */

        assert(h->n_entries >= 1);
/* Unlink e from the table and release its storage back to the entry
 * tile pool (asserts and the pool-vs-heap branch are elided in this
 * view). */
static void remove_entry(Hashmap *h, struct hashmap_entry *e) {

        hash = bucket_hash(h, e->key);
        unlink_entry(h, e, hash);

        deallocate_tile(&first_entry_tile, e);
/* Free the map head itself; stored keys/values are untouched. A bucket
 * array that outgrew the inline one is freed separately; pool-allocated
 * heads go back to the tile free list (NULL check, clear() call and the
 * from-pool branch are elided in this view). */
void hashmap_free(Hashmap*h) {

        /* Free the hashmap, but nothing in it */

        /* Inline buckets live directly after the head; only a resized
         * (separately allocated) array needs freeing. */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))

        deallocate_tile(&first_hashmap_tile, h);

/* Free the map and free() every value; keys are untouched. */
void hashmap_free_free(Hashmap *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys (NOTE: tail of this comment elided in this view). */

        hashmap_clear_free(h);

/* Free the map and free() every key and value. */
void hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        hashmap_clear_free_free(h);

/* Drop all entries but keep the table allocated. */
void hashmap_clear(Hashmap *h) {

        while (h->iterate_list_head)
                remove_entry(h, h->iterate_list_head);

/* Drop all entries, free()ing each value as it is stolen. */
void hashmap_clear_free(Hashmap *h) {

        while ((p = hashmap_steal_first(h)))

/* Drop all entries, free()ing both value and key of each. */
void hashmap_clear_free_free(Hashmap *h) {

        while (h->iterate_list_head) {

                /* Grab both pointers before the entry is destroyed. */
                a = h->iterate_list_head->value;
                b = (void*) h->iterate_list_head->key;
                remove_entry(h, h->iterate_list_head);
/* Walk bucket 'hash' looking for an entry whose key compares equal to
 * 'key'; returns it, or (via the elided fall-through) NULL. */
static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
        struct hashmap_entry *e;

        assert(hash < h->n_buckets);

        for (e = h->buckets[hash]; e; e = e->bucket_next)
                if (h->compare_func(e->key, key) == 0)
/* Grow the bucket array once the fill ratio exceeds 3/4, rehashing all
 * entries under a freshly randomized key. Returns true when a resize
 * happened (callers must recompute bucket indices). On OOM we simply
 * keep the packed table. NOTE(review): several statements (assert, the
 * early return, OOM bail-out, pointer swap and n_buckets update) are
 * elided in this view. */
static bool resize_buckets(Hashmap *h) {
        struct hashmap_entry **n, *i;
        uint8_t nkey[HASH_KEY_SIZE];

        /* Below 75% fill there is nothing to do. */
        if (_likely_(h->n_entries*4 < h->n_buckets*3))

        /* Increase by four */
        m = (h->n_entries+1)*4-1;

        /* If we hit OOM we simply risk packed hashmaps... */
        n = new0(struct hashmap_entry*, m);

        /* Let's use a different randomized hash key for the
         * extension, so that people cannot guess what we are using
         * (NOTE: tail of this comment elided in this view). */
        get_hash_key(nkey, false);

        /* Rehash every entry; walking the iteration list lets us rebuild
         * the bucket chains while they are being torn down. */
        for (i = h->iterate_list_head; i; i = i->iterate_next) {
                unsigned long old_bucket, new_bucket;

                old_bucket = h->hash_func(i->key, h->hash_key) % h->n_buckets;

                /* First, drop from old bucket table */
                i->bucket_next->bucket_previous = i->bucket_previous;   /* if i has a successor */

                if (i->bucket_previous)
                        i->bucket_previous->bucket_next = i->bucket_next;
                h->buckets[old_bucket] = i->bucket_next;                /* else i headed the chain */

                /* Then, add to new bucket table */
                new_bucket = h->hash_func(i->key, nkey) % m;

                i->bucket_next = n[new_bucket];
                i->bucket_previous = NULL;
                n[new_bucket]->bucket_previous = i;                     /* if the chain was non-empty */

        /* Free the old array unless it was the inline one allocated
         * together with the Hashmap head. */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))

        /* From here on, hash with the new key. */
        memcpy(h->hash_key, nkey, HASH_KEY_SIZE);
/* Insert key/value. If the key already exists the elided branches
 * return 0 when the value is identical, -EEXIST otherwise. */
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        /* (elided: existing-entry branch) */
                if (e->value == value)

        /* A resize invalidates the bucket index computed above. */
        if (resize_buckets(h))
                hash = bucket_hash(h, key);

        /* Pool-allocate entries on the main thread, malloc otherwise
         * (the selecting condition is elided in this view). */
        e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry), 64U);

        e = new(struct hashmap_entry, 1);

        link_entry(h, e, hash);
/* Like hashmap_put(), but an existing entry for 'key' gets its key and
 * value overwritten (that branch is elided in this view). */
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);

        /* Key not present: plain insert. */
        return hashmap_put(h, key, value);

/* Update the value of an EXISTING key; the elided branch fails
 * (presumably -ENOENT — verify upstream) when the key is absent. */
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
/* Look up 'key'; returns its value, or (elided) NULL when absent or the
 * map itself is NULL. */
void* hashmap_get(Hashmap *h, const void *key) {

        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);

/* Like hashmap_get(), but additionally returns the STORED key pointer,
 * which may be a different object than the lookup key. */
void* hashmap_get2(Hashmap *h, const void *key, void **key2) {

        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);

        *key2 = (void*) e->key;

/* True if 'key' is present. */
bool hashmap_contains(Hashmap *h, const void *key) {

        hash = bucket_hash(h, key);
        return !!hash_scan(h, hash, key);
/* Remove 'key' and return its value ((elided) NULL when absent). */
void* hashmap_remove(Hashmap *h, const void *key) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);

/* Like hashmap_remove(), but also hands back the stored key pointer so
 * the caller can free it. */
void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);

        *rkey = (void*) e->key;
/* Re-key an existing entry: move old_key's entry under new_key and set
 * its value. The elided branches fail when old_key is absent or new_key
 * is already taken. */
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e;
        unsigned old_hash, new_hash;

        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);

        /* Refuse to clobber an existing entry under new_key. */
        new_hash = bucket_hash(h, new_key);
        if (hash_scan(h, new_hash, new_key))

        /* Relink in place; the key/value assignments are elided. */
        unlink_entry(h, e, old_hash);

        link_entry(h, e, new_hash);

/* Like hashmap_remove_and_put(), but an existing entry under new_key
 * ('k') is removed first via the elided branch. */
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e, *k;
        unsigned old_hash, new_hash;

        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);

        new_hash = bucket_hash(h, new_key);
        k = hash_scan(h, new_hash, new_key);

        unlink_entry(h, e, old_hash);

        link_entry(h, e, new_hash);
/* Remove the entry for 'key' only when its value matches 'value';
 * returns the value on success, (elided) NULL otherwise. */
void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);

        e = hash_scan(h, hash, key);

        /* Present, but holding a different value: do not remove. */
        if (e->value != value)
/* Walk entries in insertion order. *i is the cursor: ITERATOR_FIRST to
 * start, thereafter an opaque pointer to the next entry. The elided
 * tail stores the key (if requested) and returns the value; the elided
 * early paths return NULL at the end of the list. */
void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;

        if (*i == ITERATOR_LAST)

        if (*i == ITERATOR_FIRST && !h->iterate_list_head)

        e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;

        *i = (Iterator) e->iterate_next;

/* Same walk, tail-to-head; ITERATOR_LAST is the start marker here. */
void *hashmap_iterate_backwards(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;

        if (*i == ITERATOR_FIRST)

        if (*i == ITERATOR_LAST && !h->iterate_list_tail)

        e = *i == ITERATOR_LAST ? h->iterate_list_tail : (struct hashmap_entry*) *i;

        if (e->iterate_previous)
                *i = (Iterator) e->iterate_previous;

/* Position the iterator on the entry for 'key' (the cursor update and
 * return are elided in this view). */
void *hashmap_iterate_skip(Hashmap *h, const void *key, Iterator *i) {

        struct hashmap_entry *e;

        hash = bucket_hash(h, key);

        e = hash_scan(h, hash, key);
/* Value of the oldest (first-inserted) entry; NULL when empty. */
void* hashmap_first(Hashmap *h) {

        if (!h->iterate_list_head)

        return h->iterate_list_head->value;

/* Key of the oldest entry; NULL when empty. */
void* hashmap_first_key(Hashmap *h) {

        if (!h->iterate_list_head)

        return (void*) h->iterate_list_head->key;

/* Value of the newest (last-inserted) entry; NULL when empty. */
void* hashmap_last(Hashmap *h) {

        if (!h->iterate_list_tail)

        return h->iterate_list_tail->value;

/* Remove the oldest entry and return its value (NULL when empty). */
void* hashmap_steal_first(Hashmap *h) {

        if (!h->iterate_list_head)

        /* Save the value before the entry is destroyed. */
        data = h->iterate_list_head->value;
        remove_entry(h, h->iterate_list_head);

/* Remove the oldest entry and return its key (NULL when empty). */
void* hashmap_steal_first_key(Hashmap *h) {

        if (!h->iterate_list_head)

        key = (void*) h->iterate_list_head->key;
        remove_entry(h, h->iterate_list_head);
/* Number of entries (body elided; presumably 0 for a NULL map). */
unsigned hashmap_size(Hashmap *h) {

/* Current bucket count (body elided; presumably 0 for a NULL map). */
unsigned hashmap_buckets(Hashmap *h) {

/* True when the map holds no entries (the elided branch presumably
 * also treats a NULL map as empty — verify upstream). */
bool hashmap_isempty(Hashmap *h) {

        return h->n_entries == 0;
/* Copy every entry of 'other' into h. Keys already present in h win:
 * -EEXIST from hashmap_put() is tolerated, any other error aborts. */
int hashmap_merge(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e;

        for (e = other->iterate_list_head; e; e = e->iterate_next) {

                r = hashmap_put(h, e->key, e->value);
                if (r < 0 && r != -EEXIST)

void hashmap_move(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e, *n;

        /* The same as hashmap_merge(), but every new item from other
         * is moved to h. This function is guaranteed to succeed. */

        for (e = other->iterate_list_head; e; e = n) {
                unsigned h_hash, other_hash;

                /* Entries whose key already exists in h stay in other. */
                h_hash = bucket_hash(h, e->key);
                if (hash_scan(h, h_hash, e->key))

                /* Relink the existing entry — no allocation, so this
                 * cannot fail. */
                other_hash = bucket_hash(other, e->key);
                unlink_entry(other, e, other_hash);
                link_entry(h, e, h_hash);
/* Move the single entry for 'key' from 'other' into h. The elided
 * branches fail when h already holds the key or 'other' lacks it. */
int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
        unsigned h_hash, other_hash;
        struct hashmap_entry *e;

        h_hash = bucket_hash(h, key);
        if (hash_scan(h, h_hash, key))

        other_hash = bucket_hash(other, key);
        e = hash_scan(other, other_hash, key);

        /* Relink without reallocating the entry. */
        unlink_entry(other, e, other_hash);
        link_entry(h, e, h_hash);
/* Shallow-copy h (pointers only) into a new map sharing its hash and
 * compare functions; the elided branch frees the half-built copy and
 * returns NULL when the merge fails. */
Hashmap *hashmap_copy(Hashmap *h) {

        copy = hashmap_new(h->hash_func, h->compare_func);

        if (hashmap_merge(copy, h) < 0) {

/* Collect all values into a NULL-terminated vector; only the pointers
 * are copied, not the strings themselves. */
char **hashmap_get_strv(Hashmap *h) {

        /* One slot per entry plus the terminating NULL. */
        sv = new(char*, h->n_entries+1);

        /* (the loop body filling sv is elided in this view) */
        HASHMAP_FOREACH(item, h, it)
982 void *hashmap_next(Hashmap *h, const void *key) {
984 struct hashmap_entry *e;
992 hash = bucket_hash(h, key);
993 e = hash_scan(h, hash, key);