1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
30 #include "siphash24.h"
/* Number of buckets a freshly created hashmap starts with; grown on
 * demand by resize_buckets(). */
#define INITIAL_N_BUCKETS 31
/* One key/value entry. Each entry is a member of two intrusive doubly
 * linked lists at once: the per-bucket collision chain and the global
 * insertion-order iteration list. */
struct hashmap_entry {
        const void *key;        /* as passed in by the caller; not owned */
        void *value;
        struct hashmap_entry *bucket_next, *bucket_previous;
        struct hashmap_entry *iterate_next, *iterate_previous;
};
42 const struct hash_ops *hash_ops;
44 struct hashmap_entry *iterate_list_head, *iterate_list_tail;
46 struct hashmap_entry ** buckets;
47 unsigned n_buckets, n_entries;
49 uint8_t hash_key[HASH_KEY_SIZE];
/* Tile-pool allocator state: one pool chain plus free list for Hashmap
 * headers, and one for hashmap_entry objects. Released tiles are pushed
 * onto the free list, threaded through each tile's first word. */
static struct pool *first_hashmap_pool = NULL;
static void *first_hashmap_tile = NULL;
static struct pool *first_entry_pool = NULL;
static void *first_entry_tile = NULL;
/* Hand out one tile of 'tile_size' bytes: first try the free list, then
 * the current pool, growing the pool chain when exhausted.
 *
 * NOTE(review): this body appears truncated in this chunk — the
 * free-list non-empty check and its return, the local declarations
 * (i, n, size, p), the pool allocation/initialization and the pool
 * list-head update are not visible here; confirm against the full file. */
static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size, unsigned at_least) {
        /* When a tile is released we add it to the list and simply
         * place the next pointer at its offset 0. */
        /* A tile must at least be able to hold the free-list link. */
        assert(tile_size >= sizeof(void*));
        /* Pop from the free list: the first word stores the next tile. */
        *first_tile = * (void**) (*first_tile);
        /* No pool yet, or the current one is exhausted: grow. */
        if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
                /* Double the capacity, but never below 'at_least'. */
                n = *first_pool ? (*first_pool)->n_tiles : 0;
                n = MAX(at_least, n * 2);
                /* Round up to whole pages, then recompute how many tiles
                 * actually fit in that size. */
                size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
                n = (size - ALIGN(sizeof(struct pool))) / tile_size;
                /* Chain the new pool in front of the older ones. */
                p->next = *first_pool;
        /* Claim the next unused tile of the current pool. */
        i = (*first_pool)->n_used++;
        return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
/* Return a tile to its free list. The tile's first word is reused to
 * store the previous list head, so the tile itself is the link node
 * (matching the pop in allocate_tile()). */
static void deallocate_tile(void **first_tile, void *p) {
        * (void**) p = *first_tile;
        *first_tile = p;
}
115 static void drop_pool(struct pool *p) {
124 __attribute__((destructor)) static void cleanup_pool(void) {
125 /* Be nice to valgrind */
127 drop_pool(first_hashmap_pool);
128 drop_pool(first_entry_pool);
133 unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
135 siphash24((uint8_t*) &u, p, strlen(p), hash_key);
136 return (unsigned long) u;
/* Key comparison for string-keyed maps: plain strcmp() ordering.
 * (Body and closing brace were missing — truncation.) */
int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}
143 const struct hash_ops string_hash_ops = {
144 .hash = string_hash_func,
145 .compare = string_compare_func
148 unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
150 siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
151 return (unsigned long) u;
/* Order identity keys by raw pointer value (closing brace restored). */
int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}
158 const struct hash_ops trivial_hash_ops = {
159 .hash = trivial_hash_func,
160 .compare = trivial_compare_func
163 unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
165 siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
166 return (unsigned long) u;
/* Three-way comparison of two uint64_t keys passed by pointer
 * (local declarations and closing brace restored). */
int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;

        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}
176 const struct hash_ops uint64_hash_ops = {
177 .hash = uint64_hash_func,
178 .compare = uint64_compare_func
181 #if SIZEOF_DEV_T != 8
182 unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
184 siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
185 return (unsigned long) u;
188 int devt_compare_func(const void *_a, const void *_b) {
190 a = *(const dev_t*) _a;
191 b = *(const dev_t*) _b;
192 return a < b ? -1 : (a > b ? 1 : 0);
195 const struct hash_ops devt_hash_ops = {
196 .hash = devt_hash_func,
197 .compare = devt_compare_func
201 static unsigned bucket_hash(Hashmap *h, const void *p) {
202 return (unsigned) (h->hash_ops->hash(p, h->hash_key) % h->n_buckets);
205 static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
206 static uint8_t current[HASH_KEY_SIZE];
207 static bool current_initialized = false;
209 /* Returns a hash function key to use. In order to keep things
210 * fast we will not generate a new key each time we allocate a
211 * new hash table. Instead, we'll just reuse the most recently
212 * generated one, except if we never generated one or when we
213 * are rehashing an entire hash table because we reached a
216 if (!current_initialized || !reuse_is_ok) {
217 random_bytes(current, sizeof(current));
218 current_initialized = true;
221 memcpy(hash_key, current, sizeof(current));
/* Allocate a new Hashmap. If hash_ops is NULL the trivial (pointer
 * identity) ops are used. The initial bucket array is carved out of the
 * same allocation, right after the Hashmap header.
 *
 * NOTE(review): this body appears truncated in this chunk — the local
 * declarations, the allocation branch choosing between the tile pool
 * and a plain allocation (based on the is_main_thread() result), the
 * zeroing of the allocation, the n_entries initialization and the
 * final return are not visible here; confirm against the full file. */
Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
        /* Tile-pool allocation is presumably only safe from the main
         * thread — TODO confirm against allocate_tile()'s callers. */
        b = is_main_thread();
        /* One allocation holds the header plus the initial buckets. */
        size = ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS * sizeof(struct hashmap_entry*);
        h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size, 8);
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
        h->n_buckets = INITIAL_N_BUCKETS;
        h->iterate_list_head = h->iterate_list_tail = NULL;
        /* Buckets live immediately after the aligned Hashmap header. */
        h->buckets = (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)));
        /* Reuse the current process-wide hash key (reuse_is_ok). */
        get_hash_key(h->hash_key, true);
/* Lazily allocate *h: create a new map with the given ops if *h is
 * still NULL.
 *
 * NOTE(review): truncated — the assert, the early return when *h is
 * already set, the out-of-memory check on q, and the store back into
 * *h plus return are not visible in this chunk. */
int hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops) {
        q = hashmap_new(hash_ops);
277 static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
281 /* Insert into hash table */
282 e->bucket_next = h->buckets[hash];
283 e->bucket_previous = NULL;
284 if (h->buckets[hash])
285 h->buckets[hash]->bucket_previous = e;
286 h->buckets[hash] = e;
288 /* Insert into iteration list */
289 e->iterate_previous = h->iterate_list_tail;
290 e->iterate_next = NULL;
291 if (h->iterate_list_tail) {
292 assert(h->iterate_list_head);
293 h->iterate_list_tail->iterate_next = e;
295 assert(!h->iterate_list_head);
296 h->iterate_list_head = e;
298 h->iterate_list_tail = e;
301 assert(h->n_entries >= 1);
304 static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
308 /* Remove from iteration list */
310 e->iterate_next->iterate_previous = e->iterate_previous;
312 h->iterate_list_tail = e->iterate_previous;
314 if (e->iterate_previous)
315 e->iterate_previous->iterate_next = e->iterate_next;
317 h->iterate_list_head = e->iterate_next;
319 /* Remove from hash table bucket list */
321 e->bucket_next->bucket_previous = e->bucket_previous;
323 if (e->bucket_previous)
324 e->bucket_previous->bucket_next = e->bucket_next;
326 h->buckets[hash] = e->bucket_next;
328 assert(h->n_entries >= 1);
/* Unlink an entry from both lists and release its memory.
 *
 * NOTE(review): truncated — the asserts, the local 'hash' declaration
 * and (presumably) a branch that free()s entries not taken from the
 * tile pool are not visible in this chunk; confirm against the full
 * file. */
static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
        /* Recompute the bucket from the key; entries don't cache it. */
        hash = bucket_hash(h, e->key);
        unlink_entry(h, e, hash);
        /* Push the entry back on the tile free list for reuse. */
        deallocate_tile(&first_entry_tile, e);
/* Free the map object itself, plus a separately grown bucket array.
 *
 * NOTE(review): truncated — the NULL guard, the clearing of remaining
 * entries, the free() of a grown bucket array and the non-pool free
 * path are not visible in this chunk. */
void hashmap_free(Hashmap*h) {
        /* Free the hashmap, but nothing in it */
        /* A grown bucket array is a separate allocation; the initial one
         * lives inside the Hashmap allocation itself. */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
        deallocate_tile(&first_hashmap_tile, h);
/* NOTE(review): truncated — the NULL guard and the final
 * hashmap_free() call are not visible in this chunk. */
void hashmap_free_free(Hashmap *h) {
        /* Free the hashmap and all data objects in it, but not the
         * keys. */
        hashmap_clear_free(h);
/* NOTE(review): truncated — the NULL guard and the final
 * hashmap_free() call are not visible in this chunk. */
void hashmap_free_free_free(Hashmap *h) {
        /* Free the hashmap and all data and key objects in it */
        hashmap_clear_free_free(h);
388 void hashmap_clear(Hashmap *h) {
392 while (h->iterate_list_head)
393 remove_entry(h, h->iterate_list_head);
396 void hashmap_clear_free(Hashmap *h) {
402 while ((p = hashmap_steal_first(h)))
406 void hashmap_clear_free_free(Hashmap *h) {
410 while (h->iterate_list_head) {
413 a = h->iterate_list_head->value;
414 b = (void*) h->iterate_list_head->key;
415 remove_entry(h, h->iterate_list_head);
421 static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
422 struct hashmap_entry *e;
424 assert(hash < h->n_buckets);
426 for (e = h->buckets[hash]; e; e = e->bucket_next)
427 if (h->hash_ops->compare(e->key, key) == 0)
/* Grow the bucket array once the fill level reaches 3/4, roughly
 * quadrupling the bucket count, and rehash every entry under a freshly
 * generated hash key. Returns true if a resize actually happened.
 *
 * NOTE(review): truncated — the declaration of 'm', the early 'return
 * false', the OOM bail-out after new0(), the branch for an empty new
 * bucket, the free() of the old (grown) array, the installation of the
 * new array/count into 'h' and the final return are not visible in
 * this chunk. */
static bool resize_buckets(Hashmap *h) {
        struct hashmap_entry **n, *i;
        uint8_t nkey[HASH_KEY_SIZE];
        /* Still below a 75% fill level? Then nothing to do. */
        if (_likely_(h->n_entries*4 < h->n_buckets*3))
        /* Increase by four */
        m = (h->n_entries+1)*4-1;
        /* If we hit OOM we simply risk packed hashmaps... */
        n = new0(struct hashmap_entry*, m);
        /* Let's use a different randomized hash key for the
         * extension, so that people cannot guess what we are using
         * from the outside. */
        get_hash_key(nkey, false);
        /* Re-bucket every entry by walking the iteration list. */
        for (i = h->iterate_list_head; i; i = i->iterate_next) {
                unsigned long old_bucket, new_bucket;
                /* Bucket under the old key and old bucket count. */
                old_bucket = h->hash_ops->hash(i->key, h->hash_key) % h->n_buckets;
                /* First, drop from old bucket table */
                i->bucket_next->bucket_previous = i->bucket_previous;
                if (i->bucket_previous)
                        i->bucket_previous->bucket_next = i->bucket_next;
                h->buckets[old_bucket] = i->bucket_next;
                /* Then, add to new bucket table */
                new_bucket = h->hash_ops->hash(i->key, nkey) % m;
                i->bucket_next = n[new_bucket];
                i->bucket_previous = NULL;
                n[new_bucket]->bucket_previous = i;
        /* Free the old array unless it was the inline initial one. */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
        /* From now on this table hashes with the new key. */
        memcpy(h->hash_key, nkey, HASH_KEY_SIZE);
/* Insert (key, value). By the visible returns/calls: an identical
 * (key, value) pair is a no-op, a resize may rehash (hence the bucket
 * recomputation), and a fresh entry is linked in on success.
 *
 * NOTE(review): truncated — the 'hash' declaration, asserts, the
 * return codes (same pair vs. key present with different value), the
 * branch selecting tile-pool vs. plain allocation, the OOM check, the
 * key/value assignments and the final return are not visible in this
 * chunk. */
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        /* Same key already stored with the identical value? */
        if (e->value == value)
        /* Growing rehashes everything, so recompute the bucket. */
        if (resize_buckets(h))
                hash = bucket_hash(h, key);
        e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry), 64U);
        e = new(struct hashmap_entry, 1);
        link_entry(h, e, hash);
524 int hashmap_replace(Hashmap *h, const void *key, void *value) {
525 struct hashmap_entry *e;
530 hash = bucket_hash(h, key);
531 e = hash_scan(h, hash, key);
538 return hashmap_put(h, key, value);
541 int hashmap_update(Hashmap *h, const void *key, void *value) {
542 struct hashmap_entry *e;
547 hash = bucket_hash(h, key);
548 e = hash_scan(h, hash, key);
556 void* hashmap_get(Hashmap *h, const void *key) {
558 struct hashmap_entry *e;
563 hash = bucket_hash(h, key);
564 e = hash_scan(h, hash, key);
571 void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
573 struct hashmap_entry *e;
578 hash = bucket_hash(h, key);
579 e = hash_scan(h, hash, key);
584 *key2 = (void*) e->key;
589 bool hashmap_contains(Hashmap *h, const void *key) {
595 hash = bucket_hash(h, key);
596 return !!hash_scan(h, hash, key);
599 void* hashmap_remove(Hashmap *h, const void *key) {
600 struct hashmap_entry *e;
607 hash = bucket_hash(h, key);
608 e = hash_scan(h, hash, key);
618 void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
619 struct hashmap_entry *e;
629 hash = bucket_hash(h, key);
630 e = hash_scan(h, hash, key);
639 *rkey = (void*) e->key;
646 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
647 struct hashmap_entry *e;
648 unsigned old_hash, new_hash;
653 old_hash = bucket_hash(h, old_key);
654 e = hash_scan(h, old_hash, old_key);
658 new_hash = bucket_hash(h, new_key);
659 if (hash_scan(h, new_hash, new_key))
662 unlink_entry(h, e, old_hash);
667 link_entry(h, e, new_hash);
672 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
673 struct hashmap_entry *e, *k;
674 unsigned old_hash, new_hash;
679 old_hash = bucket_hash(h, old_key);
680 e = hash_scan(h, old_hash, old_key);
684 new_hash = bucket_hash(h, new_key);
685 k = hash_scan(h, new_hash, new_key);
690 unlink_entry(h, e, old_hash);
695 link_entry(h, e, new_hash);
700 void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
701 struct hashmap_entry *e;
707 hash = bucket_hash(h, key);
709 e = hash_scan(h, hash, key);
713 if (e->value != value)
/* Advance iteration in insertion order: return the current entry's
 * value (and its key via *key) and step *i to the next entry.
 *
 * NOTE(review): truncated — the assert, the NULL-map and at-end
 * handling (which presumably resets *i/*key and returns NULL), the
 * else-branch setting *i to ITERATOR_LAST when there is no next entry,
 * the *key store and the value return are not visible in this chunk. */
void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;
        /* Iteration already finished? */
        if (*i == ITERATOR_LAST)
        /* Fresh iterator over an empty map? */
        if (*i == ITERATOR_FIRST && !h->iterate_list_head)
        /* ITERATOR_FIRST starts at the list head; otherwise the iterator
         * value is the current entry pointer itself. */
        e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;
        /* Step to the following entry for the next call. */
        *i = (Iterator) e->iterate_next;
756 void* hashmap_first(Hashmap *h) {
761 if (!h->iterate_list_head)
764 return h->iterate_list_head->value;
767 void* hashmap_first_key(Hashmap *h) {
772 if (!h->iterate_list_head)
775 return (void*) h->iterate_list_head->key;
778 void* hashmap_steal_first(Hashmap *h) {
784 if (!h->iterate_list_head)
787 data = h->iterate_list_head->value;
788 remove_entry(h, h->iterate_list_head);
793 void* hashmap_steal_first_key(Hashmap *h) {
799 if (!h->iterate_list_head)
802 key = (void*) h->iterate_list_head->key;
803 remove_entry(h, h->iterate_list_head);
808 unsigned hashmap_size(Hashmap *h) {
816 unsigned hashmap_buckets(Hashmap *h) {
824 bool hashmap_isempty(Hashmap *h) {
829 return h->n_entries == 0;
832 int hashmap_merge(Hashmap *h, Hashmap *other) {
833 struct hashmap_entry *e;
840 for (e = other->iterate_list_head; e; e = e->iterate_next) {
843 r = hashmap_put(h, e->key, e->value);
844 if (r < 0 && r != -EEXIST)
851 void hashmap_move(Hashmap *h, Hashmap *other) {
852 struct hashmap_entry *e, *n;
856 /* The same as hashmap_merge(), but every new item from other
857 * is moved to h. This function is guaranteed to succeed. */
862 for (e = other->iterate_list_head; e; e = n) {
863 unsigned h_hash, other_hash;
867 h_hash = bucket_hash(h, e->key);
868 if (hash_scan(h, h_hash, e->key))
871 other_hash = bucket_hash(other, e->key);
872 unlink_entry(other, e, other_hash);
873 link_entry(h, e, h_hash);
877 int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
878 unsigned h_hash, other_hash;
879 struct hashmap_entry *e;
886 h_hash = bucket_hash(h, key);
887 if (hash_scan(h, h_hash, key))
890 other_hash = bucket_hash(other, key);
891 e = hash_scan(other, other_hash, key);
895 unlink_entry(other, e, other_hash);
896 link_entry(h, e, h_hash);
901 Hashmap *hashmap_copy(Hashmap *h) {
906 copy = hashmap_new(h->hash_ops);
910 if (hashmap_merge(copy, h) < 0) {
/* Build a newly allocated, NULL-terminated array of all stored values
 * (presumably strings; the strings themselves are not copied — TODO
 * confirm ownership against callers).
 *
 * NOTE(review): truncated — the declarations (sv, it, item, n), the
 * OOM check, the loop body collecting values, the terminating NULL
 * store and the return are not visible in this chunk. */
char **hashmap_get_strv(Hashmap *h) {
        /* One slot per entry plus the terminating NULL. */
        sv = new(char*, h->n_entries+1);
        HASHMAP_FOREACH(item, h, it)
/* Given 'key', look up its entry — presumably to return the value of
 * the entry that follows it in insertion order.
 *
 * NOTE(review): this definition runs past the end of the visible
 * chunk; the remainder of the body cannot be reviewed here. */
void *hashmap_next(Hashmap *h, const void *key) {
        struct hashmap_entry *e;
        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);