1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "siphash24.h"

#define INITIAL_N_BUCKETS 31
34 struct hashmap_entry {
37 struct hashmap_entry *bucket_next, *bucket_previous;
38 struct hashmap_entry *iterate_next, *iterate_previous;
42 const struct hash_ops *hash_ops;
44 struct hashmap_entry *iterate_list_head, *iterate_list_tail;
46 struct hashmap_entry ** buckets;
47 unsigned n_buckets, n_entries;
49 uint8_t hash_key[HASH_KEY_SIZE];
/* A very simple pool allocator for fixed-size tiles, used for
 * Hashmap headers and entries allocated on the main thread. Pools
 * are singly linked; freed tiles go on a free list that reuses the
 * tile's first word as the "next" pointer. */
struct pool {
        struct pool *next;
        unsigned n_tiles;
        unsigned n_used;
};

static struct pool *first_hashmap_pool = NULL;
static void *first_hashmap_tile = NULL;

static struct pool *first_entry_pool = NULL;
static void *first_entry_tile = NULL;

/* Returns a tile of tile_size bytes, reusing a freed tile when
 * possible, otherwise carving one out of the newest pool, growing
 * the pool geometrically (at least at_least tiles). Returns NULL on
 * OOM. */
static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size, unsigned at_least) {
        unsigned i;

        /* When a tile is released we add it to the list and simply
         * place the next pointer at its offset 0. */

        assert(tile_size >= sizeof(void*));
        assert(at_least > 0);

        /* Reuse a previously released tile, if there is one */
        if (*first_tile) {
                void *r;

                r = *first_tile;
                *first_tile = * (void**) (*first_tile);
                return r;
        }

        /* Need a fresh pool if there is none yet, or the newest is full */
        if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
                unsigned n;
                size_t size;
                struct pool *p;

                n = *first_pool ? (*first_pool)->n_tiles : 0;
                n = MAX(at_least, n * 2);
                size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
                /* Recompute how many tiles actually fit after page alignment */
                n = (size - ALIGN(sizeof(struct pool))) / tile_size;

                p = malloc(size);
                if (!p)
                        return NULL;

                p->next = *first_pool;
                p->n_tiles = n;
                p->n_used = 0;

                *first_pool = p;
        }

        i = (*first_pool)->n_used++;

        return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
}

/* Pushes a tile onto the free list; the tile's first word becomes
 * the link to the next free tile. */
static void deallocate_tile(void **first_tile, void *p) {
        * (void**) p = *first_tile;
        *first_tile = p;
}

/* Frees a whole chain of pools. */
static void drop_pool(struct pool *p) {
        while (p) {
                struct pool *n;

                n = p->next;
                free(p);
                p = n;
        }
}

__attribute__((destructor)) static void cleanup_pool(void) {
        /* Be nice to valgrind */

        drop_pool(first_hashmap_pool);
        drop_pool(first_entry_pool);
}
133 unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
135 siphash24((uint8_t*) &u, p, strlen(p), hash_key);
136 return (unsigned long) u;
/* Orders two NUL-terminated strings; plain strcmp() semantics. */
int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}
143 const struct hash_ops string_hash_ops = {
144 .hash = string_hash_func,
145 .compare = string_compare_func
148 unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
150 siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
151 return (unsigned long) u;
/* Orders two keys by their raw pointer value. */
int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}
158 const struct hash_ops trivial_hash_ops = {
159 .hash = trivial_hash_func,
160 .compare = trivial_compare_func
163 unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
165 siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
166 return (unsigned long) u;
/* Orders two uint64_t keys passed by pointer. */
int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;
        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}
176 const struct hash_ops uint64_hash_ops = {
177 .hash = uint64_hash_func,
178 .compare = uint64_compare_func
181 #if SIZEOF_DEV_T != 8
182 unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
184 siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
185 return (unsigned long) u;
/* Orders two dev_t keys passed by pointer. */
int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}
195 const struct hash_ops devt_hash_ops = {
196 .hash = devt_hash_func,
197 .compare = devt_compare_func
201 static unsigned bucket_hash(Hashmap *h, const void *p) {
202 return (unsigned) (h->hash_ops->hash(p, h->hash_key) % h->n_buckets);
205 static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
206 static uint8_t current[HASH_KEY_SIZE];
207 static bool current_initialized = false;
209 /* Returns a hash function key to use. In order to keep things
210 * fast we will not generate a new key each time we allocate a
211 * new hash table. Instead, we'll just reuse the most recently
212 * generated one, except if we never generated one or when we
213 * are rehashing an entire hash table because we reached a
216 if (!current_initialized || !reuse_is_ok) {
217 random_bytes(current, sizeof(current));
218 current_initialized = true;
221 memcpy(hash_key, current, sizeof(current));
224 Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
229 b = is_main_thread();
231 size = ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS * sizeof(struct hashmap_entry*);
234 h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size, 8);
246 h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
248 h->n_buckets = INITIAL_N_BUCKETS;
250 h->iterate_list_head = h->iterate_list_tail = NULL;
252 h->buckets = (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)));
256 get_hash_key(h->hash_key, true);
261 int hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops) {
269 q = hashmap_new(hash_ops);
277 static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
281 /* Insert into hash table */
282 e->bucket_next = h->buckets[hash];
283 e->bucket_previous = NULL;
284 if (h->buckets[hash])
285 h->buckets[hash]->bucket_previous = e;
286 h->buckets[hash] = e;
288 /* Insert into iteration list */
289 e->iterate_previous = h->iterate_list_tail;
290 e->iterate_next = NULL;
291 if (h->iterate_list_tail) {
292 assert(h->iterate_list_head);
293 h->iterate_list_tail->iterate_next = e;
295 assert(!h->iterate_list_head);
296 h->iterate_list_head = e;
298 h->iterate_list_tail = e;
301 assert(h->n_entries >= 1);
304 static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
308 /* Remove from iteration list */
310 e->iterate_next->iterate_previous = e->iterate_previous;
312 h->iterate_list_tail = e->iterate_previous;
314 if (e->iterate_previous)
315 e->iterate_previous->iterate_next = e->iterate_next;
317 h->iterate_list_head = e->iterate_next;
319 /* Remove from hash table bucket list */
321 e->bucket_next->bucket_previous = e->bucket_previous;
323 if (e->bucket_previous)
324 e->bucket_previous->bucket_next = e->bucket_next;
326 h->buckets[hash] = e->bucket_next;
328 assert(h->n_entries >= 1);
332 static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
338 hash = bucket_hash(h, e->key);
339 unlink_entry(h, e, hash);
342 deallocate_tile(&first_entry_tile, e);
347 void hashmap_free(Hashmap*h) {
349 /* Free the hashmap, but nothing in it */
356 if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
360 deallocate_tile(&first_hashmap_tile, h);
365 void hashmap_free_free(Hashmap *h) {
367 /* Free the hashmap and all data objects in it, but not the
373 hashmap_clear_free(h);
377 void hashmap_free_free_free(Hashmap *h) {
379 /* Free the hashmap and all data and key objects in it */
384 hashmap_clear_free_free(h);
388 void hashmap_clear(Hashmap *h) {
392 while (h->iterate_list_head)
393 remove_entry(h, h->iterate_list_head);
396 void hashmap_clear_free(Hashmap *h) {
402 while ((p = hashmap_steal_first(h)))
406 void hashmap_clear_free_free(Hashmap *h) {
410 while (h->iterate_list_head) {
413 a = h->iterate_list_head->value;
414 b = (void*) h->iterate_list_head->key;
415 remove_entry(h, h->iterate_list_head);
421 static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
422 struct hashmap_entry *e;
424 assert(hash < h->n_buckets);
426 for (e = h->buckets[hash]; e; e = e->bucket_next)
427 if (h->hash_ops->compare(e->key, key) == 0)
433 static bool resize_buckets(Hashmap *h) {
434 struct hashmap_entry **n, *i;
436 uint8_t nkey[HASH_KEY_SIZE];
440 if (_likely_(h->n_entries*4 < h->n_buckets*3))
443 /* Increase by four */
444 m = (h->n_entries+1)*4-1;
446 /* If we hit OOM we simply risk packed hashmaps... */
447 n = new0(struct hashmap_entry*, m);
451 /* Let's use a different randomized hash key for the
452 * extension, so that people cannot guess what we are using
454 get_hash_key(nkey, false);
456 for (i = h->iterate_list_head; i; i = i->iterate_next) {
457 unsigned long old_bucket, new_bucket;
459 old_bucket = h->hash_ops->hash(i->key, h->hash_key) % h->n_buckets;
461 /* First, drop from old bucket table */
463 i->bucket_next->bucket_previous = i->bucket_previous;
465 if (i->bucket_previous)
466 i->bucket_previous->bucket_next = i->bucket_next;
468 h->buckets[old_bucket] = i->bucket_next;
470 /* Then, add to new backet table */
471 new_bucket = h->hash_ops->hash(i->key, nkey) % m;
473 i->bucket_next = n[new_bucket];
474 i->bucket_previous = NULL;
476 n[new_bucket]->bucket_previous = i;
480 if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
486 memcpy(h->hash_key, nkey, HASH_KEY_SIZE);
491 static int __hashmap_put(Hashmap *h, const void *key, void *value, unsigned hash) {
492 /* For when we know no such entry exists yet */
494 struct hashmap_entry *e;
496 if (resize_buckets(h))
497 hash = bucket_hash(h, key);
500 e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry), 64U);
502 e = new(struct hashmap_entry, 1);
510 link_entry(h, e, hash);
515 int hashmap_put(Hashmap *h, const void *key, void *value) {
516 struct hashmap_entry *e;
521 hash = bucket_hash(h, key);
522 e = hash_scan(h, hash, key);
524 if (e->value == value)
529 return __hashmap_put(h, key, value, hash);
532 int hashmap_replace(Hashmap *h, const void *key, void *value) {
533 struct hashmap_entry *e;
538 hash = bucket_hash(h, key);
539 e = hash_scan(h, hash, key);
546 return __hashmap_put(h, key, value, hash);
549 int hashmap_update(Hashmap *h, const void *key, void *value) {
550 struct hashmap_entry *e;
555 hash = bucket_hash(h, key);
556 e = hash_scan(h, hash, key);
564 void* hashmap_get(Hashmap *h, const void *key) {
566 struct hashmap_entry *e;
571 hash = bucket_hash(h, key);
572 e = hash_scan(h, hash, key);
579 void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
581 struct hashmap_entry *e;
586 hash = bucket_hash(h, key);
587 e = hash_scan(h, hash, key);
592 *key2 = (void*) e->key;
597 bool hashmap_contains(Hashmap *h, const void *key) {
603 hash = bucket_hash(h, key);
604 return !!hash_scan(h, hash, key);
607 void* hashmap_remove(Hashmap *h, const void *key) {
608 struct hashmap_entry *e;
615 hash = bucket_hash(h, key);
616 e = hash_scan(h, hash, key);
626 void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
627 struct hashmap_entry *e;
637 hash = bucket_hash(h, key);
638 e = hash_scan(h, hash, key);
647 *rkey = (void*) e->key;
654 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
655 struct hashmap_entry *e;
656 unsigned old_hash, new_hash;
661 old_hash = bucket_hash(h, old_key);
662 e = hash_scan(h, old_hash, old_key);
666 new_hash = bucket_hash(h, new_key);
667 if (hash_scan(h, new_hash, new_key))
670 unlink_entry(h, e, old_hash);
675 link_entry(h, e, new_hash);
680 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
681 struct hashmap_entry *e, *k;
682 unsigned old_hash, new_hash;
687 old_hash = bucket_hash(h, old_key);
688 e = hash_scan(h, old_hash, old_key);
692 new_hash = bucket_hash(h, new_key);
693 k = hash_scan(h, new_hash, new_key);
698 unlink_entry(h, e, old_hash);
703 link_entry(h, e, new_hash);
708 void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
709 struct hashmap_entry *e;
715 hash = bucket_hash(h, key);
717 e = hash_scan(h, hash, key);
721 if (e->value != value)
729 void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
730 struct hashmap_entry *e;
737 if (*i == ITERATOR_LAST)
740 if (*i == ITERATOR_FIRST && !h->iterate_list_head)
743 e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;
746 *i = (Iterator) e->iterate_next;
764 void* hashmap_first(Hashmap *h) {
769 if (!h->iterate_list_head)
772 return h->iterate_list_head->value;
775 void* hashmap_first_key(Hashmap *h) {
780 if (!h->iterate_list_head)
783 return (void*) h->iterate_list_head->key;
786 void* hashmap_steal_first(Hashmap *h) {
792 if (!h->iterate_list_head)
795 data = h->iterate_list_head->value;
796 remove_entry(h, h->iterate_list_head);
801 void* hashmap_steal_first_key(Hashmap *h) {
807 if (!h->iterate_list_head)
810 key = (void*) h->iterate_list_head->key;
811 remove_entry(h, h->iterate_list_head);
816 unsigned hashmap_size(Hashmap *h) {
824 unsigned hashmap_buckets(Hashmap *h) {
832 bool hashmap_isempty(Hashmap *h) {
837 return h->n_entries == 0;
840 int hashmap_merge(Hashmap *h, Hashmap *other) {
841 struct hashmap_entry *e;
848 for (e = other->iterate_list_head; e; e = e->iterate_next) {
851 r = hashmap_put(h, e->key, e->value);
852 if (r < 0 && r != -EEXIST)
859 void hashmap_move(Hashmap *h, Hashmap *other) {
860 struct hashmap_entry *e, *n;
864 /* The same as hashmap_merge(), but every new item from other
865 * is moved to h. This function is guaranteed to succeed. */
870 for (e = other->iterate_list_head; e; e = n) {
871 unsigned h_hash, other_hash;
875 h_hash = bucket_hash(h, e->key);
876 if (hash_scan(h, h_hash, e->key))
879 other_hash = bucket_hash(other, e->key);
880 unlink_entry(other, e, other_hash);
881 link_entry(h, e, h_hash);
885 int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
886 unsigned h_hash, other_hash;
887 struct hashmap_entry *e;
894 h_hash = bucket_hash(h, key);
895 if (hash_scan(h, h_hash, key))
898 other_hash = bucket_hash(other, key);
899 e = hash_scan(other, other_hash, key);
903 unlink_entry(other, e, other_hash);
904 link_entry(h, e, h_hash);
909 Hashmap *hashmap_copy(Hashmap *h) {
914 copy = hashmap_new(h->hash_ops);
918 if (hashmap_merge(copy, h) < 0) {
926 char **hashmap_get_strv(Hashmap *h) {
932 sv = new(char*, h->n_entries+1);
937 HASHMAP_FOREACH(item, h, it)
944 void *hashmap_next(Hashmap *h, const void *key) {
946 struct hashmap_entry *e;
954 hash = bucket_hash(h, key);
955 e = hash_scan(h, hash, key);