1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
30 #include "siphash24.h"
33 #define INITIAL_N_BUCKETS 31
/* One key/value entry. Each entry is linked twice: into its bucket's
 * doubly-linked chain (for lookup) and into a global doubly-linked
 * iteration list (for stable, insertion-ordered traversal).
 * NOTE(review): the key/value members are elided from this excerpt. */
35 struct hashmap_entry {
38 struct hashmap_entry *bucket_next, *bucket_previous;
39 struct hashmap_entry *iterate_next, *iterate_previous;

/* Core Hashmap state (the enclosing struct header is not visible in this
 * excerpt). */
/* Callbacks used to hash and compare keys. */
43 const struct hash_ops *hash_ops;
/* Head/tail of the insertion-order iteration list. */
45 struct hashmap_entry *iterate_list_head, *iterate_list_tail;
/* Bucket array; starts out pointing at the inline initial_buckets below
 * and is replaced by a heap allocation on resize (see resize_buckets()). */
47 struct hashmap_entry ** buckets;
48 unsigned n_buckets, n_entries;
/* Per-map siphash key; see get_hash_key(). */
50 uint8_t hash_key[HASH_KEY_SIZE];
/* Inline bucket storage used until the first resize, saving a second
 * allocation for small maps. */
56 struct hashmap_entry *initial_buckets[INITIAL_N_BUCKETS];
/* Tile pools used for Hashmap heads and entries when allocating on the
 * main thread (see hashmap_new()); 8 and 64 are the per-slab tile counts. */
59 static DEFINE_MEMPOOL(hashmap_pool, struct hashmap_tile, 8);
60 static DEFINE_MEMPOOL(hashmap_entry_pool, struct hashmap_entry, 64);

/* ELF destructor: release the pool slabs at process exit so tools like
 * valgrind do not report them as leaks. */
64 __attribute__((destructor)) static void cleanup_pools(void) {
65 /* Be nice to valgrind */
67 mempool_drop(&hashmap_entry_pool);
68 mempool_drop(&hashmap_pool);
/* Hash a NUL-terminated string with keyed siphash24, truncated to
 * unsigned long. NOTE(review): the declaration of the 64-bit siphash
 * output variable `u` is elided from this excerpt. */
73 unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
75 siphash24((uint8_t*) &u, p, strlen(p), hash_key);
76 return (unsigned long) u;

/* strcmp()-style three-way ordering for string keys (body elided from
 * this excerpt). */
79 int string_compare_func(const void *a, const void *b) {

/* vtable for maps keyed by NUL-terminated strings */
83 const struct hash_ops string_hash_ops = {
84 .hash = string_hash_func,
85 .compare = string_compare_func
/* Hash the pointer value itself (&p, sizeof(p)): keys compare by
 * identity, not by what they point to. */
88 unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
90 siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
91 return (unsigned long) u;

/* Order keys by raw pointer value.
 * NOTE(review): relational comparison of pointers into different objects
 * is formally unspecified in ISO C; fine on flat-address-space targets. */
94 int trivial_compare_func(const void *a, const void *b) {
95 return a < b ? -1 : (a > b ? 1 : 0);

/* Default vtable, used when hashmap_new() is passed NULL ops. */
98 const struct hash_ops trivial_hash_ops = {
99 .hash = trivial_hash_func,
100 .compare = trivial_compare_func
/* Hash the uint64_t that p points at. */
103 unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
105 siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
106 return (unsigned long) u;

/* Numeric three-way comparison of two uint64_t keys (local declarations
 * of a/b elided from this excerpt). */
109 int uint64_compare_func(const void *_a, const void *_b) {
111 a = *(const uint64_t*) _a;
112 b = *(const uint64_t*) _b;
113 return a < b ? -1 : (a > b ? 1 : 0);

/* vtable for maps keyed by pointers to uint64_t */
116 const struct hash_ops uint64_hash_ops = {
117 .hash = uint64_hash_func,
118 .compare = uint64_compare_func
/* dev_t variants, compiled only when dev_t is not 64 bit wide
 * (presumably the 64-bit case aliases the uint64 ops — the aliasing is
 * not visible in this excerpt; verify against the header). */
121 #if SIZEOF_DEV_T != 8
/* Hash the dev_t that p points at. */
122 unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
124 siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
125 return (unsigned long) u;

/* Numeric three-way comparison of two dev_t keys. */
128 int devt_compare_func(const void *_a, const void *_b) {
130 a = *(const dev_t*) _a;
131 b = *(const dev_t*) _b;
132 return a < b ? -1 : (a > b ? 1 : 0);

/* vtable for maps keyed by pointers to dev_t */
135 const struct hash_ops devt_hash_ops = {
136 .hash = devt_hash_func,
137 .compare = devt_compare_func
/* Map a key to its bucket index: keyed hash reduced modulo n_buckets. */
141 static unsigned bucket_hash(Hashmap *h, const void *p) {
142 return (unsigned) (h->hash_ops->hash(p, h->hash_key) % h->n_buckets);
/* Fill hash_key[] with a siphash key. The most recently generated key is
 * cached in static storage and reused, except when reuse_is_ok is false
 * (on rehash), in which case a fresh random key is drawn.
 * NOTE(review): no locking around the static state — presumably only
 * called from contexts where that is safe; confirm against callers. */
145 static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
146 static uint8_t current[HASH_KEY_SIZE];
147 static bool current_initialized = false;
149 /* Returns a hash function key to use. In order to keep things
150 * fast we will not generate a new key each time we allocate a
151 * new hash table. Instead, we'll just reuse the most recently
152 * generated one, except if we never generated one or when we
153 * are rehashing an entire hash table because we reached a fill limit. */
156 if (!current_initialized || !reuse_is_ok) {
157 random_bytes(current, sizeof(current));
158 current_initialized = true;
161 memcpy(hash_key, current, sizeof(current));
/* Allocate a new Hashmap: from the tile mempool when on the main thread,
 * via malloc0() otherwise. Starts with the INITIAL_N_BUCKETS inline
 * bucket array and falls back to trivial pointer ops when hash_ops is
 * NULL. NOTE(review): several lines (declarations, OOM error paths,
 * return) are elided from this excerpt. */
164 Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
166 struct hashmap_tile *ht;
169 b = is_main_thread();
172 ht = mempool_alloc_tile(&hashmap_pool);
/* Pool tiles may be recycled, so clear them explicitly. */
176 memzero(ht, sizeof(struct hashmap_tile));
178 ht = malloc0(sizeof(struct hashmap_tile));
185 h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
187 h->n_buckets = INITIAL_N_BUCKETS;
189 h->iterate_list_head = h->iterate_list_tail = NULL;
191 h->buckets = ht->initial_buckets;
/* Reuse the cached hash key; a fresh one is only drawn on rehash. */
195 get_hash_key(h->hash_key, true);

/* Create *h on first use; does nothing when the map already exists. */
200 int hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops) {
208 q = hashmap_new(hash_ops);
/* Insert entry e (whose bucket index `hash` is precomputed) at the head
 * of its bucket chain and at the tail of the iteration list, then bump
 * n_entries (increment itself elided from this excerpt). */
216 static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
220 /* Insert into hash table */
221 e->bucket_next = h->buckets[hash];
222 e->bucket_previous = NULL;
223 if (h->buckets[hash])
224 h->buckets[hash]->bucket_previous = e;
225 h->buckets[hash] = e;
227 /* Insert into iteration list */
228 e->iterate_previous = h->iterate_list_tail;
229 e->iterate_next = NULL;
230 if (h->iterate_list_tail) {
231 assert(h->iterate_list_head);
232 h->iterate_list_tail->iterate_next = e;
/* else branch (keyword elided from excerpt): list was empty, so e also
 * becomes the head. */
234 assert(!h->iterate_list_head);
235 h->iterate_list_head = e;
237 h->iterate_list_tail = e;
240 assert(h->n_entries >= 1);
/* Undo link_entry(): detach e from the iteration list and from its
 * bucket chain (index `hash`), then decrement n_entries (decrement and
 * several NULL-neighbor branches elided from this excerpt). */
243 static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
247 /* Remove from iteration list */
249 e->iterate_next->iterate_previous = e->iterate_previous;
251 h->iterate_list_tail = e->iterate_previous;
253 if (e->iterate_previous)
254 e->iterate_previous->iterate_next = e->iterate_next;
256 h->iterate_list_head = e->iterate_next;
258 /* Remove from hash table bucket list */
260 e->bucket_next->bucket_previous = e->bucket_previous;
262 if (e->bucket_previous)
263 e->bucket_previous->bucket_next = e->bucket_next;
265 h->buckets[hash] = e->bucket_next;
267 assert(h->n_entries >= 1);
/* Unlink e (recomputing its bucket index from its key) and return the
 * entry to the pool (heap free path elided from this excerpt). */
271 static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
277 hash = bucket_hash(h, e->key);
278 unlink_entry(h, e, hash);
281 mempool_free_tile(&hashmap_entry_pool, e);
/* Free the map head and any heap-allocated bucket array, but none of the
 * stored keys/values. The pointer comparison detects whether buckets
 * still point at the inline initial_buckets storage that directly
 * follows the Hashmap inside its tile. */
286 void hashmap_free(Hashmap*h) {
288 /* Free the hashmap, but nothing in it */
295 if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
299 mempool_free_tile(&hashmap_pool, container_of(h, struct hashmap_tile, h));

/* Free the map and free() every value; keys are left untouched. */
304 void hashmap_free_free(Hashmap *h) {
306 /* Free the hashmap and all data objects in it, but not the
312 hashmap_clear_free(h);

/* Free the map and free() every key and every value. */
316 void hashmap_free_free_free(Hashmap *h) {
318 /* Free the hashmap and all data and key objects in it */
323 hashmap_clear_free_free(h);
/* Drop all entries; stored keys and values are left alone. */
327 void hashmap_clear(Hashmap *h) {
331 while (h->iterate_list_head)
332 remove_entry(h, h->iterate_list_head);

/* Drop all entries, free()ing each value. */
335 void hashmap_clear_free(Hashmap *h) {
341 while ((p = hashmap_steal_first(h)))

/* Drop all entries, free()ing each key and each value. */
345 void hashmap_clear_free_free(Hashmap *h) {
349 while (h->iterate_list_head) {
352 a = h->iterate_list_head->value;
/* Cast away the const on the stored key so it can be freed. */
353 b = (void*) h->iterate_list_head->key;
354 remove_entry(h, h->iterate_list_head);
/* Walk the chain of bucket `hash` and return the entry whose key
 * compares equal to `key` (NULL-on-miss return elided from excerpt). */
360 static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
361 struct hashmap_entry *e;
363 assert(hash < h->n_buckets);
365 for (e = h->buckets[hash]; e; e = e->bucket_next)
366 if (h->hash_ops->compare(e->key, key) == 0)
/* Grow the bucket array once the load factor reaches 3/4. Allocates a
 * fresh table of (n_entries+1)*4-1 buckets, rehashes every entry (via
 * the iteration list) under a newly drawn hash key so that bucket
 * placement stays unpredictable, then frees the old table unless it is
 * the inline initial_buckets storage. No-op below the load threshold;
 * OOM is tolerated by simply keeping the packed table. */
372 static int resize_buckets(Hashmap *h) {
373 struct hashmap_entry **n, *i;
375 uint8_t nkey[HASH_KEY_SIZE];
379 if (_likely_(h->n_entries*4 < h->n_buckets*3))
382 /* Increase by four */
383 m = (h->n_entries+1)*4-1;
385 /* If we hit OOM we simply risk packed hashmaps... */
386 n = new0(struct hashmap_entry*, m);
390 /* Let's use a different randomized hash key for the
391 * extension, so that people cannot guess what we are using
393 get_hash_key(nkey, false);
395 for (i = h->iterate_list_head; i; i = i->iterate_next) {
396 unsigned long old_bucket, new_bucket;
398 old_bucket = h->hash_ops->hash(i->key, h->hash_key) % h->n_buckets;
400 /* First, drop from old bucket table */
402 i->bucket_next->bucket_previous = i->bucket_previous;
404 if (i->bucket_previous)
405 i->bucket_previous->bucket_next = i->bucket_next;
407 h->buckets[old_bucket] = i->bucket_next;
409 /* Then, add to new bucket table */
410 new_bucket = h->hash_ops->hash(i->key, nkey) % m;
412 i->bucket_next = n[new_bucket];
413 i->bucket_previous = NULL;
415 n[new_bucket]->bucket_previous = i;
/* Free the old array only if it was heap-allocated (i.e. not the
 * inline storage following the Hashmap). */
419 if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
/* Commit the new key so future bucket_hash() calls agree with the
 * placement just computed. */
425 memcpy(h->hash_key, nkey, HASH_KEY_SIZE);
/* Insert a key/value pair assumed not to be present yet. Resizes first;
 * if the table grew, the bucket index is recomputed under the new key.
 * The entry comes from the mempool on the main thread, plain new()
 * otherwise (thread check and field assignments elided from excerpt). */
430 static int __hashmap_put(Hashmap *h, const void *key, void *value, unsigned hash) {
431 /* For when we know no such entry exists yet */
433 struct hashmap_entry *e;
435 if (resize_buckets(h) > 0)
436 hash = bucket_hash(h, key);
439 e = mempool_alloc_tile(&hashmap_entry_pool);
441 e = new(struct hashmap_entry, 1);
449 link_entry(h, e, hash);
/* Insert key->value. If the key already maps to the same value this is a
 * no-op success; the differing-value path is elided from this excerpt
 * (upstream returns -EEXIST — TODO confirm against full source). */
454 int hashmap_put(Hashmap *h, const void *key, void *value) {
455 struct hashmap_entry *e;
460 hash = bucket_hash(h, key);
461 e = hash_scan(h, hash, key);
463 if (e->value == value)
468 return __hashmap_put(h, key, value, hash);

/* Insert key->value, overwriting an existing entry for the key instead
 * of failing (overwrite path elided from this excerpt). */
471 int hashmap_replace(Hashmap *h, const void *key, void *value) {
472 struct hashmap_entry *e;
477 hash = bucket_hash(h, key);
478 e = hash_scan(h, hash, key);
485 return __hashmap_put(h, key, value, hash);

/* Update the value of an existing key only; never inserts (assignment
 * and error return elided from this excerpt). */
488 int hashmap_update(Hashmap *h, const void *key, void *value) {
489 struct hashmap_entry *e;
494 hash = bucket_hash(h, key);
495 e = hash_scan(h, hash, key);
/* Look up `key`; returns its stored value (NULL handling and return
 * elided from this excerpt). */
503 void* hashmap_get(Hashmap *h, const void *key) {
505 struct hashmap_entry *e;
510 hash = bucket_hash(h, key);
511 e = hash_scan(h, hash, key);

/* Like hashmap_get(), but also hands back the stored key pointer via
 * *key2 (useful when the caller owns and must free the original key). */
518 void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
520 struct hashmap_entry *e;
525 hash = bucket_hash(h, key);
526 e = hash_scan(h, hash, key);
531 *key2 = (void*) e->key;

/* true when `key` is present in the map. */
536 bool hashmap_contains(Hashmap *h, const void *key) {
542 hash = bucket_hash(h, key);
543 return !!hash_scan(h, hash, key);
/* Remove `key` and return its value (miss handling and entry teardown
 * elided from this excerpt). */
546 void* hashmap_remove(Hashmap *h, const void *key) {
547 struct hashmap_entry *e;
554 hash = bucket_hash(h, key);
555 e = hash_scan(h, hash, key);

/* Remove `key`, returning its value and handing back the stored key
 * pointer via *rkey so the caller can free it. */
565 void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
566 struct hashmap_entry *e;
576 hash = bucket_hash(h, key);
577 e = hash_scan(h, hash, key);
586 *rkey = (void*) e->key;
/* Re-key the entry stored under old_key to new_key with a new value, by
 * unlinking and relinking the same entry struct. Fails when new_key is
 * already taken (and, per the elided lines, when old_key is absent). */
593 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
594 struct hashmap_entry *e;
595 unsigned old_hash, new_hash;
600 old_hash = bucket_hash(h, old_key);
601 e = hash_scan(h, old_hash, old_key);
605 new_hash = bucket_hash(h, new_key);
606 if (hash_scan(h, new_hash, new_key))
609 unlink_entry(h, e, old_hash);
/* Key/value reassignment elided from this excerpt. */
614 link_entry(h, e, new_hash);

/* Like hashmap_remove_and_put(), but an existing entry `k` under new_key
 * is displaced rather than causing failure (displacement elided). */
619 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
620 struct hashmap_entry *e, *k;
621 unsigned old_hash, new_hash;
626 old_hash = bucket_hash(h, old_key);
627 e = hash_scan(h, old_hash, old_key);
631 new_hash = bucket_hash(h, new_key);
632 k = hash_scan(h, new_hash, new_key);
637 unlink_entry(h, e, old_hash);
642 link_entry(h, e, new_hash);
/* Remove `key` only when its stored value is exactly `value`; otherwise
 * leave the map untouched (removal and returns elided from excerpt). */
647 void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
648 struct hashmap_entry *e;
654 hash = bucket_hash(h, key);
656 e = hash_scan(h, hash, key);
660 if (e->value != value)
/* Advance iterator *i over the insertion-order list, returning each
 * entry's value (and, per the elided tail, optionally its key via *key).
 * ITERATOR_FIRST starts at the list head; otherwise *i holds a pointer
 * to the next entry, becoming ITERATOR_LAST when exhausted. */
668 void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
669 struct hashmap_entry *e;
676 if (*i == ITERATOR_LAST)
679 if (*i == ITERATOR_FIRST && !h->iterate_list_head)
682 e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;
685 *i = (Iterator) e->iterate_next;
/* Value of the oldest (first-inserted) entry; empty/NULL-map return
 * paths are elided from this excerpt. */
703 void* hashmap_first(Hashmap *h) {
708 if (!h->iterate_list_head)
711 return h->iterate_list_head->value;

/* Key of the oldest entry. */
714 void* hashmap_first_key(Hashmap *h) {
719 if (!h->iterate_list_head)
722 return (void*) h->iterate_list_head->key;

/* Remove the oldest entry and return its value ("steal" it). */
725 void* hashmap_steal_first(Hashmap *h) {
731 if (!h->iterate_list_head)
734 data = h->iterate_list_head->value;
735 remove_entry(h, h->iterate_list_head);

/* Remove the oldest entry and return its key. */
740 void* hashmap_steal_first_key(Hashmap *h) {
746 if (!h->iterate_list_head)
749 key = (void*) h->iterate_list_head->key;
750 remove_entry(h, h->iterate_list_head);
/* Number of entries (body/NULL handling elided from this excerpt). */
755 unsigned hashmap_size(Hashmap *h) {
/* Current number of buckets (body elided from this excerpt). */
763 unsigned hashmap_buckets(Hashmap *h) {
/* true when the map has no entries (the NULL-map guard is elided). */
771 bool hashmap_isempty(Hashmap *h) {
776 return h->n_entries == 0;
/* Copy every entry of `other` into `h`. Keys already present in `h` are
 * skipped (-EEXIST tolerated); any other error aborts the merge. `other`
 * itself is not modified. */
779 int hashmap_merge(Hashmap *h, Hashmap *other) {
780 struct hashmap_entry *e;
787 for (e = other->iterate_list_head; e; e = e->iterate_next) {
790 r = hashmap_put(h, e->key, e->value);
791 if (r < 0 && r != -EEXIST)
/* Move every entry of `other` into `h` by relinking the existing entry
 * structs — no allocation, so this cannot fail. Entries whose key is
 * already present in `h` are left behind in `other`. `n` caches the
 * successor (assignment elided) because e is unlinked inside the loop. */
798 void hashmap_move(Hashmap *h, Hashmap *other) {
799 struct hashmap_entry *e, *n;
803 /* The same as hashmap_merge(), but every new item from other
804 * is moved to h. This function is guaranteed to succeed. */
809 for (e = other->iterate_list_head; e; e = n) {
810 unsigned h_hash, other_hash;
814 h_hash = bucket_hash(h, e->key);
815 if (hash_scan(h, h_hash, e->key))
818 other_hash = bucket_hash(other, e->key);
819 unlink_entry(other, e, other_hash);
820 link_entry(h, e, h_hash);
/* Move the single entry stored under `key` from `other` to `h` by
 * relinking it. Fails when `h` already contains the key or when `other`
 * does not (the error returns are elided from this excerpt). */
824 int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
825 unsigned h_hash, other_hash;
826 struct hashmap_entry *e;
830 h_hash = bucket_hash(h, key);
831 if (hash_scan(h, h_hash, key))
837 other_hash = bucket_hash(other, key);
838 e = hash_scan(other, other_hash, key);
842 unlink_entry(other, e, other_hash);
843 link_entry(h, e, h_hash);
/* Duplicate h: allocate a new map with the same ops and copy all entries
 * via hashmap_merge(); cleanup of the partial copy on merge failure is
 * elided from this excerpt. */
848 Hashmap *hashmap_copy(Hashmap *h) {
853 copy = hashmap_new(h->hash_ops);
857 if (hashmap_merge(copy, h) < 0) {
/* Collect all values into a newly allocated, NULL-terminated string
 * vector (n_entries + 1 slots for the terminator).
 * NOTE(review): assumes all stored values are char* strings — only valid
 * for maps used that way; verify against callers. */
865 char **hashmap_get_strv(Hashmap *h) {
871 sv = new(char*, h->n_entries+1);
876 HASHMAP_FOREACH(item, h, it)
883 void *hashmap_next(Hashmap *h, const void *key) {
885 struct hashmap_entry *e;
892 hash = bucket_hash(h, key);
893 e = hash_scan(h, hash, key);