1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
30 #include "siphash24.h"
33 #define INITIAL_N_BUCKETS 31
/* One hashmap entry: doubly linked both within its hash bucket and within
 * the global insertion-order iteration list.
 * NOTE(review): the extraction is truncated — the key/value members and the
 * closing brace of this struct are not visible here; confirm against the
 * complete file before editing. */
35 struct hashmap_entry {
38 struct hashmap_entry *bucket_next, *bucket_previous;
39 struct hashmap_entry *iterate_next, *iterate_previous;
/* NOTE(review): the fields below presumably belong to struct Hashmap — its
 * opening line is not visible in this extraction. */
43 const struct hash_ops *hash_ops;
45 struct hashmap_entry *iterate_list_head, *iterate_list_tail;
47 struct hashmap_entry ** buckets;
48 unsigned n_buckets, n_entries;
50 uint8_t hash_key[HASH_KEY_SIZE];
/* NOTE(review): presumably a member of struct hashmap_tile — the inline
 * bucket array used until the table is grown (see hashmap_free/resize_buckets,
 * which compare h->buckets against the memory right after the Hashmap). */
56 struct hashmap_entry *initial_buckets[INITIAL_N_BUCKETS];
59 static DEFINE_MEMPOOL(hashmap_pool, struct hashmap_tile, 8);
60 static DEFINE_MEMPOOL(hashmap_entry_pool, struct hashmap_entry, 64);
64 __attribute__((destructor)) static void cleanup_pools(void) {
65 /* Be nice to valgrind */
67 mempool_drop(&hashmap_entry_pool);
68 mempool_drop(&hashmap_pool);
73 unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
75 siphash24((uint8_t*) &u, p, strlen(p), hash_key);
76 return (unsigned long) u;
/* Orders two NUL-terminated strings with strcmp() semantics:
 * <0 if a<b, 0 if equal, >0 if a>b. */
int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}
83 const struct hash_ops string_hash_ops = {
84 .hash = string_hash_func,
85 .compare = string_compare_func
88 unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
90 siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
91 return (unsigned long) u;
/* Orders two keys by raw pointer value: -1, 0, or 1.
 * NOTE: relational comparison of pointers into different objects is
 * formally undefined in ISO C; kept as-is since this is the existing
 * identity-key contract and works on all supported platforms. */
int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}
98 const struct hash_ops trivial_hash_ops = {
99 .hash = trivial_hash_func,
100 .compare = trivial_compare_func
103 unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
105 siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
106 return (unsigned long) u;
/* Orders two uint64_t keys (passed by pointer): -1, 0, or 1. */
int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;
        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}
116 const struct hash_ops uint64_hash_ops = {
117 .hash = uint64_hash_func,
118 .compare = uint64_compare_func
121 #if SIZEOF_DEV_T != 8
122 unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
124 siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
125 return (unsigned long) u;
128 int devt_compare_func(const void *_a, const void *_b) {
130 a = *(const dev_t*) _a;
131 b = *(const dev_t*) _b;
132 return a < b ? -1 : (a > b ? 1 : 0);
135 const struct hash_ops devt_hash_ops = {
136 .hash = devt_hash_func,
137 .compare = devt_compare_func
141 static unsigned bucket_hash(Hashmap *h, const void *p) {
142 return (unsigned) (h->hash_ops->hash(p, h->hash_key) % h->n_buckets);
145 static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
146 static uint8_t current[HASH_KEY_SIZE];
147 static bool current_initialized = false;
149 /* Returns a hash function key to use. In order to keep things
150 * fast we will not generate a new key each time we allocate a
151 * new hash table. Instead, we'll just reuse the most recently
152 * generated one, except if we never generated one or when we
153 * are rehashing an entire hash table because we reached a
156 if (!current_initialized || !reuse_is_ok) {
157 random_bytes(current, sizeof(current));
158 current_initialized = true;
161 memcpy(hash_key, current, sizeof(current));
/* Allocates a new hashmap using @hash_ops for hashing/comparison; a NULL
 * @hash_ops falls back to pointer-identity (trivial_hash_ops). Allocates
 * from the mempool on the main thread, otherwise from the heap, and starts
 * with the tile's inline bucket array (INITIAL_N_BUCKETS).
 * NOTE(review): the extraction dropped several lines here (the is_main_thread
 * branch structure, NULL checks, n_entries init, final return) — consult the
 * complete file before editing. */
164 Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
166 struct hashmap_tile *ht;
169 b = is_main_thread();
172 ht = mempool_alloc_tile(&hashmap_pool);
176 memzero(ht, sizeof(struct hashmap_tile));
178 ht = malloc0(sizeof(struct hashmap_tile));
185 h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
187 h->n_buckets = INITIAL_N_BUCKETS;
189 h->iterate_list_head = h->iterate_list_tail = NULL;
191 h->buckets = ht->initial_buckets;
195 get_hash_key(h->hash_key, true);
/* Ensures *h points at a live hashmap, allocating one with @hash_ops if *h
 * is still NULL. NOTE(review): body truncated in this extraction. */
200 int hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops) {
208 q = hashmap_new(hash_ops);
/* Links entry @e at the head of bucket @hash and appends it to the tail of
 * the global iteration list (preserving insertion order for iteration).
 * NOTE(review): the else line between the two assert branches and the
 * n_entries increment are elided in this extraction. */
216 static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
220 /* Insert into hash table */
221 e->bucket_next = h->buckets[hash];
222 e->bucket_previous = NULL;
223 if (h->buckets[hash])
224 h->buckets[hash]->bucket_previous = e;
225 h->buckets[hash] = e;
227 /* Insert into iteration list */
228 e->iterate_previous = h->iterate_list_tail;
229 e->iterate_next = NULL;
230 if (h->iterate_list_tail) {
231 assert(h->iterate_list_head);
232 h->iterate_list_tail->iterate_next = e;
234 assert(!h->iterate_list_head);
235 h->iterate_list_head = e;
237 h->iterate_list_tail = e;
240 assert(h->n_entries >= 1);
/* Unlinks entry @e from both the iteration list and its bucket list; the
 * caller is responsible for freeing @e afterwards.
 * NOTE(review): the if-lines guarding the head/tail fixups and the
 * n_entries decrement are elided in this extraction. */
243 static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
247 /* Remove from iteration list */
249 e->iterate_next->iterate_previous = e->iterate_previous;
251 h->iterate_list_tail = e->iterate_previous;
253 if (e->iterate_previous)
254 e->iterate_previous->iterate_next = e->iterate_next;
256 h->iterate_list_head = e->iterate_next;
258 /* Remove from hash table bucket list */
260 e->bucket_next->bucket_previous = e->bucket_previous;
262 if (e->bucket_previous)
263 e->bucket_previous->bucket_next = e->bucket_next;
265 h->buckets[hash] = e->bucket_next;
267 assert(h->n_entries >= 1);
/* Recomputes @e's bucket from its key, unlinks it, and returns the entry to
 * the mempool (heap-free branch presumably elided in this extraction). */
271 static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
277 hash = bucket_hash(h, e->key);
278 unlink_entry(h, e, hash);
281 mempool_free_tile(&hashmap_entry_pool, e);
/* Frees the hashmap head itself, but not keys or values stored in it.
 * The comparison checks whether the bucket array was grown past the inline
 * buffer that sits directly after the Hashmap — if so it was heap-allocated
 * and (presumably, lines elided) must be free()d separately. */
286 void hashmap_free(Hashmap*h) {
288 /* Free the hashmap, but nothing in it */
295 if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
299 mempool_free_tile(&hashmap_pool, container_of(h, struct hashmap_tile, h));
/* Frees all stored values (via hashmap_clear_free) and then, presumably
 * (elided lines), the map itself — keys are NOT freed. */
304 void hashmap_free_free(Hashmap *h) {
306 /* Free the hashmap and all data objects in it, but not the
312 hashmap_clear_free(h);
/* Frees all stored keys and values and then, presumably (elided lines),
 * the map itself. */
316 void hashmap_free_free_free(Hashmap *h) {
318 /* Free the hashmap and all data and key objects in it */
323 hashmap_clear_free_free(h);
/* Removes every entry from the map without freeing keys or values. */
327 void hashmap_clear(Hashmap *h) {
331 while (h->iterate_list_head)
332 remove_entry(h, h->iterate_list_head);
/* Removes every entry, free()ing each value (steals then frees; loop body
 * elided in this extraction). Keys are untouched. */
335 void hashmap_clear_free(Hashmap *h) {
341 while ((p = hashmap_steal_first(h)))
/* Removes every entry, free()ing both key and value of each (the free()
 * calls themselves are elided in this extraction). */
345 void hashmap_clear_free_free(Hashmap *h) {
349 while (h->iterate_list_head) {
352 a = h->iterate_list_head->value;
353 b = (void*) h->iterate_list_head->key;
354 remove_entry(h, h->iterate_list_head);
360 static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
361 struct hashmap_entry *e;
363 assert(hash < h->n_buckets);
365 for (e = h->buckets[hash]; e; e = e->bucket_next)
366 if (h->hash_ops->compare(e->key, key) == 0)
/* Grows the bucket array so that n_entries + @entries_add fits below a 75%
 * load factor; rehashes every entry under a freshly randomized key.
 * Returns negative on overflow, and (presumably, lines elided) 0 when no
 * resize was needed and positive after a resize. NOTE(review): several
 * lines (returns, OOM check, else branches, bookkeeping) are elided in
 * this extraction — consult the complete file before editing. */
372 static int resize_buckets(Hashmap *h, unsigned entries_add) {
373 struct hashmap_entry **n, *i;
374 unsigned m, new_n_entries, new_n_buckets;
375 uint8_t nkey[HASH_KEY_SIZE];
379 new_n_entries = h->n_entries + entries_add;
/* Overflow check: the addition wrapped, or 4*new_n_entries would overflow. */
382 if (_unlikely_(new_n_entries < entries_add || new_n_entries > UINT_MAX / 4))
/* Target bucket count keeps the load factor at ~75%. */
385 new_n_buckets = new_n_entries * 4 / 3;
387 if (_likely_(new_n_buckets <= h->n_buckets))
390 /* Increase by four at least */
391 m = MAX((h->n_entries+1)*4-1, new_n_buckets);
393 /* If we hit OOM we simply risk packed hashmaps... */
394 n = new0(struct hashmap_entry*, m);
398 /* Let's use a different randomized hash key for the
399 * extension, so that people cannot guess what we are using
401 get_hash_key(nkey, false);
/* Rehash every entry via the iteration list, which is bucket-independent. */
403 for (i = h->iterate_list_head; i; i = i->iterate_next) {
404 unsigned long old_bucket, new_bucket;
406 old_bucket = h->hash_ops->hash(i->key, h->hash_key) % h->n_buckets;
408 /* First, drop from old bucket table */
410 i->bucket_next->bucket_previous = i->bucket_previous;
412 if (i->bucket_previous)
413 i->bucket_previous->bucket_next = i->bucket_next;
415 h->buckets[old_bucket] = i->bucket_next;
417 /* Then, add to new bucket table */
418 new_bucket = h->hash_ops->hash(i->key, nkey) % m;
420 i->bucket_next = n[new_bucket];
421 i->bucket_previous = NULL;
423 n[new_bucket]->bucket_previous = i;
/* Old array was heap-allocated iff it is not the inline buffer after h. */
427 if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
/* Adopt the new key so future lookups hash consistently. */
433 memcpy(h->hash_key, nkey, HASH_KEY_SIZE);
/* Inserts a brand-new (key, value) pair; the caller guarantees no entry
 * with this key exists. A successful resize invalidates @hash, hence the
 * recompute. Allocates the entry from the mempool (heap fallback and
 * key/value assignment lines elided in this extraction). */
438 static int __hashmap_put(Hashmap *h, const void *key, void *value, unsigned hash) {
439 /* For when we know no such entry exists yet */
441 struct hashmap_entry *e;
443 if (resize_buckets(h, 1) > 0)
444 hash = bucket_hash(h, key);
447 e = mempool_alloc_tile(&hashmap_entry_pool);
449 e = new(struct hashmap_entry, 1);
457 link_entry(h, e, hash);
/* Inserts (key, value). If the key already exists: same value is a no-op
 * success, different value fails (presumably -EEXIST — return lines elided
 * in this extraction). */
462 int hashmap_put(Hashmap *h, const void *key, void *value) {
463 struct hashmap_entry *e;
468 hash = bucket_hash(h, key);
469 e = hash_scan(h, hash, key);
471 if (e->value == value)
476 return __hashmap_put(h, key, value, hash);
/* Inserts (key, value), overwriting the existing entry's key and value if
 * the key is already present (overwrite lines elided in this extraction). */
479 int hashmap_replace(Hashmap *h, const void *key, void *value) {
480 struct hashmap_entry *e;
485 hash = bucket_hash(h, key);
486 e = hash_scan(h, hash, key);
493 return __hashmap_put(h, key, value, hash);
/* Updates the value of an existing key only; presumably fails with -ENOENT
 * when the key is absent (return lines elided in this extraction). */
496 int hashmap_update(Hashmap *h, const void *key, void *value) {
497 struct hashmap_entry *e;
502 hash = bucket_hash(h, key);
503 e = hash_scan(h, hash, key);
/* Returns the value stored under @key, or NULL if absent (return lines
 * elided in this extraction). */
511 void* hashmap_get(Hashmap *h, const void *key) {
513 struct hashmap_entry *e;
518 hash = bucket_hash(h, key);
519 e = hash_scan(h, hash, key);
/* Like hashmap_get(), but additionally returns the stored key pointer via
 * @key2 — useful when the caller owns the key's allocation. */
526 void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
528 struct hashmap_entry *e;
533 hash = bucket_hash(h, key);
534 e = hash_scan(h, hash, key);
539 *key2 = (void*) e->key;
/* Returns true iff an entry with @key exists. */
544 bool hashmap_contains(Hashmap *h, const void *key) {
550 hash = bucket_hash(h, key);
551 return !!hash_scan(h, hash, key);
/* Removes the entry with @key and returns its value, or NULL if absent
 * (removal/return lines elided in this extraction). */
554 void* hashmap_remove(Hashmap *h, const void *key) {
555 struct hashmap_entry *e;
562 hash = bucket_hash(h, key);
563 e = hash_scan(h, hash, key);
/* Like hashmap_remove(), but also hands back the stored key pointer via
 * @rkey so the caller can free it. */
573 void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
574 struct hashmap_entry *e;
584 hash = bucket_hash(h, key);
585 e = hash_scan(h, hash, key);
594 *rkey = (void*) e->key;
/* Re-keys the entry stored under @old_key to @new_key and sets its value.
 * Fails if @old_key is absent or @new_key already exists (return lines and
 * the key/value reassignment are elided in this extraction). Re-links the
 * entry into the bucket matching the new key's hash. */
601 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
602 struct hashmap_entry *e;
603 unsigned old_hash, new_hash;
608 old_hash = bucket_hash(h, old_key);
609 e = hash_scan(h, old_hash, old_key);
613 new_hash = bucket_hash(h, new_key);
614 if (hash_scan(h, new_hash, new_key))
617 unlink_entry(h, e, old_hash);
622 link_entry(h, e, new_hash);
/* Like hashmap_remove_and_put(), but if @new_key already exists its entry
 * is displaced (presumably removed first — the @k handling lines are
 * elided in this extraction) rather than causing a failure. */
627 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
628 struct hashmap_entry *e, *k;
629 unsigned old_hash, new_hash;
634 old_hash = bucket_hash(h, old_key);
635 e = hash_scan(h, old_hash, old_key);
639 new_hash = bucket_hash(h, new_key);
640 k = hash_scan(h, new_hash, new_key);
645 unlink_entry(h, e, old_hash);
650 link_entry(h, e, new_hash);
/* Removes the entry under @key only if its stored value equals @value;
 * otherwise leaves the map untouched (removal/return lines elided in this
 * extraction). */
655 void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
656 struct hashmap_entry *e;
662 hash = bucket_hash(h, key);
664 e = hash_scan(h, hash, key);
668 if (e->value != value)
/* Advances iterator @i along the insertion-order list, returning the
 * current value (and the key via @key if non-NULL — elided here). The
 * iterator is an opaque cursor: ITERATOR_FIRST before the first call,
 * then the next entry pointer, presumably ITERATOR_LAST at the end. */
676 void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
677 struct hashmap_entry *e;
684 if (*i == ITERATOR_LAST)
687 if (*i == ITERATOR_FIRST && !h->iterate_list_head)
690 e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;
693 *i = (Iterator) e->iterate_next;
/* Returns the value of the oldest (first-inserted) entry, NULL if empty. */
711 void* hashmap_first(Hashmap *h) {
716 if (!h->iterate_list_head)
719 return h->iterate_list_head->value;
/* Returns the key of the oldest (first-inserted) entry, NULL if empty. */
722 void* hashmap_first_key(Hashmap *h) {
727 if (!h->iterate_list_head)
730 return (void*) h->iterate_list_head->key;
/* Removes the oldest entry and returns its value (ownership passes to the
 * caller); NULL if the map is empty. */
733 void* hashmap_steal_first(Hashmap *h) {
739 if (!h->iterate_list_head)
742 data = h->iterate_list_head->value;
743 remove_entry(h, h->iterate_list_head);
/* Removes the oldest entry and returns its key (ownership passes to the
 * caller); NULL if the map is empty. */
748 void* hashmap_steal_first_key(Hashmap *h) {
754 if (!h->iterate_list_head)
757 key = (void*) h->iterate_list_head->key;
758 remove_entry(h, h->iterate_list_head);
/* Returns the number of entries; presumably 0 for a NULL map (body elided
 * in this extraction). */
763 unsigned hashmap_size(Hashmap *h) {
/* Returns the current bucket count; presumably 0 for a NULL map (body
 * elided in this extraction). */
771 unsigned hashmap_buckets(Hashmap *h) {
/* Returns true iff the map holds no entries (NULL check presumably elided
 * in this extraction). */
779 bool hashmap_isempty(Hashmap *h) {
784 return h->n_entries == 0;
/* Copies every entry of @other into @h via hashmap_put(); keys already in
 * @h are skipped (-EEXIST tolerated), other errors abort. @other keeps its
 * entries. */
787 int hashmap_merge(Hashmap *h, Hashmap *other) {
788 struct hashmap_entry *e;
795 for (e = other->iterate_list_head; e; e = e->iterate_next) {
798 r = hashmap_put(h, e->key, e->value);
799 if (r < 0 && r != -EEXIST)
/* Pre-grows the bucket array for @entries_add additional entries so later
 * inserts avoid rehashing. */
806 int hashmap_reserve(Hashmap *h, unsigned entries_add) {
811 r = resize_buckets(h, entries_add);
/* Moves every entry of @other into @h by relinking the entry structs (no
 * allocation). Entries whose key already exists in @h stay behind in
 * @other (the skip/continue lines are elided in this extraction). */
818 int hashmap_move(Hashmap *h, Hashmap *other) {
819 struct hashmap_entry *e, *n;
823 /* The same as hashmap_merge(), but every new item from other
829 for (e = other->iterate_list_head; e; e = n) {
830 unsigned h_hash, other_hash;
834 h_hash = bucket_hash(h, e->key);
835 if (hash_scan(h, h_hash, e->key))
838 other_hash = bucket_hash(other, e->key);
839 unlink_entry(other, e, other_hash);
840 link_entry(h, e, h_hash);
/* Moves the single entry stored under @key from @other into @h by
 * relinking it. Presumably fails if @key already exists in @h or is absent
 * from @other (return lines elided in this extraction). */
846 int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
847 unsigned h_hash, other_hash;
848 struct hashmap_entry *e;
852 h_hash = bucket_hash(h, key);
853 if (hash_scan(h, h_hash, key))
859 other_hash = bucket_hash(other, key);
860 e = hash_scan(other, other_hash, key);
864 unlink_entry(other, e, other_hash);
865 link_entry(h, e, h_hash);
/* Returns a shallow copy of @h (same hash_ops, same key/value pointers);
 * frees the partial copy and returns NULL on merge failure (cleanup lines
 * elided in this extraction). */
870 Hashmap *hashmap_copy(Hashmap *h) {
875 copy = hashmap_new(h->hash_ops);
879 if (hashmap_merge(copy, h) < 0) {
/* Returns a newly allocated NULL-terminated array of the map's value
 * pointers, in iteration (insertion) order; caller frees the array but not
 * the strings (loop body and terminator elided in this extraction). */
887 char **hashmap_get_strv(Hashmap *h) {
893 sv = new(char*, h->n_entries+1);
898 HASHMAP_FOREACH(item, h, it)
905 void *hashmap_next(Hashmap *h, const void *key) {
907 struct hashmap_entry *e;
914 hash = bucket_hash(h, key);
915 e = hash_scan(h, hash, key);