/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#ifdef HAVE_SYS_AUXV_H
#include <sys/auxv.h>
#endif

#include "util.h"
#include "hashmap.h"
#include "macro.h"
#define INITIAL_N_BUCKETS 31
struct hashmap_entry {
        const void *key;
        void *value;
        struct hashmap_entry *bucket_next, *bucket_previous;
        struct hashmap_entry *iterate_next, *iterate_previous;
};
struct Hashmap {
        hash_func_t hash_func;
        compare_func_t compare_func;

        struct hashmap_entry *iterate_list_head, *iterate_list_tail;

        struct hashmap_entry ** buckets;
        unsigned n_buckets, n_entries;

        unsigned random_xor;
        bool from_pool;
};
struct pool {
        struct pool *next;
        unsigned n_tiles;
        unsigned n_used;
};

static struct pool *first_hashmap_pool = NULL;
static void *first_hashmap_tile = NULL;

static struct pool *first_entry_pool = NULL;
static void *first_entry_tile = NULL;
static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size) {
        unsigned i;

        /* When a tile is released we add it to the list and simply
         * place the next pointer at its offset 0. */

        assert(tile_size >= sizeof(void*));

        /* Reuse a tile from the free list, if one is available */
        if (*first_tile) {
                void *r;

                r = *first_tile;
                *first_tile = * (void**) (*first_tile);
                return r;
        }

        /* Otherwise, start a new pool if there is none or the current one is full */
        if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
                unsigned n;
                size_t size;
                struct pool *p;

                n = *first_pool ? (*first_pool)->n_tiles : 0;
                n = MAX(512U, n * 2);
                size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
                n = (size - ALIGN(sizeof(struct pool))) / tile_size;

                p = malloc(size);
                if (!p)
                        return NULL;

                p->next = *first_pool;
                p->n_tiles = n;
                p->n_used = 0;

                *first_pool = p;
        }

        i = (*first_pool)->n_used++;

        return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
}
static void deallocate_tile(void **first_tile, void *p) {
        * (void**) p = *first_tile;
        *first_tile = p;
}
static void drop_pool(struct pool *p) {
        while (p) {
                struct pool *n;

                n = p->next;
                free(p);
                p = n;
        }
}
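/* Illustrative sketch (not part of the original file): freed tiles are
 * threaded into a singly linked free list through their own first
 * pointer-sized word, so allocation can pop them back off in O(1):
 *
 *     void *a = allocate_tile(&first_entry_pool, &first_entry_tile,
 *                             sizeof(struct hashmap_entry));
 *     deallocate_tile(&first_entry_tile, a);
 *     void *b = allocate_tile(&first_entry_pool, &first_entry_tile,
 *                             sizeof(struct hashmap_entry));
 *     assert(a == b);  // the most recently freed tile is handed out first
 */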
__attribute__((destructor)) static void cleanup_pool(void) {
        /* Be nice to valgrind */

        drop_pool(first_hashmap_pool);
        drop_pool(first_entry_pool);
}
unsigned string_hash_func(const void *p) {
        unsigned hash = 5381;
        const signed char *c;

        /* DJB's hash function */

        for (c = p; *c; c++)
                hash = (hash << 5) + hash + (unsigned) *c;

        return hash;
}
int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}
unsigned trivial_hash_func(const void *p) {
        return PTR_TO_UINT(p);
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}
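/* Usage sketch (illustrative, not part of the original file): the trivial
 * functions hash and compare the pointer value itself, which suits maps
 * keyed by pointer identity or by small integers packed into pointers:
 *
 *     Hashmap *m = hashmap_new(trivial_hash_func, trivial_compare_func);
 *     hashmap_put(m, UINT_TO_PTR(4711), some_value);
 *     void *v = hashmap_get(m, UINT_TO_PTR(4711));
 *
 * `some_value` is a placeholder for any caller-owned object. */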
unsigned uint64_hash_func(const void *p) {
        uint64_t u;

        assert_cc(sizeof(uint64_t) == 2*sizeof(unsigned));

        u = *(const uint64_t*) p;

        return (unsigned) ((u >> 32) ^ u);
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;

        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}
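/* Usage sketch (illustrative): with the uint64 functions the key is a
 * pointer to a uint64_t, and only the pointer is stored, so the pointed-to
 * value must stay valid for the lifetime of the entry:
 *
 *     static uint64_t id = 0x100000001ULL;   // arbitrary example value
 *     Hashmap *m = hashmap_new(uint64_hash_func, uint64_compare_func);
 *     hashmap_put(m, &id, some_value);       // some_value: placeholder
 */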
static unsigned bucket_hash(Hashmap *h, const void *p) {
        return (h->hash_func(p) ^ h->random_xor) % h->n_buckets;
}
Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) {
        bool b;
        Hashmap *h;
        size_t size;

        /* The tile pools are not locked, hence only use them from the main thread */
        b = is_main_thread();

        size = ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS * sizeof(struct hashmap_entry*);

        if (b) {
                h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size);
                if (!h)
                        return NULL;

                memset(h, 0, size);
        } else {
                h = malloc0(size);
                if (!h)
                        return NULL;
        }

        h->hash_func = hash_func ? hash_func : trivial_hash_func;
        h->compare_func = compare_func ? compare_func : trivial_compare_func;

        h->n_buckets = INITIAL_N_BUCKETS;
        h->n_entries = 0;
        h->iterate_list_head = h->iterate_list_tail = NULL;

        h->buckets = (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)));

        h->from_pool = b;

        /* Let's randomize our hash functions a bit so that they are
         * harder to guess for clients. For this, start out by cheaply
         * using some bits the kernel passed into the process using
         * the auxiliary vector. If the hashmap grows later on we will
         * rehash everything using a new random XOR mask from
         * /dev/random. */
#ifdef HAVE_SYS_AUXV_H
        {
                void *auxv;

                auxv = (void*) getauxval(AT_RANDOM);
                h->random_xor = auxv ? *(unsigned*) auxv : random_u();
        }
#else
        h->random_xor = random_u();
#endif

        return h;
}
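/* Usage sketch (illustrative, not part of the original file):
 *
 *     Hashmap *m;
 *
 *     m = hashmap_new(string_hash_func, string_compare_func);
 *     if (!m)
 *             return -ENOMEM;
 *
 *     hashmap_put(m, "foo", some_object);  // some_object: placeholder
 *     ...
 *     hashmap_free(m);                     // frees the map, not the values
 */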
int hashmap_ensure_allocated(Hashmap **h, hash_func_t hash_func, compare_func_t compare_func) {
        Hashmap *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_new(hash_func, compare_func);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}
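/* Usage sketch (illustrative): lazily allocate a map stored in a struct
 * field, a common pattern for callers that only sometimes need one:
 *
 *     int r = hashmap_ensure_allocated(&u->dependencies, NULL, NULL);
 *     if (r < 0)
 *             return r;
 *
 * `u->dependencies` is a hypothetical Hashmap* field; passing NULL selects
 * the trivial hash/compare functions. */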
static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
        assert(h);
        assert(e);

        /* Insert into hash table */
        e->bucket_next = h->buckets[hash];
        e->bucket_previous = NULL;
        if (h->buckets[hash])
                h->buckets[hash]->bucket_previous = e;
        h->buckets[hash] = e;

        /* Insert into iteration list */
        e->iterate_previous = h->iterate_list_tail;
        e->iterate_next = NULL;
        if (h->iterate_list_tail) {
                assert(h->iterate_list_head);
                h->iterate_list_tail->iterate_next = e;
        } else {
                assert(!h->iterate_list_head);
                h->iterate_list_head = e;
        }
        h->iterate_list_tail = e;

        h->n_entries++;
        assert(h->n_entries >= 1);
}
static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
        assert(h);
        assert(e);

        /* Remove from iteration list */
        if (e->iterate_next)
                e->iterate_next->iterate_previous = e->iterate_previous;
        else
                h->iterate_list_tail = e->iterate_previous;

        if (e->iterate_previous)
                e->iterate_previous->iterate_next = e->iterate_next;
        else
                h->iterate_list_head = e->iterate_next;

        /* Remove from hash table bucket list */
        if (e->bucket_next)
                e->bucket_next->bucket_previous = e->bucket_previous;

        if (e->bucket_previous)
                e->bucket_previous->bucket_next = e->bucket_next;
        else
                h->buckets[hash] = e->bucket_next;

        assert(h->n_entries >= 1);
        h->n_entries--;
}
static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
        unsigned hash;

        assert(h);
        assert(e);

        hash = bucket_hash(h, e->key);
        unlink_entry(h, e, hash);

        if (h->from_pool)
                deallocate_tile(&first_entry_tile, e);
        else
                free(e);
}
void hashmap_free(Hashmap *h) {

        /* Free the hashmap, but nothing in it */

        if (!h)
                return;

        hashmap_clear(h);

        /* Free the bucket array unless it is still the one allocated inline with the Hashmap */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
                free(h->buckets);

        if (h->from_pool)
                deallocate_tile(&first_hashmap_tile, h);
        else
                free(h);
}
void hashmap_free_free(Hashmap *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (!h)
                return;

        hashmap_clear_free(h);
        hashmap_free(h);
}
void hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (!h)
                return;

        hashmap_clear_free_free(h);
        hashmap_free(h);
}
void hashmap_clear(Hashmap *h) {
        if (!h)
                return;

        while (h->iterate_list_head)
                remove_entry(h, h->iterate_list_head);
}
void hashmap_clear_free(Hashmap *h) {
        void *p;

        if (!h)
                return;

        while ((p = hashmap_steal_first(h)))
                free(p);
}
void hashmap_clear_free_free(Hashmap *h) {
        if (!h)
                return;

        while (h->iterate_list_head) {
                void *a, *b;

                a = h->iterate_list_head->value;
                b = (void*) h->iterate_list_head->key;
                remove_entry(h, h->iterate_list_head);
                free(a);
                free(b);
        }
}
static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
        struct hashmap_entry *e;

        assert(h);
        assert(hash < h->n_buckets);

        for (e = h->buckets[hash]; e; e = e->bucket_next)
                if (h->compare_func(e->key, key) == 0)
                        return e;

        return NULL;
}
static bool resize_buckets(Hashmap *h) {
        struct hashmap_entry **n, *i;
        unsigned m, nxor;

        assert(h);

        if (_likely_(h->n_entries*4 < h->n_buckets*3))
                return false;

        /* Grow to four buckets per entry; the *4-1 keeps the bucket count odd */
        m = (h->n_entries+1)*4-1;

        /* If we hit OOM we simply risk packed hashmaps... */
        n = new0(struct hashmap_entry*, m);
        if (!n)
                return false;

        /* Let's use a different randomized xor value for the
         * extension, so that people cannot guess what we are using
         * here forever */
        nxor = random_u();

        for (i = h->iterate_list_head; i; i = i->iterate_next) {
                unsigned hash, x;

                hash = h->hash_func(i->key);

                /* First, drop from old bucket table */
                if (i->bucket_next)
                        i->bucket_next->bucket_previous = i->bucket_previous;

                if (i->bucket_previous)
                        i->bucket_previous->bucket_next = i->bucket_next;
                else
                        h->buckets[(hash ^ h->random_xor) % h->n_buckets] = i->bucket_next;

                /* Then, add to new bucket table */
                x = (hash ^ nxor) % m;

                i->bucket_next = n[x];
                i->bucket_previous = NULL;
                if (n[x])
                        n[x]->bucket_previous = i;
                n[x] = i;
        }

        /* Free the old bucket array unless it was allocated inline with the Hashmap */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
                free(h->buckets);

        h->buckets = n;
        h->n_buckets = m;
        h->random_xor = nxor;

        return true;
}
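/* Worked example (illustrative, not part of the original file): the load
 * check n_entries*4 < n_buckets*3 keeps the fill ratio below 3/4. With the
 * initial 31 buckets, the table is grown once 24 entries are present
 * (24*4 = 96 >= 93 = 31*3), to (24+1)*4-1 = 99 buckets, i.e. roughly four
 * buckets per entry after the resize. */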
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        unsigned hash;

        assert(h);

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (e) {
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        /* If the table was resized the key now hashes to a different bucket */
        if (resize_buckets(h))
                hash = bucket_hash(h, key);

        if (h->from_pool)
                e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry));
        else
                e = new(struct hashmap_entry, 1);
        if (!e)
                return -ENOMEM;

        e->key = key;
        e->value = value;

        link_entry(h, e, hash);

        return 1;
}
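/* Return-value sketch (illustrative, not part of the original file):
 *
 *     r = hashmap_put(m, "key", v);   // 1: entry newly added
 *     r = hashmap_put(m, "key", v);   // 0: key already mapped to v
 *     r = hashmap_put(m, "key", w);   // -EEXIST: key bound to another value
 *
 * Use hashmap_replace() to overwrite an existing binding instead. */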
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        unsigned hash;

        assert(h);

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (e) {
                e->key = key;
                e->value = value;
                return 0;
        }

        return hashmap_put(h, key, value);
}
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        unsigned hash;

        assert(h);

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e)
                return -ENOENT;

        e->value = value;
        return 0;
}
void* hashmap_get(Hashmap *h, const void *key) {
        unsigned hash;
        struct hashmap_entry *e;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        return e->value;
}
void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
        unsigned hash;
        struct hashmap_entry *e;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        if (key2)
                *key2 = (void*) e->key;

        return e->value;
}
bool hashmap_contains(Hashmap *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return !!hash_scan(h, hash, key);
}
void* hashmap_remove(Hashmap *h, const void *key) {
        struct hashmap_entry *e;
        unsigned hash;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        data = e->value;
        remove_entry(h, e);

        return data;
}
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e;
        unsigned old_hash, new_hash;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);
        if (!e)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (hash_scan(h, new_hash, new_key))
                return -EEXIST;

        unlink_entry(h, e, old_hash);

        e->key = new_key;
        e->value = value;

        link_entry(h, e, new_hash);

        return 0;
}
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e, *k;
        unsigned old_hash, new_hash;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);
        if (!e)
                return -ENOENT;

        /* Unlike hashmap_remove_and_put(), an existing entry under the new key is dropped */
        new_hash = bucket_hash(h, new_key);
        k = hash_scan(h, new_hash, new_key);
        if (k)
                if (e != k)
                        remove_entry(h, k);

        unlink_entry(h, e, old_hash);

        e->key = new_key;
        e->value = value;

        link_entry(h, e, new_hash);

        return 0;
}
void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        unsigned hash;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);

        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        if (e->value != value)
                return NULL;

        remove_entry(h, e);

        return value;
}
void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;

        assert(i);

        if (!h)
                goto at_end;

        if (*i == ITERATOR_LAST)
                goto at_end;

        if (*i == ITERATOR_FIRST && !h->iterate_list_head)
                goto at_end;

        e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;

        if (e->iterate_next)
                *i = (Iterator) e->iterate_next;
        else
                *i = ITERATOR_LAST;

        if (key)
                *key = e->key;

        return e->value;

at_end:
        *i = ITERATOR_LAST;

        if (key)
                *key = NULL;

        return NULL;
}
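/* Usage sketch (illustrative, not part of the original file): iteration is
 * usually done via the HASHMAP_FOREACH macro from hashmap.h, which wraps
 * this function and visits entries in insertion order:
 *
 *     Iterator it;
 *     void *v;
 *
 *     HASHMAP_FOREACH(v, m, it)
 *             do_something(v);   // do_something: placeholder for caller code
 */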
void *hashmap_iterate_backwards(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;

        assert(i);

        if (!h)
                goto at_beginning;

        if (*i == ITERATOR_FIRST)
                goto at_beginning;

        if (*i == ITERATOR_LAST && !h->iterate_list_tail)
                goto at_beginning;

        e = *i == ITERATOR_LAST ? h->iterate_list_tail : (struct hashmap_entry*) *i;

        if (e->iterate_previous)
                *i = (Iterator) e->iterate_previous;
        else
                *i = ITERATOR_FIRST;

        if (key)
                *key = e->key;

        return e->value;

at_beginning:
        *i = ITERATOR_FIRST;

        if (key)
                *key = NULL;

        return NULL;
}
void *hashmap_iterate_skip(Hashmap *h, const void *key, Iterator *i) {
        unsigned hash;
        struct hashmap_entry *e;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);

        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        *i = (Iterator) e;

        return e->value;
}
void* hashmap_first(Hashmap *h) {

        if (!h)
                return NULL;

        if (!h->iterate_list_head)
                return NULL;

        return h->iterate_list_head->value;
}
void* hashmap_first_key(Hashmap *h) {

        if (!h)
                return NULL;

        if (!h->iterate_list_head)
                return NULL;

        return (void*) h->iterate_list_head->key;
}
void* hashmap_last(Hashmap *h) {

        if (!h)
                return NULL;

        if (!h->iterate_list_tail)
                return NULL;

        return h->iterate_list_tail->value;
}
void* hashmap_steal_first(Hashmap *h) {
        void *data;

        if (!h)
                return NULL;

        if (!h->iterate_list_head)
                return NULL;

        data = h->iterate_list_head->value;
        remove_entry(h, h->iterate_list_head);

        return data;
}
void* hashmap_steal_first_key(Hashmap *h) {
        void *key;

        if (!h)
                return NULL;

        if (!h->iterate_list_head)
                return NULL;

        key = (void*) h->iterate_list_head->key;
        remove_entry(h, h->iterate_list_head);

        return key;
}
unsigned hashmap_size(Hashmap *h) {

        if (!h)
                return 0;

        return h->n_entries;
}
unsigned hashmap_buckets(Hashmap *h) {

        if (!h)
                return 0;

        return h->n_buckets;
}
bool hashmap_isempty(Hashmap *h) {

        if (!h)
                return true;

        return h->n_entries == 0;
}
int hashmap_merge(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e;

        assert(h);

        if (!other)
                return 0;

        for (e = other->iterate_list_head; e; e = e->iterate_next) {
                int r;

                r = hashmap_put(h, e->key, e->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}
void hashmap_move(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e, *n;

        assert(h);

        /* The same as hashmap_merge(), but every new item from other
         * is moved to h. This function is guaranteed to succeed. */

        if (!other)
                return;

        for (e = other->iterate_list_head; e; e = n) {
                unsigned h_hash, other_hash;

                n = e->iterate_next;

                h_hash = bucket_hash(h, e->key);
                if (hash_scan(h, h_hash, e->key))
                        continue;

                other_hash = bucket_hash(other, e->key);
                unlink_entry(other, e, other_hash);
                link_entry(h, e, h_hash);
        }
}
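/* Semantics sketch (illustrative, not part of the original file):
 * hashmap_move() relinks the existing entry structs rather than allocating
 * new ones, which is why it cannot fail:
 *
 *     hashmap_move(h, other);
 *     // h now also owns every entry of other whose key was free in h;
 *     // entries whose key already existed in h remain in other
 */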
int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
        unsigned h_hash, other_hash;
        struct hashmap_entry *e;

        if (!other)
                return 0;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (hash_scan(h, h_hash, key))
                return -EEXIST;

        other_hash = bucket_hash(other, key);
        e = hash_scan(other, other_hash, key);
        if (!e)
                return -ENOENT;

        unlink_entry(other, e, other_hash);
        link_entry(h, e, h_hash);

        return 0;
}
Hashmap *hashmap_copy(Hashmap *h) {
        Hashmap *copy;

        assert(h);

        copy = hashmap_new(h->hash_func, h->compare_func);
        if (!copy)
                return NULL;

        if (hashmap_merge(copy, h) < 0) {
                hashmap_free(copy);
                return NULL;
        }

        return copy;
}
char **hashmap_get_strv(Hashmap *h) {
        char **sv;
        Iterator it;
        char *item;
        int n;

        sv = new(char*, h->n_entries+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH(item, h, it)
                sv[n++] = item;
        sv[n] = NULL;

        return sv;
}
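/* Usage sketch (illustrative, not part of the original file): only
 * meaningful when the stored values really are strings. The returned
 * vector borrows the value pointers, so free just the array, not the
 * strings:
 *
 *     char **l = hashmap_get_strv(m);
 *     if (l) {
 *             ... use l ...
 *             free(l);
 *     }
 */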
void *hashmap_next(Hashmap *h, const void *key) {
        unsigned hash;
        struct hashmap_entry *e;

        assert(key);

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (!e)
                return NULL;

        /* Return the value of the entry inserted right after this key */
        e = e->iterate_next;
        if (!e)
                return NULL;

        return e->value;
}