1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#ifdef HAVE_SYS_AUXV_H
#include <sys/auxv.h>
#endif
35 #define INITIAL_N_BUCKETS 31
/* One hashmap entry: doubly linked both within its hash bucket chain
 * and within the global insertion-order iteration list.
 * NOTE(review): the key/value members and the closing brace of this
 * struct are not visible in this chunk — source appears truncated. */
struct hashmap_entry {
        struct hashmap_entry *bucket_next, *bucket_previous;
        struct hashmap_entry *iterate_next, *iterate_previous;
/* NOTE(review): the members below evidently belong to the Hashmap
 * object itself (compare hashmap_new()/bucket_hash() further down);
 * the "struct Hashmap {" header line is missing from this chunk. */
        hash_func_t hash_func;          /* hashes a key to an unsigned */
        compare_func_t compare_func;    /* qsort-style key comparator */
        struct hashmap_entry *iterate_list_head, *iterate_list_tail; /* insertion-order list */
        struct hashmap_entry ** buckets; /* bucket array, indexed by bucket_hash() */
        unsigned n_buckets, n_entries;
/* Tile pools: simple free-list allocators used to avoid one malloc()
 * per hashmap/entry; see allocate_tile()/deallocate_tile() below. */
static struct pool *first_hashmap_pool = NULL;
static void *first_hashmap_tile = NULL;
static struct pool *first_entry_pool = NULL;
static void *first_entry_tile = NULL;
/* Pops a tile from the free list when one is available, otherwise
 * hands out the next unused tile of the current pool, allocating a
 * new, (at least) twice-as-large pool when the current one is full.
 * NOTE(review): several lines (local declarations, the malloc/OOM
 * handling, pool field setup, early return) are missing from this
 * chunk — the visible code is incomplete. */
static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size, unsigned at_least) {

        /* When a tile is released we add it to the list and simply
         * place the next pointer at its offset 0. */

        assert(tile_size >= sizeof(void*));
        /* Free list non-empty: pop its head (the tile's first
         * pointer-sized word links to the next free tile). */
        *first_tile = * (void**) (*first_tile);
        /* No pool yet, or current pool exhausted: grow. */
        if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
                n = *first_pool ? (*first_pool)->n_tiles : 0;
                n = MAX(at_least, n * 2);
                /* Round the allocation up to a whole page, then
                 * recompute how many tiles actually fit. */
                size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
                n = (size - ALIGN(sizeof(struct pool))) / tile_size;
                /* Push the new pool onto the pool list. */
                p->next = *first_pool;
        /* Hand out the next unused tile of the current pool. */
        i = (*first_pool)->n_used++;
        return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
/* Returns a tile to the free list: the tile's first pointer-sized
 * word is reused as the "next free tile" link and the tile becomes
 * the new list head.  Pool memory itself is only released by
 * cleanup_pool() at process exit.
 * (Review: the original chunk was truncated — the push of p onto the
 * list and the closing brace were missing.) */
static void deallocate_tile(void **first_tile, void *p) {
        * (void**) p = *first_tile;
        *first_tile = p;
}
/* Frees a chain of pools.  NOTE(review): the body is not visible in
 * this chunk — presumably it walks p->next free()ing each pool; it is
 * called from cleanup_pool() below for both pool lists. */
static void drop_pool(struct pool *p) {
/* Destructor run automatically at process exit: releases all tile-pool
 * memory so tools like valgrind do not report the pools as leaked. */
__attribute__((destructor)) static void cleanup_pool(void) {
        /* Be nice to valgrind */
        drop_pool(first_hashmap_pool);
        drop_pool(first_entry_pool);
/* NOTE(review): closing brace missing from this chunk. */
/* Hashes a NUL-terminated string key with Dan Bernstein's DJB2
 * algorithm: hash = hash * 33 + c, seeded with 5381.
 * (Review: the loop header and return were truncated in this chunk
 * and have been restored.) */
unsigned string_hash_func(const void *p) {
        unsigned hash = 5381;
        const signed char *c;

        /* DJB's hash function */
        for (c = p; *c; c++)
                hash = (hash << 5) + hash + (unsigned) *c;

        return hash;
}
/* qsort-style comparator for NUL-terminated string keys; simply
 * delegates to strcmp().  (Review: body restored — the chunk showed
 * only the signature line.) */
int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}
/* Identity hash for pointer/integer keys: the pointer value itself
 * (converted via PTR_TO_UINT) is the hash.  (Review: closing brace
 * restored — it was missing from this chunk.) */
unsigned trivial_hash_func(const void *p) {
        return PTR_TO_UINT(p);
}
/* qsort-style comparator for pointer/integer keys, ordering by raw
 * pointer value.  (Review: closing brace restored — it was missing
 * from this chunk.) */
int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}
/* Hash for uint64_t keys (p points at the 64-bit value): folds the
 * high 32 bits into the low ones.  The assert_cc() documents the
 * assumption that unsigned is exactly half the width of uint64_t.
 * (Review: the local declaration and closing brace were truncated in
 * this chunk and have been restored.) */
unsigned uint64_hash_func(const void *p) {
        uint64_t u;

        assert_cc(sizeof(uint64_t) == 2*sizeof(unsigned));

        u = *(const uint64_t*) p;

        return (unsigned) ((u >> 32) ^ u);
}
/* qsort-style comparator for uint64_t keys pointed to by _a and _b.
 * (Review: the local declarations and closing brace were truncated in
 * this chunk and have been restored.) */
int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;

        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}
180 static unsigned bucket_hash(Hashmap *h, const void *p) {
181 return (h->hash_func(p) ^ h->random_xor) % h->n_buckets;
/* Allocates a new Hashmap with the given callbacks (NULL callbacks
 * fall back to the trivial pointer hash/compare).  The initial bucket
 * array is carved out of the same allocation, directly behind the
 * Hashmap header.  On the main thread the tile-pool allocator is used
 * instead of malloc().
 * NOTE(review): several lines are missing from this chunk (local
 * declarations, the non-main-thread malloc path, OOM handling,
 * n_entries initialisation, an "#else"/"#endif" pair, the return). */
Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) {
        b = is_main_thread();           /* tile pools are main-thread only */
        /* One allocation: header plus the initial bucket array. */
        size = ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS * sizeof(struct hashmap_entry*);
        h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size, 8);
        h->hash_func = hash_func ? hash_func : trivial_hash_func;
        h->compare_func = compare_func ? compare_func : trivial_compare_func;
        h->n_buckets = INITIAL_N_BUCKETS;
        h->iterate_list_head = h->iterate_list_tail = NULL;
        /* Buckets live directly after the header, inside the same block. */
        h->buckets = (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)));
        /* Let's randomize our hash functions a bit so that they are
         * harder to guess for clients. For this, start out by cheaply
         * using some bits the kernel passed into the process using
         * the auxiliary vector. If the hashmap grows later on we will
         * rehash everything using a new random XOR mask from
         * /dev/urandom. */
#ifdef HAVE_SYS_AUXV_H
        auxv = (void*) getauxval(AT_RANDOM);
        h->random_xor = auxv ? *(unsigned*) auxv : random_u();
        /* NOTE(review): an "#else" directive is missing here in this chunk. */
        h->random_xor = random_u();
/* Ensures *h points at a valid Hashmap, allocating one with the given
 * callbacks when *h is still NULL.
 * NOTE(review): the guard clauses, the OOM check, the assignment to *h
 * and the returns are missing from this chunk. */
int hashmap_ensure_allocated(Hashmap **h, hash_func_t hash_func, compare_func_t compare_func) {
        q = hashmap_new(hash_func, compare_func);
/* Links entry e into h: at the head of the bucket chain for the
 * precomputed bucket index "hash", and at the tail of the global
 * insertion-order iteration list; finally bumps the entry count.
 * NOTE(review): the entry asserts, the "} else {" / "}" lines of the
 * tail-insert branch and the "h->n_entries++" line are missing from
 * this chunk. */
static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
        /* Insert into hash table */
        e->bucket_next = h->buckets[hash];
        e->bucket_previous = NULL;
        if (h->buckets[hash])
                h->buckets[hash]->bucket_previous = e;
        h->buckets[hash] = e;
        /* Insert into iteration list */
        e->iterate_previous = h->iterate_list_tail;
        e->iterate_next = NULL;
        if (h->iterate_list_tail) {
                assert(h->iterate_list_head);
                h->iterate_list_tail->iterate_next = e;
                /* NOTE(review): else-branch lines below — list was
                 * empty, so e becomes the head too. */
                assert(!h->iterate_list_head);
                h->iterate_list_head = e;
        h->iterate_list_tail = e;
        assert(h->n_entries >= 1);
/* Detaches e from both the iteration list and its bucket chain and
 * (in lines missing here) decrements n_entries.  "hash" must be e's
 * current bucket index.
 * NOTE(review): the if()/else lines framing most of these statements
 * and the "h->n_entries--" line are missing from this chunk. */
static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
        /* Remove from iteration list */
        e->iterate_next->iterate_previous = e->iterate_previous;
        /* (else: e was the tail) */
        h->iterate_list_tail = e->iterate_previous;
        if (e->iterate_previous)
                e->iterate_previous->iterate_next = e->iterate_next;
        /* (else: e was the head) */
        h->iterate_list_head = e->iterate_next;
        /* Remove from hash table bucket list */
        e->bucket_next->bucket_previous = e->bucket_previous;
        if (e->bucket_previous)
                e->bucket_previous->bucket_next = e->bucket_next;
        /* (else: e was the bucket head) */
        h->buckets[hash] = e->bucket_next;
        assert(h->n_entries >= 1);
/* Unlinks e from h and releases its memory — the tile pool is used on
 * the main thread, presumably plain free() otherwise (that branch is
 * not visible here).  Keys and values are never freed by this helper.
 * NOTE(review): asserts, local declarations and the free() branch are
 * missing from this chunk. */
static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
        hash = bucket_hash(h, e->key);
        unlink_entry(h, e, hash);
        deallocate_tile(&first_entry_tile, e);
/* Frees the hashmap object itself; stored keys/values are never
 * touched.  NOTE(review): the NULL guard, the hashmap_clear() call and
 * the non-pool free() paths are missing from this chunk. */
void hashmap_free(Hashmap*h) {
        /* Free the hashmap, but nothing in it */
        /* A bucket array that no longer lives inside the Hashmap
         * allocation was reallocated by resize_buckets() and must be
         * released separately (free() line missing here). */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
        deallocate_tile(&first_hashmap_tile, h);
/* Frees the hashmap and free()s every stored value; keys are left for
 * the caller. */
void hashmap_free_free(Hashmap *h) {
        /* Free the hashmap and all data objects in it, but not the
         * keys */
        hashmap_clear_free(h);
/* Frees the hashmap plus every stored key and value. */
void hashmap_free_free_free(Hashmap *h) {
        /* Free the hashmap and all data and key objects in it */
        hashmap_clear_free_free(h);
/* Removes (and deallocates the entries for) every element, leaving the
 * map itself allocated and empty.  Keys/values are not freed.
 * NOTE(review): NULL guards, locals and closing braces are missing
 * from this chunk in all three functions below. */
void hashmap_clear(Hashmap *h) {
        while (h->iterate_list_head)
                remove_entry(h, h->iterate_list_head);
/* Empties the map and free()s every value. */
void hashmap_clear_free(Hashmap *h) {
        while ((p = hashmap_steal_first(h)))
/* Empties the map and free()s both keys and values; a/b hold the key
 * and value pointers across the entry removal. */
void hashmap_clear_free_free(Hashmap *h) {
        while (h->iterate_list_head) {
                a = h->iterate_list_head->value;
                b = (void*) h->iterate_list_head->key;
                remove_entry(h, h->iterate_list_head);
394 static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
395 struct hashmap_entry *e;
397 assert(hash < h->n_buckets);
399 for (e = h->buckets[hash]; e; e = e->bucket_next)
400 if (h->compare_func(e->key, key) == 0)
/* Grows the bucket array once the map is about 3/4 full and rehashes
 * every entry under a fresh random XOR mask; returns true when a
 * resize happened (callers must then recompute bucket indices).
 * NOTE(review): local declarations, the early "return false", the new
 * mask assignment, several if()/else framing lines, the free() of the
 * old array and the final bookkeeping/return are missing from this
 * chunk. */
static bool resize_buckets(Hashmap *h) {
        struct hashmap_entry **n, *i;
        /* Load factor still below 3/4: nothing to do. */
        if (_likely_(h->n_entries*4 < h->n_buckets*3))
        /* Increase by four */
        m = (h->n_entries+1)*4-1;
        /* If we hit OOM we simply risk packed hashmaps... */
        n = new0(struct hashmap_entry*, m);
        /* Let's use a different randomized xor value for the
         * extension, so that people cannot guess what we are using
         * as mask. */
        /* Rehash all entries by walking the (bucket-independent)
         * iteration list. */
        for (i = h->iterate_list_head; i; i = i->iterate_next) {
                hash = h->hash_func(i->key);
                /* First, drop from old bucket table */
                i->bucket_next->bucket_previous = i->bucket_previous;
                if (i->bucket_previous)
                        i->bucket_previous->bucket_next = i->bucket_next;
                /* (else: i headed its old bucket) */
                h->buckets[(hash ^ h->random_xor) % h->n_buckets] = i->bucket_next;
                /* Then, add to new bucket table */
                x = (hash ^ nxor) % m;
                i->bucket_next = n[x];
                i->bucket_previous = NULL;
                n[x]->bucket_previous = i;
        /* The old array was a separate heap allocation only when it is
         * not the one embedded behind the Hashmap header. */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
        h->random_xor = nxor;
/* Inserts key→value.  From the visible logic: when the key is already
 * present the call is idempotent for an identical value, presumably
 * failing (-EEXIST) otherwise — the return lines are not visible here.
 * NOTE(review): local declarations, the early returns, the OOM check
 * and the key/value assignments into e are missing from this chunk. */
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        /* Key already present: same value means nothing to do. */
        if (e->value == value)
        /* A resize changes the bucket layout, so recompute the index. */
        if (resize_buckets(h))
                hash = bucket_hash(h, key);
        /* Main thread: entry comes from the tile pool... */
        e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry), 64U);
        /* ...otherwise from the heap (branch framing missing here). */
        e = new(struct hashmap_entry, 1);
        link_entry(h, e, hash);
/* Like hashmap_put(), but overwrites the stored value when the key is
 * already present instead of failing.
 * NOTE(review): locals and the overwrite/return lines are missing from
 * this chunk. */
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        /* Key not present: fall back to a fresh insertion. */
        return hashmap_put(h, key, value);
/* Updates the value for an existing key only; presumably returns a
 * negative error when the key is absent (tail not visible here). */
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
/* Looks up "key" and returns its stored value; presumably NULL when
 * the map is NULL or the key absent (guards/returns not visible in
 * this chunk). */
void* hashmap_get(Hashmap *h, const void *key) {
        struct hashmap_entry *e;
        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
/* Like hashmap_get(), but additionally hands the stored key pointer
 * back through *key2 — useful when the stored key is an allocated
 * object the caller needs to own or free. */
void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct hashmap_entry *e;
        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        *key2 = (void*) e->key;
/* Returns true when "key" is present in the map. */
bool hashmap_contains(Hashmap *h, const void *key) {
        hash = bucket_hash(h, key);
        return !!hash_scan(h, hash, key);
/* Removes "key" from the map, returning its value; the key/value
 * objects themselves are not freed.
 * NOTE(review): locals, NULL guards, the value capture and the returns
 * are missing from this chunk for all four functions below. */
void* hashmap_remove(Hashmap *h, const void *key) {
        struct hashmap_entry *e;
        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
/* Re-keys an entry: looks up old_key, refuses when new_key is already
 * taken (so the map stays unchanged), otherwise relinks the entry into
 * new_key's bucket and stores the new key/value. */
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e;
        unsigned old_hash, new_hash;
        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);
        /* Refuse to clobber an existing entry under new_key. */
        new_hash = bucket_hash(h, new_key);
        if (hash_scan(h, new_hash, new_key))
        /* Move the entry to its new bucket. */
        unlink_entry(h, e, old_hash);
        link_entry(h, e, new_hash);
/* Like hashmap_remove_and_put(), but an existing entry (k) under
 * new_key is dropped first, so the re-keyed entry replaces it. */
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e, *k;
        unsigned old_hash, new_hash;
        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);
        new_hash = bucket_hash(h, new_key);
        k = hash_scan(h, new_hash, new_key);
        unlink_entry(h, e, old_hash);
        link_entry(h, e, new_hash);
/* Removes "key" only when its stored value is exactly "value";
 * otherwise the map is left untouched (tail not visible here). */
void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;
        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
        if (e->value != value)
/* Advances iterator *i through the map in insertion order, returning
 * each value and (when key is non-NULL, per the signature) storing the
 * entry's key.  ITERATOR_FIRST starts a walk; the saved state is a
 * pointer to the next entry.
 * NOTE(review): NULL guards, the key/value output lines and the
 * end-of-list handling are missing from this chunk. */
void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;
        if (*i == ITERATOR_LAST)
        if (*i == ITERATOR_FIRST && !h->iterate_list_head)
        /* Either start at the list head or resume at the saved entry. */
        e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;
        *i = (Iterator) e->iterate_next;
/* Mirror image of hashmap_iterate(): walks the insertion-order list
 * from the tail towards the head, starting at ITERATOR_LAST. */
void *hashmap_iterate_backwards(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;
        if (*i == ITERATOR_FIRST)
        if (*i == ITERATOR_LAST && !h->iterate_list_tail)
        e = *i == ITERATOR_LAST ? h->iterate_list_tail : (struct hashmap_entry*) *i;
        if (e->iterate_previous)
                *i = (Iterator) e->iterate_previous;
/* Positions iterator *i on the entry for "key" so that a subsequent
 * hashmap_iterate() continues from that element. */
void *hashmap_iterate_skip(Hashmap *h, const void *key, Iterator *i) {
        struct hashmap_entry *e;
        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
/* Returns the value of the oldest (first-inserted) entry.
 * NOTE(review): NULL-map guards, the "return NULL" lines and closing
 * braces are missing from this chunk for all five functions below. */
void* hashmap_first(Hashmap *h) {
        if (!h->iterate_list_head)
        return h->iterate_list_head->value;
/* Returns the key of the oldest entry. */
void* hashmap_first_key(Hashmap *h) {
        if (!h->iterate_list_head)
        return (void*) h->iterate_list_head->key;
/* Returns the value of the newest (last-inserted) entry. */
void* hashmap_last(Hashmap *h) {
        if (!h->iterate_list_tail)
        return h->iterate_list_tail->value;
/* Removes the oldest entry and returns its value — ownership of the
 * value transfers to the caller. */
void* hashmap_steal_first(Hashmap *h) {
        if (!h->iterate_list_head)
        data = h->iterate_list_head->value;
        remove_entry(h, h->iterate_list_head);
/* Removes the oldest entry and returns its key instead. */
void* hashmap_steal_first_key(Hashmap *h) {
        if (!h->iterate_list_head)
        key = (void*) h->iterate_list_head->key;
        remove_entry(h, h->iterate_list_head);
/* Number of entries currently stored (body not visible in this chunk;
 * presumably 0 for a NULL map). */
unsigned hashmap_size(Hashmap *h) {
/* Number of hash buckets currently allocated (body not visible). */
unsigned hashmap_buckets(Hashmap *h) {
/* True when the map holds no entries (the NULL-map guard is missing
 * from this chunk). */
bool hashmap_isempty(Hashmap *h) {
        return h->n_entries == 0;
/* Copies every entry of "other" into "h" via hashmap_put().  Keys that
 * already exist in h are tolerated (-EEXIST is ignored); any other
 * error aborts the merge.
 * NOTE(review): guards, the error return and closing braces are
 * missing from this chunk. */
int hashmap_merge(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e;
        for (e = other->iterate_list_head; e; e = e->iterate_next) {
                r = hashmap_put(h, e->key, e->value);
                if (r < 0 && r != -EEXIST)
/* Moves all entries from "other" into "h" by relinking them — no
 * allocation happens, hence no failure path; entries whose key already
 * exists in h are skipped and stay behind in other. */
void hashmap_move(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e, *n;

        /* The same as hashmap_merge(), but every new item from other
         * is moved to h. This function is guaranteed to succeed. */

        /* n caches the successor, since e is relinked inside the loop. */
        for (e = other->iterate_list_head; e; e = n) {
                unsigned h_hash, other_hash;
                /* Conflict: target already has this key — skip. */
                h_hash = bucket_hash(h, e->key);
                if (hash_scan(h, h_hash, e->key))
                other_hash = bucket_hash(other, e->key);
                unlink_entry(other, e, other_hash);
                link_entry(h, e, h_hash);
/* Moves the single entry for "key" from "other" into "h" by relinking
 * it; presumably fails when h already holds the key or other does not
 * (the return lines are not visible in this chunk). */
int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
        unsigned h_hash, other_hash;
        struct hashmap_entry *e;
        /* Refuse when the target already holds this key. */
        h_hash = bucket_hash(h, key);
        if (hash_scan(h, h_hash, key))
        other_hash = bucket_hash(other, key);
        e = hash_scan(other, other_hash, key);
        unlink_entry(other, e, other_hash);
        link_entry(h, e, h_hash);
/* Returns a shallow copy of h — same callbacks, same key/value
 * pointers; the half-built copy is presumably destroyed again when the
 * merge fails (cleanup lines not visible here). */
Hashmap *hashmap_copy(Hashmap *h) {
        copy = hashmap_new(h->hash_func, h->compare_func);
        if (hashmap_merge(copy, h) < 0) {
/* Collects all stored values into a newly allocated NULL-terminated
 * string vector; the caller owns the returned array. */
char **hashmap_get_strv(Hashmap *h) {
        sv = new(char*, h->n_entries+1);
        HASHMAP_FOREACH(item, h, it)
943 void *hashmap_next(Hashmap *h, const void *key) {
945 struct hashmap_entry *e;
953 hash = bucket_hash(h, key);
954 e = hash_scan(h, hash, key);