1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
27 #ifdef HAVE_SYS_AUXV_H
35 #define INITIAL_N_BUCKETS 31
/* One stored key/value pair. Each entry sits on two doubly-linked
 * lists at once: its hash-bucket chain and the global insertion-order
 * iteration list.
 * NOTE(review): the key and value fields are not visible in this
 * excerpt; lines appear elided. */
struct hashmap_entry {
        struct hashmap_entry *bucket_next, *bucket_previous;
        struct hashmap_entry *iterate_next, *iterate_previous;

/* Hashmap control structure. NOTE(review): the opening of the struct
 * (and likely a random_xor field referenced below) is elided in this
 * excerpt. Holds the user callbacks, iteration-list endpoints and the
 * bucket array. */
        hash_func_t hash_func;
        compare_func_t compare_func;
        struct hashmap_entry *iterate_list_head, *iterate_list_tail;
        struct hashmap_entry ** buckets;
        unsigned n_buckets, n_entries;

/* Per-process caches used by allocate_tile()/deallocate_tile() below:
 * a chain of page-aligned pools plus a free-tile list for Hashmap
 * headers and for entries respectively. */
static struct pool *first_hashmap_pool = NULL;
static void *first_hashmap_tile = NULL;

static struct pool *first_entry_pool = NULL;
static void *first_entry_tile = NULL;
/* Returns one tile of tile_size bytes: either pops the head of the
 * free-tile list, or carves the next unused tile out of the current
 * pool, growing the pool chain (roughly doubling, never below
 * 'at_least' tiles) when exhausted.
 * NOTE(review): several lines are elided in this excerpt — the
 * early return for a recycled tile, the pool allocation and its
 * error handling. */
static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size, unsigned at_least) {

        /* When a tile is released we add it to the list and simply
         * place the next pointer at its offset 0. */

        /* A tile must be able to hold the free-list link pointer. */
        assert(tile_size >= sizeof(void*));

        /* Pop the free-list head: its first word links to the next
         * free tile. */
        *first_tile = * (void**) (*first_tile);

        if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {

                /* Double the tile count (at least 'at_least'), round
                 * the pool up to a page, then recompute how many
                 * tiles actually fit in that page-aligned size. */
                n = *first_pool ? (*first_pool)->n_tiles : 0;
                n = MAX(at_least, n * 2);
                size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
                n = (size - ALIGN(sizeof(struct pool))) / tile_size;

                /* Chain the new pool in front of the old ones. */
                p->next = *first_pool;

        /* Hand out the next unused tile of the current pool. */
        i = (*first_pool)->n_used++;

        return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
/* Returns tile p to the free-tile list headed by *first_tile.
 * The tile's first word is reused as the free-list link (see
 * allocate_tile(), which asserts tile_size >= sizeof(void*)).
 * Fix: the visible fragment stored the old list head into the tile
 * but never pushed the tile itself — *first_tile must be updated to
 * point at p, otherwise released tiles are lost (leaked) and never
 * recycled. */
static void deallocate_tile(void **first_tile, void *p) {
        /* Link the current head behind the released tile... */
        * (void**) p = *first_tile;
        /* ...and make the released tile the new head. */
        *first_tile = p;
}
/* Frees a chain of pools. NOTE(review): the body is elided in this
 * excerpt; presumably walks p->next and free()s each pool. */
static void drop_pool(struct pool *p) {

/* Runs automatically at process exit (GCC destructor attribute) and
 * releases the cached pools so leak checkers stay quiet. */
__attribute__((destructor)) static void cleanup_pool(void) {
        /* Be nice to valgrind */

        drop_pool(first_hashmap_pool);
        drop_pool(first_entry_pool);
/* Hashes the NUL-terminated string at p with DJB's djb2 algorithm:
 * hash = hash * 33 + c, seeded with 5381. Iterates via signed char,
 * as the original did; the cast to unsigned makes the accumulation
 * well-defined (unsigned wraps).
 * Fix: the loop header and the final return were lost in this
 * excerpt; restored to the canonical djb2 form so the function
 * actually consumes the string and returns the hash. */
unsigned string_hash_func(const void *p) {
        unsigned hash = 5381;
        const signed char *c;

        /* DJB's hash function */
        for (c = p; *c; c++)
                hash = (hash << 5) + hash + (unsigned) *c;

        return hash;
}
149 int string_compare_func(const void *a, const void *b) {
/* Identity hash for pointer keys: the pointer value itself, converted
 * via the project's PTR_TO_UINT macro. Used as the default when
 * hashmap_new() is given no hash callback. */
unsigned trivial_hash_func(const void *p) {
        return PTR_TO_UINT(p);
/* Orders two keys by raw pointer value: -1, 0 or 1. Default compare
 * callback for maps keyed directly on pointers (pairs with
 * trivial_hash_func()). */
int trivial_compare_func(const void *a, const void *b) {
        if (a < b)
                return -1;
        if (a > b)
                return 1;
        return 0;
}
/* Hashes a uint64_t pointed to by p by folding its high word onto its
 * low word with XOR. The assert_cc() documents the assumption that
 * unsigned is exactly half the width of uint64_t on this platform.
 * Fix: the declaration of the local 'u' was lost in this excerpt;
 * restored so the function compiles. */
unsigned uint64_hash_func(const void *p) {
        uint64_t u;

        assert_cc(sizeof(uint64_t) == 2*sizeof(unsigned));

        u = *(const uint64_t*) p;

        /* Fold the top 32 bits into the bottom 32 bits. */
        return (unsigned) ((u >> 32) ^ u);
}
/* Orders two uint64_t values addressed by _a/_b: -1, 0 or 1.
 * Companion compare callback to uint64_hash_func().
 * Fix: the declarations of the locals 'a' and 'b' were lost in this
 * excerpt; restored so the function compiles. */
int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;

        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}
/* Maps key p to a bucket index: the user hash XORed with the per-map
 * random mask (anti-collision-attack hardening), reduced modulo the
 * current bucket count. */
static unsigned bucket_hash(Hashmap *h, const void *p) {
        return (h->hash_func(p) ^ h->random_xor) % h->n_buckets;
/* Allocates a new Hashmap with INITIAL_N_BUCKETS buckets stored
 * inline right after the header, all in a single allocation. On the
 * main thread the memory comes from the tile pool; presumably a
 * malloc() path handles other threads (that branch, and the NULL
 * checks, are elided in this excerpt). */
Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) {

        /* Pool allocation is only safe single-threaded. */
        b = is_main_thread();

        size = ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS * sizeof(struct hashmap_entry*);

        h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size, 8);

        /* Fall back to raw pointer hashing/comparison when the caller
         * passes no callbacks. */
        h->hash_func = hash_func ? hash_func : trivial_hash_func;
        h->compare_func = compare_func ? compare_func : trivial_compare_func;

        h->n_buckets = INITIAL_N_BUCKETS;

        h->iterate_list_head = h->iterate_list_tail = NULL;

        /* Bucket array lives directly behind the header. */
        h->buckets = (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)));

        /* Let's randomize our hash functions a bit so that they are
         * harder to guess for clients. For this, start out by cheaply
         * using some bits the kernel passed into the process using
         * the auxiliary vector. If the hashmap grows later on we will
         * rehash everything using a new random XOR mask then. */
#ifdef HAVE_SYS_AUXV_H
        /* AT_RANDOM points at 16 kernel-supplied random bytes. */
        auxv = (void*) getauxval(AT_RANDOM);
        h->random_xor = auxv ? *(unsigned*) auxv : random_u();

        /* NOTE(review): presumably the #else branch — the #else/#endif
         * lines are elided in this excerpt. */
        h->random_xor = random_u();

/* Ensures *h points at an allocated map, creating it with the given
 * callbacks when still NULL. NOTE(review): the NULL checks and return
 * paths are elided in this excerpt. */
int hashmap_ensure_allocated(Hashmap **h, hash_func_t hash_func, compare_func_t compare_func) {

        q = hashmap_new(hash_func, compare_func);
/* Links entry e into bucket 'hash' (at the front of the chain) and
 * appends it to the tail of the global iteration list.
 * NOTE(review): the else-branch bracing around the empty-list case
 * and the n_entries increment are elided in this excerpt. */
static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {

        /* Insert into hash table */
        e->bucket_next = h->buckets[hash];
        e->bucket_previous = NULL;
        if (h->buckets[hash])
                h->buckets[hash]->bucket_previous = e;
        h->buckets[hash] = e;

        /* Insert into iteration list */
        e->iterate_previous = h->iterate_list_tail;
        e->iterate_next = NULL;
        if (h->iterate_list_tail) {
                assert(h->iterate_list_head);
                h->iterate_list_tail->iterate_next = e;
                /* (else branch: list was empty, e becomes the head) */
                assert(!h->iterate_list_head);
                h->iterate_list_head = e;
        h->iterate_list_tail = e;

        assert(h->n_entries >= 1);
/* Unlinks e from the iteration list and from bucket 'hash'; the
 * neighbour on each side (or the list/bucket head/tail when e is at
 * an end) is re-pointed past e.
 * NOTE(review): the 'if'/'else' headers guarding each neighbour
 * update, and the n_entries decrement, are elided in this excerpt. */
static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {

        /* Remove from iteration list */
        e->iterate_next->iterate_previous = e->iterate_previous;
        h->iterate_list_tail = e->iterate_previous;

        if (e->iterate_previous)
                e->iterate_previous->iterate_next = e->iterate_next;
        h->iterate_list_head = e->iterate_next;

        /* Remove from hash table bucket list */
        e->bucket_next->bucket_previous = e->bucket_previous;

        if (e->bucket_previous)
                e->bucket_previous->bucket_next = e->bucket_next;
        h->buckets[hash] = e->bucket_next;

        assert(h->n_entries >= 1);
/* Fully removes entry e: recomputes its bucket from the key, unlinks
 * it from both lists and releases the entry's storage back to the
 * tile pool. NOTE(review): asserts and a presumably-present free()
 * path for non-pool entries are elided in this excerpt. */
static void remove_entry(Hashmap *h, struct hashmap_entry *e) {

        hash = bucket_hash(h, e->key);
        unlink_entry(h, e, hash);

        deallocate_tile(&first_entry_tile, e);
/* Frees the map object itself; the stored keys/values are untouched.
 * NOTE(review): NULL check, hashmap_clear() call and the free() of a
 * separately-allocated bucket array are elided in this excerpt. */
void hashmap_free(Hashmap*h) {

        /* Free the hashmap, but nothing in it */

        /* A bucket array that was reallocated by resize_buckets() is
         * no longer the inline one and must be freed separately. */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))

        deallocate_tile(&first_hashmap_tile, h);

/* Frees the map and free()s every value; keys are untouched. */
void hashmap_free_free(Hashmap *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        hashmap_clear_free(h);

/* Frees the map and free()s both every key and every value. */
void hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        hashmap_clear_free_free(h);

/* Drops every entry; keys and values are not freed. */
void hashmap_clear(Hashmap *h) {

        while (h->iterate_list_head)
                remove_entry(h, h->iterate_list_head);

/* Drops every entry, free()ing each value as it is stolen. */
void hashmap_clear_free(Hashmap *h) {

        while ((p = hashmap_steal_first(h)))

/* Drops every entry, free()ing both key and value of each. */
void hashmap_clear_free_free(Hashmap *h) {

        while (h->iterate_list_head) {

                /* Grab both pointers before the entry is destroyed. */
                a = h->iterate_list_head->value;
                b = (void*) h->iterate_list_head->key;
                remove_entry(h, h->iterate_list_head);
/* Walks bucket 'hash' and returns the entry whose key compares equal
 * to 'key' under the map's compare_func.
 * NOTE(review): the return statements (found entry / NULL) are elided
 * in this excerpt. */
static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
        struct hashmap_entry *e;

        assert(hash < h->n_buckets);

        for (e = h->buckets[hash]; e; e = e->bucket_next)
                if (h->compare_func(e->key, key) == 0)
/* Grows the bucket array once the load factor reaches 3/4, rehashing
 * every entry under a fresh random XOR mask so bucket placement stays
 * unpredictable. Returns whether the bucket count changed (callers
 * must then recompute any cached bucket index).
 * NOTE(review): the early returns, the 'nxor' initialization and the
 * switch-over of h->buckets/h->n_buckets are elided in this
 * excerpt. */
static bool resize_buckets(Hashmap *h) {
        struct hashmap_entry **n, *i;

        /* Load below 75%: nothing to do. */
        if (_likely_(h->n_entries*4 < h->n_buckets*3))

        /* Increase by four */
        m = (h->n_entries+1)*4-1;

        /* If we hit OOM we simply risk packed hashmaps... */
        n = new0(struct hashmap_entry*, m);

        /* Let's use a different randomized xor value for the
         * extension, so that people cannot guess what we are using. */

        for (i = h->iterate_list_head; i; i = i->iterate_next) {

                /* Raw user hash; masks are applied per-table below. */
                hash = h->hash_func(i->key);

                /* First, drop from old bucket table */
                i->bucket_next->bucket_previous = i->bucket_previous;

                if (i->bucket_previous)
                        i->bucket_previous->bucket_next = i->bucket_next;
                h->buckets[(hash ^ h->random_xor) % h->n_buckets] = i->bucket_next;

                /* Then, add to new bucket table */
                x = (hash ^ nxor) % m;

                i->bucket_next = n[x];
                i->bucket_previous = NULL;
                n[x]->bucket_previous = i;

        /* Old array was itself heap-allocated (not the inline one):
         * free it before switching over. */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))

        h->random_xor = nxor;
/* Inserts key -> value. Storing the same value under an existing key
 * is a no-op; presumably a different value yields -EEXIST and a fresh
 * insert returns positive (those returns are elided in this excerpt).
 * May resize the table before inserting. */
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);

        /* Same value already stored: nothing to do. */
        if (e->value == value)

        /* A resize changes bucket count and mask: recompute. */
        if (resize_buckets(h))
                hash = bucket_hash(h, key);

        /* Main thread: entry from the tile pool... */
        e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry), 64U);

        /* ...otherwise plain heap allocation. */
        e = new(struct hashmap_entry, 1);

        link_entry(h, e, hash);
/* Like hashmap_put(), but when the key already exists its stored
 * value (and presumably key pointer) is overwritten instead of
 * failing; otherwise falls through to a normal put. */
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);

        /* Key absent: insert fresh. */
        return hashmap_put(h, key, value);

/* Updates the value of an existing key only — never inserts.
 * NOTE(review): the found/not-found return paths are elided in this
 * excerpt. */
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);
/* Returns the value stored under key, presumably NULL when the map is
 * NULL or the key is absent (those paths are elided in this
 * excerpt). */
void* hashmap_get(Hashmap *h, const void *key) {

        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);

/* Like hashmap_get(), but also hands back the pointer to the stored
 * key via *key2 (useful when the caller must free the original
 * key). */
void* hashmap_get2(Hashmap *h, const void *key, void **key2) {

        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);

        *key2 = (void*) e->key;

/* Returns true when key is present in the map. */
bool hashmap_contains(Hashmap *h, const void *key) {

        hash = bucket_hash(h, key);
        return !!hash_scan(h, hash, key);
/* Removes key's entry and returns its value. NOTE(review): NULL
 * checks and the removal/return lines are elided in this excerpt. */
void* hashmap_remove(Hashmap *h, const void *key) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);
        e = hash_scan(h, hash, key);

/* Re-keys an existing entry: the entry stored under old_key is moved
 * to new_key and given 'value'. Fails when new_key is already taken
 * (presumably -EEXIST; returns elided in this excerpt). */
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e;
        unsigned old_hash, new_hash;

        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);

        /* Refuse to clobber an existing entry under new_key. */
        new_hash = bucket_hash(h, new_key);
        if (hash_scan(h, new_hash, new_key))

        /* Relink under the new bucket; no reallocation needed. */
        unlink_entry(h, e, old_hash);

        link_entry(h, e, new_hash);

/* Like hashmap_remove_and_put(), but an existing entry under new_key
 * is displaced (removed) instead of causing a failure. */
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e, *k;
        unsigned old_hash, new_hash;

        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);

        new_hash = bucket_hash(h, new_key);
        k = hash_scan(h, new_hash, new_key);

        unlink_entry(h, e, old_hash);

        link_entry(h, e, new_hash);

/* Removes the entry only when both the key and the stored value
 * match; otherwise leaves the map untouched. */
void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct hashmap_entry *e;

        hash = bucket_hash(h, key);

        e = hash_scan(h, hash, key);

        /* Value mismatch: do not remove. */
        if (e->value != value)
/* Steps *i through the map in insertion order; the Iterator is an
 * opaque cursor holding the next entry (or the FIRST/LAST sentinels).
 * NOTE(review): the value/key result handling is elided in this
 * excerpt. */
void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;

        /* Already ran off the end. */
        if (*i == ITERATOR_LAST)

        /* Fresh iterator over an empty map. */
        if (*i == ITERATOR_FIRST && !h->iterate_list_head)

        e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;

        /* Park the cursor on the next entry for the following call. */
        *i = (Iterator) e->iterate_next;

/* As hashmap_iterate(), but walks the insertion-order list from the
 * tail toward the head. */
void *hashmap_iterate_backwards(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;

        if (*i == ITERATOR_FIRST)

        if (*i == ITERATOR_LAST && !h->iterate_list_tail)

        e = *i == ITERATOR_LAST ? h->iterate_list_tail : (struct hashmap_entry*) *i;

        if (e->iterate_previous)
                *i = (Iterator) e->iterate_previous;

/* Positions iterator *i at the entry stored under 'key', so that
 * iteration resumes from there. NOTE(review): the result handling is
 * elided in this excerpt. */
void *hashmap_iterate_skip(Hashmap *h, const void *key, Iterator *i) {

        struct hashmap_entry *e;

        hash = bucket_hash(h, key);

        e = hash_scan(h, hash, key);
/* Value of the oldest (first-inserted) entry; NULL when empty. */
void* hashmap_first(Hashmap *h) {

        if (!h->iterate_list_head)

        return h->iterate_list_head->value;

/* Key of the oldest entry; NULL when empty. */
void* hashmap_first_key(Hashmap *h) {

        if (!h->iterate_list_head)

        return (void*) h->iterate_list_head->key;

/* Value of the newest (last-inserted) entry; NULL when empty. */
void* hashmap_last(Hashmap *h) {

        if (!h->iterate_list_tail)

        return h->iterate_list_tail->value;

/* Removes the oldest entry and returns its value. */
void* hashmap_steal_first(Hashmap *h) {

        if (!h->iterate_list_head)

        data = h->iterate_list_head->value;
        remove_entry(h, h->iterate_list_head);

/* Removes the oldest entry and returns its key. */
void* hashmap_steal_first_key(Hashmap *h) {

        if (!h->iterate_list_head)

        key = (void*) h->iterate_list_head->key;
        remove_entry(h, h->iterate_list_head);

/* Number of entries. NOTE(review): body elided in this excerpt. */
unsigned hashmap_size(Hashmap *h) {

/* Number of buckets. NOTE(review): body elided in this excerpt. */
unsigned hashmap_buckets(Hashmap *h) {

/* True when the map holds no entries (NULL check elided). */
bool hashmap_isempty(Hashmap *h) {

        return h->n_entries == 0;
/* Copies every entry of 'other' into h via hashmap_put(); keys
 * already present in h are tolerated (-EEXIST skipped), any other
 * failure aborts the merge. */
int hashmap_merge(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e;

        for (e = other->iterate_list_head; e; e = e->iterate_next) {

                r = hashmap_put(h, e->key, e->value);
                if (r < 0 && r != -EEXIST)

/* Moves every entry from 'other' into h by relinking the existing
 * entry structs — no allocation, so it cannot fail; entries whose key
 * already exists in h stay behind in 'other'. */
void hashmap_move(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e, *n;

        /* The same as hashmap_merge(), but every new item from other
         * is moved to h. This function is guaranteed to succeed. */

        for (e = other->iterate_list_head; e; e = n) {
                unsigned h_hash, other_hash;

                /* Key already taken in h: leave this entry in other. */
                h_hash = bucket_hash(h, e->key);
                if (hash_scan(h, h_hash, e->key))

                other_hash = bucket_hash(other, e->key);
                unlink_entry(other, e, other_hash);
                link_entry(h, e, h_hash);

/* Moves the single entry for 'key' from 'other' into h; fails when h
 * already has the key or 'other' does not (returns elided in this
 * excerpt). */
int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
        unsigned h_hash, other_hash;
        struct hashmap_entry *e;

        h_hash = bucket_hash(h, key);
        if (hash_scan(h, h_hash, key))

        other_hash = bucket_hash(other, key);
        e = hash_scan(other, other_hash, key);

        unlink_entry(other, e, other_hash);
        link_entry(h, e, h_hash);

/* Shallow copy: new map with the same callbacks, holding the same
 * key/value pointers (nothing is duplicated). */
Hashmap *hashmap_copy(Hashmap *h) {

        copy = hashmap_new(h->hash_func, h->compare_func);

        /* Roll back on failure. */
        if (hashmap_merge(copy, h) < 0) {

/* Returns a newly allocated NULL-terminated array of all values,
 * treated as char* strings. Caller owns the array. */
char **hashmap_get_strv(Hashmap *h) {

        sv = new(char*, h->n_entries+1);

        HASHMAP_FOREACH(item, h, it)
945 void *hashmap_next(Hashmap *h, const void *key) {
947 struct hashmap_entry *e;
955 hash = bucket_hash(h, key);
956 e = hash_scan(h, hash, key);