1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
33 struct hashmap_entry {
36 struct hashmap_entry *bucket_next, *bucket_previous;
37 struct hashmap_entry *iterate_next, *iterate_previous;
41 hash_func_t hash_func;
42 compare_func_t compare_func;
44 struct hashmap_entry *iterate_list_head, *iterate_list_tail;
50 #define BY_HASH(h) ((struct hashmap_entry**) ((uint8_t*) (h) + ALIGN(sizeof(Hashmap))))
/* Tile pools for fast allocation of Hashmap headers and entries on the
 * main thread. first_*_pool is the newest pool page; first_*_tile is the
 * free list of released tiles (next pointer stored in the tile itself). */
static struct pool *first_hashmap_pool = NULL;
static void *first_hashmap_tile = NULL;

static struct pool *first_entry_pool = NULL;
static void *first_entry_tile = NULL;
64 static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size) {
71 *first_tile = * (void**) (*first_tile);
75 if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
80 n = *first_pool ? (*first_pool)->n_tiles : 0;
82 size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
83 n = (size - ALIGN(sizeof(struct pool))) / tile_size;
89 p->next = *first_pool;
96 i = (*first_pool)->n_used++;
98 return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
/* Returns a tile to its free list: the tile's first pointer-sized bytes
 * become the link to the previous free-list head. The memory itself is
 * never returned to the OS until cleanup_pool(). */
static void deallocate_tile(void **first_tile, void *p) {
        * (void**) p = *first_tile;
        *first_tile = p;
}
108 static void drop_pool(struct pool *p) {
117 __attribute__((destructor)) static void cleanup_pool(void) {
118 /* Be nice to valgrind */
120 drop_pool(first_hashmap_pool);
121 drop_pool(first_entry_pool);
/* Hashes a NUL-terminated string with Dan Bernstein's djb2 algorithm
 * (hash = hash * 33 + c, seeded with 5381). p must point to a valid
 * C string. */
unsigned string_hash_func(const void *p) {
        unsigned hash = 5381;
        const signed char *c;

        /* DJB's hash function */

        for (c = p; *c; c++)
                hash = (hash << 5) + hash + (unsigned) *c;

        return hash;
}
138 int string_compare_func(const void *a, const void *b) {
/* Hashes a pointer by its numeric value; the default hash function when
 * keys are compared by identity. */
unsigned trivial_hash_func(const void *p) {
        return PTR_TO_UINT(p);
}
/* Three-way comparison of two pointers by address; the default compare
 * function when keys are compared by identity. */
int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}
150 Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) {
155 b = is_main_thread();
157 size = ALIGN(sizeof(Hashmap)) + NBUCKETS * sizeof(struct hashmap_entry*);
160 h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size);
172 h->hash_func = hash_func ? hash_func : trivial_hash_func;
173 h->compare_func = compare_func ? compare_func : trivial_compare_func;
176 h->iterate_list_head = h->iterate_list_tail = NULL;
183 int hashmap_ensure_allocated(Hashmap **h, hash_func_t hash_func, compare_func_t compare_func) {
189 if (!(*h = hashmap_new(hash_func, compare_func)))
195 static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
199 /* Insert into hash table */
200 e->bucket_next = BY_HASH(h)[hash];
201 e->bucket_previous = NULL;
202 if (BY_HASH(h)[hash])
203 BY_HASH(h)[hash]->bucket_previous = e;
204 BY_HASH(h)[hash] = e;
206 /* Insert into iteration list */
207 e->iterate_previous = h->iterate_list_tail;
208 e->iterate_next = NULL;
209 if (h->iterate_list_tail) {
210 assert(h->iterate_list_head);
211 h->iterate_list_tail->iterate_next = e;
213 assert(!h->iterate_list_head);
214 h->iterate_list_head = e;
216 h->iterate_list_tail = e;
219 assert(h->n_entries >= 1);
222 static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
226 /* Remove from iteration list */
228 e->iterate_next->iterate_previous = e->iterate_previous;
230 h->iterate_list_tail = e->iterate_previous;
232 if (e->iterate_previous)
233 e->iterate_previous->iterate_next = e->iterate_next;
235 h->iterate_list_head = e->iterate_next;
237 /* Remove from hash table bucket list */
239 e->bucket_next->bucket_previous = e->bucket_previous;
241 if (e->bucket_previous)
242 e->bucket_previous->bucket_next = e->bucket_next;
244 BY_HASH(h)[hash] = e->bucket_next;
246 assert(h->n_entries >= 1);
250 static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
256 hash = h->hash_func(e->key) % NBUCKETS;
258 unlink_entry(h, e, hash);
261 deallocate_tile(&first_entry_tile, e);
266 void hashmap_free(Hashmap*h) {
274 deallocate_tile(&first_hashmap_tile, h);
279 void hashmap_free_free(Hashmap *h) {
280 hashmap_clear_free(h);
284 void hashmap_clear(Hashmap *h) {
288 while (h->iterate_list_head)
289 remove_entry(h, h->iterate_list_head);
292 void hashmap_clear_free(Hashmap *h) {
297 while ((p = hashmap_steal_first(h)))
301 static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
302 struct hashmap_entry *e;
304 assert(hash < NBUCKETS);
306 for (e = BY_HASH(h)[hash]; e; e = e->bucket_next)
307 if (h->compare_func(e->key, key) == 0)
313 int hashmap_put(Hashmap *h, const void *key, void *value) {
314 struct hashmap_entry *e;
319 hash = h->hash_func(key) % NBUCKETS;
321 if ((e = hash_scan(h, hash, key))) {
323 if (e->value == value)
330 e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry));
332 e = new(struct hashmap_entry, 1);
340 link_entry(h, e, hash);
345 int hashmap_replace(Hashmap *h, const void *key, void *value) {
346 struct hashmap_entry *e;
351 hash = h->hash_func(key) % NBUCKETS;
353 if ((e = hash_scan(h, hash, key))) {
359 return hashmap_put(h, key, value);
362 void* hashmap_get(Hashmap *h, const void *key) {
364 struct hashmap_entry *e;
369 hash = h->hash_func(key) % NBUCKETS;
371 if (!(e = hash_scan(h, hash, key)))
377 void* hashmap_remove(Hashmap *h, const void *key) {
378 struct hashmap_entry *e;
385 hash = h->hash_func(key) % NBUCKETS;
387 if (!(e = hash_scan(h, hash, key)))
396 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
397 struct hashmap_entry *e;
398 unsigned old_hash, new_hash;
403 old_hash = h->hash_func(old_key) % NBUCKETS;
404 if (!(e = hash_scan(h, old_hash, old_key)))
407 new_hash = h->hash_func(new_key) % NBUCKETS;
408 if (hash_scan(h, new_hash, new_key))
411 unlink_entry(h, e, old_hash);
416 link_entry(h, e, new_hash);
421 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
422 struct hashmap_entry *e, *k;
423 unsigned old_hash, new_hash;
428 old_hash = h->hash_func(old_key) % NBUCKETS;
429 if (!(e = hash_scan(h, old_hash, old_key)))
432 new_hash = h->hash_func(new_key) % NBUCKETS;
434 if ((k = hash_scan(h, new_hash, new_key)))
438 unlink_entry(h, e, old_hash);
443 link_entry(h, e, new_hash);
448 void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
449 struct hashmap_entry *e;
455 hash = h->hash_func(key) % NBUCKETS;
457 if (!(e = hash_scan(h, hash, key)))
460 if (e->value != value)
468 void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
469 struct hashmap_entry *e;
476 if (*i == ITERATOR_LAST)
479 if (*i == ITERATOR_FIRST && !h->iterate_list_head)
482 e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;
485 *i = (Iterator) e->iterate_next;
503 void *hashmap_iterate_backwards(Hashmap *h, Iterator *i, const void **key) {
504 struct hashmap_entry *e;
511 if (*i == ITERATOR_FIRST)
514 if (*i == ITERATOR_LAST && !h->iterate_list_tail)
517 e = *i == ITERATOR_LAST ? h->iterate_list_tail : (struct hashmap_entry*) *i;
519 if (e->iterate_previous)
520 *i = (Iterator) e->iterate_previous;
538 void *hashmap_iterate_skip(Hashmap *h, const void *key, Iterator *i) {
540 struct hashmap_entry *e;
545 hash = h->hash_func(key) % NBUCKETS;
547 if (!(e = hash_scan(h, hash, key)))
555 void* hashmap_first(Hashmap *h) {
560 if (!h->iterate_list_head)
563 return h->iterate_list_head->value;
566 void* hashmap_first_key(Hashmap *h) {
571 if (!h->iterate_list_head)
574 return (void*) h->iterate_list_head->key;
577 void* hashmap_last(Hashmap *h) {
582 if (!h->iterate_list_tail)
585 return h->iterate_list_tail->value;
588 void* hashmap_steal_first(Hashmap *h) {
594 if (!h->iterate_list_head)
597 data = h->iterate_list_head->value;
598 remove_entry(h, h->iterate_list_head);
603 void* hashmap_steal_first_key(Hashmap *h) {
609 if (!h->iterate_list_head)
612 key = (void*) h->iterate_list_head->key;
613 remove_entry(h, h->iterate_list_head);
618 unsigned hashmap_size(Hashmap *h) {
626 bool hashmap_isempty(Hashmap *h) {
631 return h->n_entries == 0;
634 int hashmap_merge(Hashmap *h, Hashmap *other) {
635 struct hashmap_entry *e;
642 for (e = other->iterate_list_head; e; e = e->iterate_next) {
645 if ((r = hashmap_put(h, e->key, e->value)) < 0)
653 void hashmap_move(Hashmap *h, Hashmap *other) {
654 struct hashmap_entry *e, *n;
658 /* The same as hashmap_merge(), but every new item from other
659 * is moved to h. This function is guaranteed to succeed. */
664 for (e = other->iterate_list_head; e; e = n) {
665 unsigned h_hash, other_hash;
669 h_hash = h->hash_func(e->key) % NBUCKETS;
671 if (hash_scan(h, h_hash, e->key))
674 other_hash = other->hash_func(e->key) % NBUCKETS;
676 unlink_entry(other, e, other_hash);
677 link_entry(h, e, h_hash);
681 int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
682 unsigned h_hash, other_hash;
683 struct hashmap_entry *e;
690 h_hash = h->hash_func(key) % NBUCKETS;
691 if (hash_scan(h, h_hash, key))
694 other_hash = other->hash_func(key) % NBUCKETS;
695 if (!(e = hash_scan(other, other_hash, key)))
698 unlink_entry(other, e, other_hash);
699 link_entry(h, e, h_hash);
704 Hashmap *hashmap_copy(Hashmap *h) {
709 if (!(copy = hashmap_new(h->hash_func, h->compare_func)))
712 if (hashmap_merge(copy, h) < 0) {
720 char **hashmap_get_strv(Hashmap *h) {
726 sv = new(char*, h->n_entries+1);
731 HASHMAP_FOREACH(item, h, it)