/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
/* One key/value cell. Every entry is linked into two doubly-linked
 * lists at once: its hash bucket chain and the global insertion-order
 * iteration list. */
struct hashmap_entry {
        const void *key;
        void *value;
        struct hashmap_entry *bucket_next, *bucket_previous;
        struct hashmap_entry *iterate_next, *iterate_previous;
};
41 hash_func_t hash_func;
42 compare_func_t compare_func;
44 struct hashmap_entry *iterate_list_head, *iterate_list_tail;
/* The bucket array lives directly behind the aligned Hashmap header. */
#define BY_HASH(h) ((struct hashmap_entry**) ((uint8_t*) (h) + ALIGN(sizeof(Hashmap))))
58 static struct pool *first_hashmap_pool = NULL;
59 static void *first_hashmap_tile = NULL;
61 static struct pool *first_entry_pool = NULL;
62 static void *first_entry_tile = NULL;
64 static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size) {
71 *first_tile = * (void**) (*first_tile);
75 if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
80 n = *first_pool ? (*first_pool)->n_tiles : 0;
82 size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
83 n = (size - ALIGN(sizeof(struct pool))) / tile_size;
89 p->next = *first_pool;
96 i = (*first_pool)->n_used++;
98 return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
/* Return a tile to its free-list: the tile's first pointer-sized bytes
 * become the link to the previous list head. */
static void deallocate_tile(void **first_tile, void *p) {
        * (void**) p = *first_tile;
        *first_tile = p;
}
108 static void drop_pool(struct pool *p) {
117 __attribute__((destructor)) static void cleanup_pool(void) {
118 /* Be nice to valgrind */
120 drop_pool(first_hashmap_pool);
121 drop_pool(first_entry_pool);
/* Hash a NUL-terminated string with Bernstein's DJB2 algorithm
 * (hash = hash*33 + c, seeded with 5381). */
unsigned string_hash_func(const void *p) {
        unsigned hash = 5381;
        const signed char *c;

        /* DJB's hash function */

        for (c = p; *c; c++)
                hash = (hash << 5) + hash + (unsigned) *c;

        return hash;
}
/* Compare two NUL-terminated strings; strcmp() semantics. */
int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}
/* Hash the pointer value itself; used when keys are plain pointers. */
unsigned trivial_hash_func(const void *p) {
        return PTR_TO_UINT(p);
}
/* Order keys by raw pointer value: negative/zero/positive like strcmp(). */
int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}
150 Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) {
155 b = is_main_thread();
157 size = ALIGN(sizeof(Hashmap)) + NBUCKETS * sizeof(struct hashmap_entry*);
160 h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size);
172 h->hash_func = hash_func ? hash_func : trivial_hash_func;
173 h->compare_func = compare_func ? compare_func : trivial_compare_func;
176 h->iterate_list_head = h->iterate_list_tail = NULL;
183 int hashmap_ensure_allocated(Hashmap **h, hash_func_t hash_func, compare_func_t compare_func) {
189 if (!(*h = hashmap_new(hash_func, compare_func)))
195 static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
199 /* Insert into hash table */
200 e->bucket_next = BY_HASH(h)[hash];
201 e->bucket_previous = NULL;
202 if (BY_HASH(h)[hash])
203 BY_HASH(h)[hash]->bucket_previous = e;
204 BY_HASH(h)[hash] = e;
206 /* Insert into iteration list */
207 e->iterate_previous = h->iterate_list_tail;
208 e->iterate_next = NULL;
209 if (h->iterate_list_tail) {
210 assert(h->iterate_list_head);
211 h->iterate_list_tail->iterate_next = e;
213 assert(!h->iterate_list_head);
214 h->iterate_list_head = e;
216 h->iterate_list_tail = e;
219 assert(h->n_entries >= 1);
222 static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
226 /* Remove from iteration list */
228 e->iterate_next->iterate_previous = e->iterate_previous;
230 h->iterate_list_tail = e->iterate_previous;
232 if (e->iterate_previous)
233 e->iterate_previous->iterate_next = e->iterate_next;
235 h->iterate_list_head = e->iterate_next;
237 /* Remove from hash table bucket list */
239 e->bucket_next->bucket_previous = e->bucket_previous;
241 if (e->bucket_previous)
242 e->bucket_previous->bucket_next = e->bucket_next;
244 BY_HASH(h)[hash] = e->bucket_next;
246 assert(h->n_entries >= 1);
250 static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
256 hash = h->hash_func(e->key) % NBUCKETS;
258 unlink_entry(h, e, hash);
261 deallocate_tile(&first_entry_tile, e);
266 void hashmap_free(Hashmap*h) {
274 deallocate_tile(&first_hashmap_tile, h);
279 void hashmap_free_free(Hashmap *h) {
282 while ((p = hashmap_steal_first(h)))
288 void hashmap_clear(Hashmap *h) {
292 while (h->iterate_list_head)
293 remove_entry(h, h->iterate_list_head);
296 static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
297 struct hashmap_entry *e;
299 assert(hash < NBUCKETS);
301 for (e = BY_HASH(h)[hash]; e; e = e->bucket_next)
302 if (h->compare_func(e->key, key) == 0)
308 int hashmap_put(Hashmap *h, const void *key, void *value) {
309 struct hashmap_entry *e;
314 hash = h->hash_func(key) % NBUCKETS;
316 if ((e = hash_scan(h, hash, key))) {
318 if (e->value == value)
325 e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry));
327 e = new(struct hashmap_entry, 1);
335 link_entry(h, e, hash);
340 int hashmap_replace(Hashmap *h, const void *key, void *value) {
341 struct hashmap_entry *e;
346 hash = h->hash_func(key) % NBUCKETS;
348 if ((e = hash_scan(h, hash, key))) {
354 return hashmap_put(h, key, value);
357 void* hashmap_get(Hashmap *h, const void *key) {
359 struct hashmap_entry *e;
364 hash = h->hash_func(key) % NBUCKETS;
366 if (!(e = hash_scan(h, hash, key)))
372 void* hashmap_remove(Hashmap *h, const void *key) {
373 struct hashmap_entry *e;
380 hash = h->hash_func(key) % NBUCKETS;
382 if (!(e = hash_scan(h, hash, key)))
391 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
392 struct hashmap_entry *e;
393 unsigned old_hash, new_hash;
398 old_hash = h->hash_func(old_key) % NBUCKETS;
399 if (!(e = hash_scan(h, old_hash, old_key)))
402 new_hash = h->hash_func(new_key) % NBUCKETS;
403 if (hash_scan(h, new_hash, new_key))
406 unlink_entry(h, e, old_hash);
411 link_entry(h, e, new_hash);
416 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
417 struct hashmap_entry *e, *k;
418 unsigned old_hash, new_hash;
423 old_hash = h->hash_func(old_key) % NBUCKETS;
424 if (!(e = hash_scan(h, old_hash, old_key)))
427 new_hash = h->hash_func(new_key) % NBUCKETS;
429 if ((k = hash_scan(h, new_hash, new_key)))
433 unlink_entry(h, e, old_hash);
438 link_entry(h, e, new_hash);
443 void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
444 struct hashmap_entry *e;
450 hash = h->hash_func(key) % NBUCKETS;
452 if (!(e = hash_scan(h, hash, key)))
455 if (e->value != value)
463 void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
464 struct hashmap_entry *e;
471 if (*i == ITERATOR_LAST)
474 if (*i == ITERATOR_FIRST && !h->iterate_list_head)
477 e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;
480 *i = (Iterator) e->iterate_next;
498 void *hashmap_iterate_backwards(Hashmap *h, Iterator *i, const void **key) {
499 struct hashmap_entry *e;
506 if (*i == ITERATOR_FIRST)
509 if (*i == ITERATOR_LAST && !h->iterate_list_tail)
512 e = *i == ITERATOR_LAST ? h->iterate_list_tail : (struct hashmap_entry*) *i;
514 if (e->iterate_previous)
515 *i = (Iterator) e->iterate_previous;
533 void *hashmap_iterate_skip(Hashmap *h, const void *key, Iterator *i) {
535 struct hashmap_entry *e;
540 hash = h->hash_func(key) % NBUCKETS;
542 if (!(e = hash_scan(h, hash, key)))
550 void* hashmap_first(Hashmap *h) {
555 if (!h->iterate_list_head)
558 return h->iterate_list_head->value;
561 void* hashmap_first_key(Hashmap *h) {
566 if (!h->iterate_list_head)
569 return (void*) h->iterate_list_head->key;
572 void* hashmap_last(Hashmap *h) {
577 if (!h->iterate_list_tail)
580 return h->iterate_list_tail->value;
583 void* hashmap_steal_first(Hashmap *h) {
589 if (!h->iterate_list_head)
592 data = h->iterate_list_head->value;
593 remove_entry(h, h->iterate_list_head);
598 void* hashmap_steal_first_key(Hashmap *h) {
604 if (!h->iterate_list_head)
607 key = (void*) h->iterate_list_head->key;
608 remove_entry(h, h->iterate_list_head);
613 unsigned hashmap_size(Hashmap *h) {
621 bool hashmap_isempty(Hashmap *h) {
626 return h->n_entries == 0;
629 int hashmap_merge(Hashmap *h, Hashmap *other) {
630 struct hashmap_entry *e;
637 for (e = other->iterate_list_head; e; e = e->iterate_next) {
640 if ((r = hashmap_put(h, e->key, e->value)) < 0)
648 void hashmap_move(Hashmap *h, Hashmap *other) {
649 struct hashmap_entry *e, *n;
653 /* The same as hashmap_merge(), but every new item from other
654 * is moved to h. This function is guaranteed to succeed. */
659 for (e = other->iterate_list_head; e; e = n) {
660 unsigned h_hash, other_hash;
664 h_hash = h->hash_func(e->key) % NBUCKETS;
666 if (hash_scan(h, h_hash, e->key))
669 other_hash = other->hash_func(e->key) % NBUCKETS;
671 unlink_entry(other, e, other_hash);
672 link_entry(h, e, h_hash);
676 int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
677 unsigned h_hash, other_hash;
678 struct hashmap_entry *e;
685 h_hash = h->hash_func(key) % NBUCKETS;
686 if (hash_scan(h, h_hash, key))
689 other_hash = other->hash_func(key) % NBUCKETS;
690 if (!(e = hash_scan(other, other_hash, key)))
693 unlink_entry(other, e, other_hash);
694 link_entry(h, e, h_hash);
699 Hashmap *hashmap_copy(Hashmap *h) {
704 if (!(copy = hashmap_new(h->hash_func, h->compare_func)))
707 if (hashmap_merge(copy, h) < 0) {
715 char **hashmap_get_strv(Hashmap *h) {
721 sv = new(char*, h->n_entries+1);
726 HASHMAP_FOREACH(item, h, it)