1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
/* NOTE(review): this listing is line-sampled; the struct bodies below
 * are incomplete (key/value fields and the struct headers/footers are
 * elided). */

/* One key/value entry.  Each entry participates in two doubly-linked
 * lists at once: its per-bucket collision chain (bucket_*) and a
 * map-wide list in insertion order (iterate_*), which is what the
 * iteration helpers walk. */
33 struct hashmap_entry {
36 struct hashmap_entry *bucket_next, *bucket_previous;
37 struct hashmap_entry *iterate_next, *iterate_previous;

/* Fields of the Hashmap object itself (its struct header is elided in
 * this view): pluggable hash/compare callbacks plus head and tail of
 * the insertion-order iteration list. */
41 hash_func_t hash_func;
42 compare_func_t compare_func;
44 struct hashmap_entry *iterate_list_head, *iterate_list_tail;
/* The bucket array is stored immediately after the Hashmap header in
 * the same allocation; BY_HASH(h) computes the address of that array
 * (ALIGN keeps the array suitably aligned past the header). */
50 #define BY_HASH(h) ((struct hashmap_entry**) ((uint8_t*) (h) + ALIGN(sizeof(Hashmap))))

/* Simple slab ("pool") allocator state: one pool chain for Hashmap
 * headers and one for entries.  The *_tile pointers head a free list
 * of tiles returned by deallocate_tile().
 * NOTE(review): the pool pointers are non-static while the tile
 * pointers are static -- presumably the pools are referenced from
 * another translation unit; confirm against the full source. */
58 struct pool *first_hashmap_pool = NULL;
59 static void *first_hashmap_tile = NULL;
61 struct pool *first_entry_pool = NULL;
62 static void *first_entry_tile = NULL;
/* Hand out one tile of tile_size bytes: reuse a tile from the free
 * list when available, otherwise carve the next unused tile out of the
 * newest pool, growing a fresh pool when the current one is full.
 * (Several lines are elided in this view.) */
64 static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size) {
/* Free-list pop: the first pointer-sized bytes of a free tile hold the
 * next free tile. */
71 *first_tile = * (void**) (*first_tile);
/* Need a new pool when none exists yet or the newest one is fully
 * used up. */
75 if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
80 n = *first_pool ? (*first_pool)->n_tiles : 0;
/* Size the new pool from the previous tile count (growth factor is in
 * elided lines), rounded up to whole pages, then recompute how many
 * tiles actually fit after the pool header. */
82 size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
83 n = (size - ALIGN(sizeof(struct pool))) / tile_size;
/* Chain the new pool in front of the existing ones. */
89 p->next = *first_pool;
/* Claim the next unused tile index in the newest pool. */
96 i = (*first_pool)->n_used++;
97 
/* Tiles live right after the aligned pool header, back to back. */
98 return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;

/* Return a tile by pushing it onto the free list; the freed memory
 * itself stores the link to the previous list head. */
101 static void deallocate_tile(void **first_tile, void *p) {
102 * (void**) p = *first_tile;
/* Free a chain of pools (body elided in this view -- presumably walks
 * p->next and frees each; confirm against full source). */
108 static void drop_pool(struct pool *p) {

/* Runs automatically at program exit (GCC destructor attribute) and
 * releases both pool chains so leak checkers see no outstanding
 * allocations. */
117 __attribute__((destructor)) static void cleanup_pool(void) {
118 /* Be nice to valgrind */
120 drop_pool(first_hashmap_pool);
121 drop_pool(first_entry_pool);
/* Hash a NUL-terminated string (loop header elided): the classic
 * multiply-by-31 polynomial accumulation over each character. */
126 unsigned string_hash_func(const void *p) {
131 hash = 31 * hash + (unsigned) *c;

/* Compare two keys as C strings (body elided -- presumably strcmp;
 * confirm against full source). */
136 int string_compare_func(const void *a, const void *b) {

/* Hash a pointer key by its numeric value. */
140 unsigned trivial_hash_func(const void *p) {
141 return PTR_TO_UINT(p);

/* Order pointer keys by address; returns -1/0/1 like strcmp. */
144 int trivial_compare_func(const void *a, const void *b) {
145 return a < b ? -1 : (a > b ? 1 : 0);
/* Allocate a new Hashmap.  NULL hash/compare callbacks fall back to
 * the trivial pointer-based ones.  Header and bucket array are one
 * allocation (see BY_HASH).  (Error-handling and zeroing lines are
 * elided in this view.) */
148 Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) {
/* NOTE(review): pool allocation appears gated on running in the main
 * thread -- the pools are not thread-safe; confirm the else-branch in
 * the full source. */
153 b = is_main_thread();
155 size = ALIGN(sizeof(Hashmap)) + NBUCKETS * sizeof(struct hashmap_entry*);
158 h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size);
170 h->hash_func = hash_func ? hash_func : trivial_hash_func;
171 h->compare_func = compare_func ? compare_func : trivial_compare_func;
/* Empty iteration list to start with. */
174 h->iterate_list_head = h->iterate_list_tail = NULL;

/* Lazily create *h on first use; no-op when *h is already non-NULL
 * (guard elided).  Returns negative on allocation failure. */
181 int hashmap_ensure_allocated(Hashmap **h, hash_func_t hash_func, compare_func_t compare_func) {
187 if (!(*h = hashmap_new(hash_func, compare_func)))
/* Attach entry e to hashmap h under the given (pre-computed, modulo
 * NBUCKETS) hash: push it onto the bucket's collision chain and append
 * it to the tail of the insertion-order iteration list. */
193 static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
197 /* Insert into hash table */
/* Push onto the front of the bucket chain. */
198 e->bucket_next = BY_HASH(h)[hash];
199 e->bucket_previous = NULL;
200 if (BY_HASH(h)[hash])
201 BY_HASH(h)[hash]->bucket_previous = e;
202 BY_HASH(h)[hash] = e;
204 /* Insert into iteration list */
/* Append at the tail so iteration order matches insertion order. */
205 e->iterate_previous = h->iterate_list_tail;
206 e->iterate_next = NULL;
207 if (h->iterate_list_tail) {
/* Non-empty list: head must exist too. */
208 assert(h->iterate_list_head);
209 h->iterate_list_tail->iterate_next = e;
/* (else branch -- empty list: e becomes head as well; the 'else' line
 * itself is elided in this view.) */
211 assert(!h->iterate_list_head);
212 h->iterate_list_head = e;
214 h->iterate_list_tail = e;
/* n_entries increment elided; invariant re-checked here. */
217 assert(h->n_entries >= 1);
/* Detach entry e from both of its lists (iteration list and bucket
 * chain) without freeing it; the mirror of link_entry().  Several
 * 'if' headers are elided in this view -- each unconditional-looking
 * assignment below is the else/then arm of an elided conditional. */
220 static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
224 /* Remove from iteration list */
226 e->iterate_next->iterate_previous = e->iterate_previous;
/* e was the tail: tail moves back to its predecessor. */
228 h->iterate_list_tail = e->iterate_previous;
230 if (e->iterate_previous)
231 e->iterate_previous->iterate_next = e->iterate_next;
/* e was the head: head moves forward to its successor. */
233 h->iterate_list_head = e->iterate_next;
235 /* Remove from hash table bucket list */
237 e->bucket_next->bucket_previous = e->bucket_previous;
239 if (e->bucket_previous)
240 e->bucket_previous->bucket_next = e->bucket_next;
/* e was the bucket head: bucket slot points at its successor. */
242 BY_HASH(h)[hash] = e->bucket_next;
/* n_entries decrement elided; checked before decrementing. */
244 assert(h->n_entries >= 1);
/* Unlink an entry and release its memory.  The bucket index is
 * recomputed from the key since callers don't pass it. */
248 static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
254 hash = h->hash_func(e->key) % NBUCKETS;
256 unlink_entry(h, e, hash);
/* NOTE(review): returns the tile to the pool free list here; an
 * elided branch presumably free()s entries that were heap-allocated
 * off the main thread -- confirm against full source. */
259 deallocate_tile(&first_entry_tile, e);

/* Free the map itself.  Does not free keys or values (see
 * hashmap_free_free for values).  Clearing of remaining entries is in
 * elided lines. */
264 void hashmap_free(Hashmap*h) {
272 deallocate_tile(&first_hashmap_tile, h);

/* Like hashmap_free(), but free()s every stored value first by
 * draining the map through hashmap_steal_first(). */
277 void hashmap_free_free(Hashmap *h) {
280 while ((p = hashmap_steal_first(h)))

/* Remove all entries but keep the (empty) map alive.  Walking the
 * iteration head is safe because remove_entry() advances it. */
286 void hashmap_clear(Hashmap *h) {
290 while (h->iterate_list_head)
291 remove_entry(h, h->iterate_list_head);
/* Walk one bucket's collision chain looking for an entry whose key
 * compares equal; returns it, or NULL (return lines elided in this
 * view).  'hash' must already be reduced modulo NBUCKETS. */
294 static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
295 struct hashmap_entry *e;
297 assert(hash < NBUCKETS);
299 for (e = BY_HASH(h)[hash]; e; e = e->bucket_next)
300 if (h->compare_func(e->key, key) == 0)
/* Insert key/value.  If the key already exists the elided branch
 * returns without replacing (0 when the stored value is already the
 * same, presumably -EEXIST otherwise -- confirm); use
 * hashmap_replace() to overwrite. */
306 int hashmap_put(Hashmap *h, const void *key, void *value) {
307 struct hashmap_entry *e;
312 hash = h->hash_func(key) % NBUCKETS;
314 if ((e = hash_scan(h, hash, key))) {
316 if (e->value == value)
/* New entry: pool tile on one path, plain heap allocation on the
 * other (the selecting condition is elided -- presumably the same
 * main-thread test as in hashmap_new). */
323 e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry));
325 e = new(struct hashmap_entry, 1);
333 link_entry(h, e, hash);

/* Like hashmap_put(), but an existing entry for key has its value
 * overwritten (the overwrite lines are elided); falls through to a
 * plain put when the key is absent. */
338 int hashmap_replace(Hashmap *h, const void *key, void *value) {
339 struct hashmap_entry *e;
344 hash = h->hash_func(key) % NBUCKETS;
346 if ((e = hash_scan(h, hash, key))) {
352 return hashmap_put(h, key, value);
/* Look up the value stored under key; NULL when absent (NULL-map guard
 * and return lines are elided in this view). */
355 void* hashmap_get(Hashmap *h, const void *key) {
357 struct hashmap_entry *e;
362 hash = h->hash_func(key) % NBUCKETS;
364 if (!(e = hash_scan(h, hash, key)))

/* Remove the entry for key and return its value; NULL when absent.
 * (The unlink/free/return lines are elided.) */
370 void* hashmap_remove(Hashmap *h, const void *key) {
371 struct hashmap_entry *e;
378 hash = h->hash_func(key) % NBUCKETS;
380 if (!(e = hash_scan(h, hash, key)))
/* Re-key an existing entry: move the entry stored under old_key to
 * new_key (and set value -- assignment elided).  Fails when old_key is
 * missing or new_key is already taken (error returns elided).  The
 * entry object is re-linked, not reallocated, so iteration position is
 * preserved relative to the bucket move. */
389 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
390 struct hashmap_entry *e;
391 unsigned old_hash, new_hash;
396 old_hash = h->hash_func(old_key) % NBUCKETS;
397 if (!(e = hash_scan(h, old_hash, old_key)))
400 new_hash = h->hash_func(new_key) % NBUCKETS;
/* Refuse to clobber an existing entry under new_key. */
401 if (hash_scan(h, new_hash, new_key))
404 unlink_entry(h, e, old_hash);
/* (key/value reassignment elided) */
409 link_entry(h, e, new_hash);

/* Like hashmap_remove_and_put(), but an existing entry under new_key
 * is removed first (the remove_entry(h, k) call is elided) instead of
 * causing a failure. */
414 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
415 struct hashmap_entry *e, *k;
416 unsigned old_hash, new_hash;
421 old_hash = h->hash_func(old_key) % NBUCKETS;
422 if (!(e = hash_scan(h, old_hash, old_key)))
425 new_hash = h->hash_func(new_key) % NBUCKETS;
/* Displace whatever currently occupies new_key (body elided). */
427 if ((k = hash_scan(h, new_hash, new_key)))
431 unlink_entry(h, e, old_hash);
436 link_entry(h, e, new_hash);
/* Conditional removal: remove the entry for key only when its stored
 * value matches the given value; otherwise leave the map untouched.
 * (Return lines and the actual removal are elided in this view.) */
441 void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
442 struct hashmap_entry *e;
448 hash = h->hash_func(key) % NBUCKETS;
450 if (!(e = hash_scan(h, hash, key)))
/* Same key but a different value: do not remove. */
453 if (e->value != value)
/* Advance iterator *i through the map in insertion order, returning
 * each value (and the key through *key when non-NULL -- those lines
 * are elided).  The Iterator encodes either the sentinels
 * ITERATOR_FIRST/ITERATOR_LAST or a pointer to the next entry. */
461 void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
462 struct hashmap_entry *e;
/* Already walked off the end. */
469 if (*i == ITERATOR_LAST)
/* Fresh iterator over an empty map. */
472 if (*i == ITERATOR_FIRST && !h->iterate_list_head)
475 e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;
/* Store the successor for the next call.  NOTE(review): stores
 * e->iterate_next directly -- a NULL next presumably coincides with
 * the end sentinel; confirm ITERATOR_LAST's definition. */
478 *i = (Iterator) e->iterate_next;

/* Mirror of hashmap_iterate(): walks tail-to-head, using
 * ITERATOR_LAST as the start sentinel and ITERATOR_FIRST as the
 * exhausted sentinel. */
496 void *hashmap_iterate_backwards(Hashmap *h, Iterator *i, const void **key) {
497 struct hashmap_entry *e;
504 if (*i == ITERATOR_FIRST)
507 if (*i == ITERATOR_LAST && !h->iterate_list_tail)
510 e = *i == ITERATOR_LAST ? h->iterate_list_tail : (struct hashmap_entry*) *i;
/* Unlike the forward direction, NULL is not stored directly: the
 * elided else branch presumably sets ITERATOR_FIRST at the head. */
512 if (e->iterate_previous)
513 *i = (Iterator) e->iterate_previous;
/* Position iterator *i at the entry for key (so a subsequent
 * hashmap_iterate() continues after it); the iterator/return
 * assignments are elided in this view. */
531 void *hashmap_iterate_skip(Hashmap *h, const void *key, Iterator *i) {
533 struct hashmap_entry *e;
538 hash = h->hash_func(key) % NBUCKETS;
540 if (!(e = hash_scan(h, hash, key)))
/* Value of the oldest (first-inserted) entry, or NULL when empty. */
548 void* hashmap_first(Hashmap *h) {
553 if (!h->iterate_list_head)
556 return h->iterate_list_head->value;

/* Value of the newest (last-inserted) entry, or NULL when empty. */
559 void* hashmap_last(Hashmap *h) {
564 if (!h->iterate_list_tail)
567 return h->iterate_list_tail->value;

/* Remove the oldest entry and hand its value to the caller (who now
 * owns it); NULL when empty.  Used by hashmap_free_free() to drain. */
570 void* hashmap_steal_first(Hashmap *h) {
576 if (!h->iterate_list_head)
579 data = h->iterate_list_head->value;
580 remove_entry(h, h->iterate_list_head);

/* Same as hashmap_steal_first(), but yields the key instead of the
 * value (const is cast away -- ownership passes to the caller). */
585 void* hashmap_steal_first_key(Hashmap *h) {
591 if (!h->iterate_list_head)
594 key = (void*) h->iterate_list_head->key;
595 remove_entry(h, h->iterate_list_head);
/* Number of entries (body elided -- presumably returns h->n_entries
 * with a NULL-map guard; confirm against full source). */
600 unsigned hashmap_size(Hashmap *h) {

/* True when the map holds no entries (NULL-map guard elided). */
608 bool hashmap_isempty(Hashmap *h) {
613 return h->n_entries == 0;

/* Copy every entry of 'other' into 'h' via hashmap_put(); 'other' is
 * left untouched.  Propagates the first negative put() result
 * (EEXIST-style collisions are presumably tolerated in elided lines --
 * confirm). */
616 int hashmap_merge(Hashmap *h, Hashmap *other) {
617 struct hashmap_entry *e;
624 for (e = other->iterate_list_head; e; e = e->iterate_next) {
627 if ((r = hashmap_put(h, e->key, e->value)) < 0)
/* Move entries from 'other' into 'h' without allocating: each entry
 * object is unlinked from 'other' and re-linked into 'h'.  Entries
 * whose key already exists in 'h' stay in 'other' (skip branch
 * elided). */
635 void hashmap_move(Hashmap *h, Hashmap *other) {
636 struct hashmap_entry *e, *n;
640 /* The same as hashmap_merge(), but every new item from other
641 * is moved to h. This function is guaranteed to succeed. */
/* 'n' caches the successor (assignment elided) since e is re-linked
 * inside the loop body. */
646 for (e = other->iterate_list_head; e; e = n) {
647 unsigned h_hash, other_hash;
651 h_hash = h->hash_func(e->key) % NBUCKETS;
/* Key collision in the destination: leave this entry in 'other'. */
653 if (hash_scan(h, h_hash, e->key))
/* Bucket index may differ per map if the hash functions differ. */
656 other_hash = other->hash_func(e->key) % NBUCKETS;
658 unlink_entry(other, e, other_hash);
659 link_entry(h, e, h_hash);

/* Move the single entry for 'key' from 'other' to 'h'.  Fails (error
 * returns elided) when 'h' already has the key or 'other' lacks it. */
663 int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
664 unsigned h_hash, other_hash;
665 struct hashmap_entry *e;
672 h_hash = h->hash_func(key) % NBUCKETS;
673 if (hash_scan(h, h_hash, key))
676 other_hash = other->hash_func(key) % NBUCKETS;
677 if (!(e = hash_scan(other, other_hash, key)))
680 unlink_entry(other, e, other_hash);
681 link_entry(h, e, h_hash);
/* Shallow copy: new map with the same callbacks, entries duplicated
 * via hashmap_merge() (keys/values are shared, not cloned).  On merge
 * failure the elided branch presumably frees the partial copy and
 * returns NULL -- confirm against full source. */
686 Hashmap *hashmap_copy(Hashmap *h) {
691 if (!(copy = hashmap_new(h->hash_func, h->compare_func)))
694 if (hashmap_merge(copy, h) < 0) {
702 char **hashmap_get_strv(Hashmap *h) {
708 sv = new(char*, h->n_entries+1);
713 HASHMAP_FOREACH(item, h, it)