/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
27 #include "alloc-util.h"
31 #include "process-util.h"
32 #include "random-util.h"
34 #include "siphash24.h"
38 #ifdef ENABLE_DEBUG_HASHMAP

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (the number of
 *   probes in a successful search), and Janson writes about displacement.
 *   C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 *
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */
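
/*
 * Editorial example of Robin Hood insertion with linear probing (a hand-worked
 * trace, not part of the original sources). The displacement from the initial
 * bucket (DIB) is shown next to each entry:
 *
 *         bucket:  0    1    2    3
 *         entry:   A/0  B/0  C/1  -
 *
 * Insert D, which also hashes to bucket 1. At bucket 1, B has DIB 0 and D has
 * travelled distance 0, so probing continues. At bucket 2, C has DIB 1 and D
 * distance 1, continue again. Bucket 3 is free, so D lands there with DIB 2.
 * Had bucket 2 instead held an entry with DIB 0 (less than D's distance of 1),
 * D would have evicted it and the displaced entry would continue probing;
 * this "steal from the rich" rule is what keeps the variance of displacements
 * small.
 */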

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
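
/* Editorial worked instance of the identity used in resize_buckets() below:
 * with INV_KEEP_FREE = 5 the target load factor is 1 - 1/5 = 0.8. For
 * n = 100 entries the bucket count becomes m = n + n/(INV_KEEP_FREE - 1)
 * = 100 + 25 = 125, and indeed 100/125 = 0.8. */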

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT         (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP         (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END   (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL   UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
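
/* Editorial note on the encoding: one raw DIB byte is stored per bucket.
 * Values 0x00..0xfc are literal displacements; anything larger saturates to
 * DIB_RAW_OVERFLOW and the true distance is recomputed from the key's hash
 * when needed (see bucket_calculate_dib() below). E.g. an entry whose initial
 * bucket is 6 but which resides in bucket 9 stores the raw byte 3. */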

#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        char    *storage;                 /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;               /* number of stored entries */
        unsigned n_buckets;               /* number of buckets */

        unsigned idx_lowest_entry;        /* Index below which all buckets are free.
                                             Makes "while(hashmap_steal_first())" loops
                                             O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

void string_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, strlen(p) + 1, state);
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};
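
/* Editorial usage sketch (assumes the hashmap_new()/hashmap_put()/hashmap_get()
 * wrappers declared in hashmap.h):
 *
 *         Hashmap *h = hashmap_new(&string_hash_ops);
 *         if (!h)
 *                 return -ENOMEM;
 *         r = hashmap_put(h, "answer", INT_TO_PTR(42));
 *         ...
 *         n = PTR_TO_INT(hashmap_get(h, "answer"));
 *
 * Note that keys are hashed including the terminating NUL and compared with
 * strcmp(), so any equal string matches, not just the same pointer. */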

void trivial_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(&p, sizeof(p), state);
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};
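
/* Editorial note: trivial_hash_ops hashes the pointer value itself
 * (siphash24_compress(&p, sizeof(p), state)), not what it points to, so two
 * distinct allocations with equal contents are distinct keys. It is also the
 * fallback when a NULL hash_ops is passed (see hashmap_base_new() below).
 * A usage sketch, assuming the set_new()/set_put() wrappers from set.h:
 *
 *         Set *s = set_new(NULL);       <- falls back to trivial_hash_ops
 *         r = set_put(s, some_object);  <- keyed by address, not by contents
 */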

void uint64_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(uint64_t), state);
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;

        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};

#if SIZEOF_DEV_T != 8
void devt_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(dev_t), state);
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;

        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
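
/* Editorial example: with n_buckets(h) == 8, an entry whose initial bucket
 * ("from") is 6 but which sits at idx 1 has wrapped around the end of the
 * table, so its distance is 8 + 1 - 6 = 3 rather than a negative value. */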

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }
580 if (h->type == HASHMAP_TYPE_ORDERED) {
581 OrderedHashmap *lh = (OrderedHashmap*) h;
582 struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);
584 if (le->iterate_next != IDX_NIL)
585 ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
587 lh->iterate_list_tail = le->iterate_previous;
589 if (le->iterate_previous != IDX_NIL)
590 ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
592 lh->iterate_list_head = le->iterate_next;

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
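
/* Editorial example of the backward shift above: suppose buckets 3..6 hold
 * (A, DIB 0) (B, DIB 1) (C, DIB 1) (D, DIB 0) and B (bucket 4) is removed.
 * The stop bucket is 6, since D has DIB 0. C then moves from 5 to 4 and its
 * DIB drops to 0, bucket 5 is marked free, and D stays put. Unlike tombstone
 * schemes, no deleted markers accumulate, so lookups never degrade after
 * heavy churn. */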

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }

                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx) != IDX_NIL; \
             (idx) = hashmap_iterate_entry((h), &(i)))
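
/* Editorial usage sketch: this is the internal analogue of the public
 * HASHMAP_FOREACH() macro from hashmap.h, which iterates values instead of
 * bucket indexes:
 *
 *         Iterator i;
 *         void *v;
 *         HASHMAP_FOREACH(v, h, i)
 *                 do_something(v);
 *
 * Removing the *current* entry is safe inside the loop body; the backward
 * shift it may trigger is compensated in the iterate functions above. */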

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
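
/* Editorial usage sketch: the public hashmap_ensure_allocated() macro in
 * hashmap.h expands to the internal_ function above, allowing lazy
 * allocation at first use:
 *
 *         Hashmap *h = NULL;
 *         r = hashmap_ensure_allocated(&h, &string_hash_ops);
 *         if (r < 0)
 *                 return r;
 *         r = hashmap_put(h, key, value);
 */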

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}

#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);

        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));
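
        /* Editorial example of the sizing above: with entries of, say,
         * 40 bytes plus 1 DIB byte and new_n_buckets = 100, the storage
         * request is 4100 bytes, which log2u_round_up() rounds to
         * 2^13 = 8192; the final bucket count computed below then becomes
         * 8192 / 41 = 199, so the allocation headroom turns into extra
         * buckets rather than being wasted. */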

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
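
/* Editorial note on hashmap_put() return values, following the logic above:
 * 1 for a fresh insertion, 0 when the exact key/value pair is already
 * present (a harmless no-op), and -EEXIST when the key is present but maps
 * to a *different* value, so callers can distinguish "duplicate" from
 * "conflict":
 *
 *         r = hashmap_put(h, key, value);
 *         if (r == -EEXIST)
 *                 ... the key is already claimed by another value ...
 */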

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

#if 0 /// UNNEEDED by elogind
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
#endif // 0

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}

unsigned internal_hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

#if 0 /// UNNEEDED by elogind
void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
#endif // 0
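
/* Editorial note: ordered_hashmap_next() walks the insertion-order list, so
 * given the key of entry k it returns the value inserted immediately after k,
 * or NULL for the last entry - handy for cursor-style traversal without an
 * Iterator. */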

int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;
        int r;

        assert(s);
        assert(p);

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        r = set_consume(s, c);
        if (r == -EEXIST)
                return 0;

        return r;
}

#if 0 /// UNNEEDED by elogind
int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);