1 /* SPDX-License-Identifier: LGPL-2.1+ */
8 #include "alloc-util.h"
13 #include "process-util.h"
14 #include "random-util.h"
16 #include "siphash24.h"
17 #include "string-util.h"
21 #if ENABLE_DEBUG_HASHMAP
27 * Implementation of hashmaps.
28 * Addressing: open
29 * - uses less RAM compared to closed addressing (chaining), because
30 * our entries are small (especially in Sets, which tend to contain
31 * the majority of entries in systemd).
32 * Collision resolution: Robin Hood
33 * - tends to equalize displacement of entries from their optimal buckets.
34 * Probe sequence: linear
35 * - though theoretically worse than random probing/uniform hashing/double
36 * hashing, it is good for cache locality.
39 * Celis, P. 1986. Robin Hood Hashing.
40 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
41 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
42 * - The results are derived for random probing. Suggests deletion with
43 * tombstones and two mean-centered search methods. None of that works
44 * well for linear probing.
46 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
47 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
48 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
49 * http://www.math.uu.se/~svante/papers/sj157.pdf
50 * - Applies to Robin Hood with linear probing. Contains remarks on
51 * the unsuitability of mean-centered search with linear probing.
53 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
54 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
55 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
56 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
57 * in a successful search), and Janson writes about displacement. C = d + 1.
59 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
60 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
61 * - Explanation of backward shift deletion with pictures.
63 * Khuong, P. 2013. The Other Robin Hood Hashing.
64 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
65 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
69 * XXX Ideas for improvement:
70 * For unordered hashmaps, randomize iteration order, similarly to Perl:
71 * http://blog.booking.com/hardening-perls-hash-function.html
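/* An illustrative example of the scheme described above (hypothetical numbers):
 * suppose a new entry X hashes to bucket 2 and, probing linearly, has reached
 * bucket 4, so its distance from its initial bucket (DIB) is 2. If the resident
 * entry in bucket 4 has DIB 0, it is "wealthier" (closer to its ideal bucket),
 * so Robin Hood insertion puts X into bucket 4 and continues probing with the
 * displaced resident instead. Equalizing displacements this way is what keeps
 * long probe sequences rare. */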
74 /* INV_KEEP_FREE = 1 / (1 - max_load_factor)
75 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
76 #define INV_KEEP_FREE 5U
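/* A quick sanity check of the arithmetic (illustrative): with INV_KEEP_FREE == 5
 * the maximum load factor is 1 - 1/5 = 0.8, so a table holding 8 entries needs
 * at least 8 + 8/(5 - 1) = 10 buckets, of which 10/5 = 2 remain free. */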
78 /* Fields common to entries of all hashmap/set types */
79 struct hashmap_base_entry {
83 /* Entry types for specific hashmap/set types
84 * hashmap_base_entry must be at the beginning of each entry struct. */
86 struct plain_hashmap_entry {
87 struct hashmap_base_entry b;
91 struct ordered_hashmap_entry {
92 struct plain_hashmap_entry p;
93 unsigned iterate_next, iterate_previous;
97 struct hashmap_base_entry b;
100 /* In several functions it is advantageous to have the hash table extended
101 * virtually by a couple of additional buckets. We reserve special index values
102 * for these "swap" buckets. */
103 #define _IDX_SWAP_BEGIN (UINT_MAX - 3)
104 #define IDX_PUT (_IDX_SWAP_BEGIN + 0)
105 #define IDX_TMP (_IDX_SWAP_BEGIN + 1)
106 #define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)
108 #define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
109 #define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */
111 assert_cc(IDX_FIRST == _IDX_SWAP_END);
112 assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
114 /* Storage space for the "swap" buckets.
115 * All entry types can fit into an ordered_hashmap_entry.
116 struct swap_entries {
117 struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
120 /* Distance from Initial Bucket */
121 typedef uint8_t dib_raw_t;
122 #define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU) /* indicates DIB value is greater than representable */
123 #define DIB_RAW_REHASH ((dib_raw_t)0xfeU) /* entry yet to be rehashed during in-place resize */
124 #define DIB_RAW_FREE ((dib_raw_t)0xffU) /* a free bucket */
125 #define DIB_RAW_INIT ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */
127 #define DIB_FREE UINT_MAX
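/* How the raw encoding is used (a sketch of bucket_set_dib()/bucket_calculate_dib()
 * below): distances 0..0xfc fit the one-byte raw DIB array literally; any larger
 * distance is stored as DIB_RAW_OVERFLOW and recomputed from the key's hash when
 * actually needed. DIB_FREE is the full-width value passed to bucket_set_dib() to
 * mark a bucket free, which it stores as DIB_RAW_FREE. */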
129 #if ENABLE_DEBUG_HASHMAP
130 struct hashmap_debug_info {
131 LIST_FIELDS(struct hashmap_debug_info, debug_list);
132 unsigned max_entries; /* high watermark of n_entries */
134 /* who allocated this hashmap */
139 /* fields to detect modification while iterating */
140 unsigned put_count; /* counts puts into the hashmap */
141 unsigned rem_count; /* counts removals from hashmap */
142 unsigned last_rem_idx; /* remembers last removal index */
145 /* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
146 static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
147 static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
149 #define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;
151 #else /* !ENABLE_DEBUG_HASHMAP */
152 #define HASHMAP_DEBUG_FIELDS
153 #endif /* ENABLE_DEBUG_HASHMAP */
157 HASHMAP_TYPE_ORDERED,
162 struct _packed_ indirect_storage {
163 void *storage; /* where buckets and DIBs are stored */
164 uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */
166 unsigned n_entries; /* number of stored entries */
167 unsigned n_buckets; /* number of buckets */
169 unsigned idx_lowest_entry; /* Index below which all buckets are free.
170 Makes "while(hashmap_steal_first())" loops
171 O(n) instead of O(n^2) for unordered hashmaps. */
172 uint8_t _pad[3]; /* padding for the whole HashmapBase */
173 /* The bitfields in HashmapBase complete the alignment of the whole thing. */
176 struct direct_storage {
177 /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
178 * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
179 * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
180 uint8_t storage[sizeof(struct indirect_storage)];
183 #define DIRECT_BUCKETS(entry_t) \
184 (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
186 /* We should be able to store at least one entry directly. */
187 assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);
189 /* We have 3 bits for n_direct_entries. */
190 assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
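/* Worked example for the assertions above, assuming a typical LP64 ABI: direct
 * storage is sizeof(struct indirect_storage) == 39 bytes; a set_entry plus its DIB
 * byte is 8 + 1 bytes, so DIRECT_BUCKETS(struct set_entry) == 39 / 9 == 4, which
 * fits the 3-bit n_direct_entries field; an ordered_hashmap_entry plus DIB is
 * 24 + 1 bytes, so DIRECT_BUCKETS(struct ordered_hashmap_entry) == 39 / 25 == 1. */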
192 /* Hashmaps with directly stored entries all use this shared hash key.
193 * It's no big deal if the key is guessed, because there can be only
194 * a handful of directly stored entries in a hashmap. When a hashmap
195 * outgrows direct storage, it gets its own key for indirect storage. */
196 static uint8_t shared_hash_key[HASH_KEY_SIZE];
197 static bool shared_hash_key_initialized;
199 /* Fields that all hashmap/set types must have */
201 const struct hash_ops *hash_ops; /* hash and compare ops to use */
204 struct indirect_storage indirect; /* if has_indirect */
205 struct direct_storage direct; /* if !has_indirect */
208 enum HashmapType type:2; /* HASHMAP_TYPE_* */
209 bool has_indirect:1; /* whether indirect storage is used */
210 unsigned n_direct_entries:3; /* Number of entries in direct storage.
211 * Only valid if !has_indirect. */
212 bool from_pool:1; /* whether this hashmap was allocated from a mempool */
213 bool dirty:1; /* whether dirtied since last iterated_cache_get() */
214 bool cached:1; /* whether this hashmap is being cached */
215 HASHMAP_DEBUG_FIELDS /* optional hashmap_debug_info */
218 /* Specific hash types
219 * HashmapBase must be at the beginning of each hashmap struct. */
222 struct HashmapBase b;
225 struct OrderedHashmap {
226 struct HashmapBase b;
227 unsigned iterate_list_head, iterate_list_tail;
231 struct HashmapBase b;
234 typedef struct CacheMem {
236 size_t n_populated, n_allocated;
240 struct IteratedCache {
241 HashmapBase *hashmap;
242 CacheMem keys, values;
245 DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
246 DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
247 /* No need for a separate Set pool */
248 assert_cc(sizeof(Hashmap) == sizeof(Set));
250 struct hashmap_type_info {
253 struct mempool *mempool;
254 unsigned n_direct_buckets;
257 static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
258 [HASHMAP_TYPE_PLAIN] = {
259 .head_size = sizeof(Hashmap),
260 .entry_size = sizeof(struct plain_hashmap_entry),
261 .mempool = &hashmap_pool,
262 .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
264 [HASHMAP_TYPE_ORDERED] = {
265 .head_size = sizeof(OrderedHashmap),
266 .entry_size = sizeof(struct ordered_hashmap_entry),
267 .mempool = &ordered_hashmap_pool,
268 .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
270 [HASHMAP_TYPE_SET] = {
271 .head_size = sizeof(Set),
272 .entry_size = sizeof(struct set_entry),
273 .mempool = &hashmap_pool,
274 .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
279 __attribute__((destructor)) static void cleanup_pools(void) {
280 _cleanup_free_ char *t = NULL;
283 /* Be nice to valgrind */
285 /* The pool is only allocated by the main thread, but the memory can
286 * be passed to other threads. Let's clean up if we are the main thread
287 * and no other threads are live. */
288 if (!is_main_thread())
291 r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
292 if (r < 0 || !streq(t, "1"))
295 mempool_drop(&hashmap_pool);
296 mempool_drop(&ordered_hashmap_pool);
300 static unsigned n_buckets(HashmapBase *h) {
301 return h->has_indirect ? h->indirect.n_buckets
302 : hashmap_type_info[h->type].n_direct_buckets;
305 static unsigned n_entries(HashmapBase *h) {
306 return h->has_indirect ? h->indirect.n_entries
307 : h->n_direct_entries;
310 static void n_entries_inc(HashmapBase *h) {
312 h->indirect.n_entries++;
314 h->n_direct_entries++;
317 static void n_entries_dec(HashmapBase *h) {
319 h->indirect.n_entries--;
321 h->n_direct_entries--;
324 static void *storage_ptr(HashmapBase *h) {
325 return h->has_indirect ? h->indirect.storage
329 static uint8_t *hash_key(HashmapBase *h) {
330 return h->has_indirect ? h->indirect.hash_key
334 static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
335 struct siphash state;
338 siphash24_init(&state, hash_key(h));
340 h->hash_ops->hash(p, &state);
342 hash = siphash24_finalize(&state);
344 return (unsigned) (hash % n_buckets(h));
346 #define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
348 static inline void base_set_dirty(HashmapBase *h) {
351 #define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))
353 static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
354 static uint8_t current[HASH_KEY_SIZE];
355 static bool current_initialized = false;
357 /* Returns a hash function key to use. In order to keep things
358 * fast we will not generate a new key each time we allocate a
359 * new hash table. Instead, we'll just reuse the most recently
360 * generated one, except if we never generated one or when we
361 * are rehashing an entire hash table because we reached a fill level limit. */
364 if (!current_initialized || !reuse_is_ok) {
365 random_bytes(current, sizeof(current));
366 current_initialized = true;
369 memcpy(hash_key, current, sizeof(current));
372 static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
373 return (struct hashmap_base_entry*)
374 ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
377 static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
378 return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
381 static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
382 return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
385 static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
386 return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
389 static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
390 return &swap->e[idx - _IDX_SWAP_BEGIN];
393 /* Returns a pointer to the bucket at index idx.
394 * Understands real indexes and swap indexes, hence "_virtual". */
395 static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
397 if (idx < _IDX_SWAP_BEGIN)
398 return bucket_at(h, idx);
400 if (idx < _IDX_SWAP_END)
401 return &bucket_at_swap(swap, idx)->p.b;
403 assert_not_reached("Invalid index");
406 static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
408 ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
411 static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
412 return idx >= from ? idx - from
413 : n_buckets(h) + idx - from;
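/* E.g. (illustrative) with n_buckets(h) == 8, an entry whose initial bucket is 6
 * but which sits in bucket 1 wrapped around the end of the table; the second
 * branch yields the correct distance 8 + 1 - 6 == 3. */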
416 static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
417 unsigned initial_bucket;
419 if (raw_dib == DIB_RAW_FREE)
422 if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
426 * Having an overflow DIB value is very unlikely. The hash function
427 * would have to be bad. For example, in a table of size 2^24 filled
428 * to load factor 0.9 the maximum observed DIB is only about 60.
429 * In theory (assuming I used Maxima correctly), for an infinite size
430 * hash table with load factor 0.8 the probability of a given entry
431 * having DIB > 40 is 1.9e-8.
432 * This returns the correct DIB value by recomputing the hash value in
433 * the unlikely case. XXX Hitting this case could be a hint to rehash.
435 initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
436 return bucket_distance(h, idx, initial_bucket);
439 static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
440 dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
443 static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
446 dibs = dib_raw_ptr(h);
448 for ( ; idx < n_buckets(h); idx++)
449 if (dibs[idx] != DIB_RAW_FREE)
455 static void bucket_mark_free(HashmapBase *h, unsigned idx) {
456 memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
457 bucket_set_dib(h, idx, DIB_FREE);
460 static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
461 unsigned from, unsigned to) {
462 struct hashmap_base_entry *e_from, *e_to;
466 e_from = bucket_at_virtual(h, swap, from);
467 e_to = bucket_at_virtual(h, swap, to);
469 memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);
471 if (h->type == HASHMAP_TYPE_ORDERED) {
472 OrderedHashmap *lh = (OrderedHashmap*) h;
473 struct ordered_hashmap_entry *le, *le_to;
475 le_to = (struct ordered_hashmap_entry*) e_to;
477 if (le_to->iterate_next != IDX_NIL) {
478 le = (struct ordered_hashmap_entry*)
479 bucket_at_virtual(h, swap, le_to->iterate_next);
480 le->iterate_previous = to;
483 if (le_to->iterate_previous != IDX_NIL) {
484 le = (struct ordered_hashmap_entry*)
485 bucket_at_virtual(h, swap, le_to->iterate_previous);
486 le->iterate_next = to;
489 if (lh->iterate_list_head == from)
490 lh->iterate_list_head = to;
491 if (lh->iterate_list_tail == from)
492 lh->iterate_list_tail = to;
496 static unsigned next_idx(HashmapBase *h, unsigned idx) {
497 return (idx + 1U) % n_buckets(h);
500 static unsigned prev_idx(HashmapBase *h, unsigned idx) {
501 return (n_buckets(h) + idx - 1U) % n_buckets(h);
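/* The added n_buckets(h) keeps the unsigned arithmetic from wrapping, e.g. with
 * 8 buckets prev_idx(h, 0) == (8 + 0 - 1) % 8 == 7 rather than underflowing. */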
504 static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
507 case HASHMAP_TYPE_PLAIN:
508 case HASHMAP_TYPE_ORDERED:
509 return ((struct plain_hashmap_entry*)e)->value;
511 case HASHMAP_TYPE_SET:
512 return (void*) e->key;
515 assert_not_reached("Unknown hashmap type");
519 static void base_remove_entry(HashmapBase *h, unsigned idx) {
520 unsigned left, right, prev, dib;
521 dib_raw_t raw_dib, *dibs;
523 dibs = dib_raw_ptr(h);
524 assert(dibs[idx] != DIB_RAW_FREE);
526 #if ENABLE_DEBUG_HASHMAP
527 h->debug.rem_count++;
528 h->debug.last_rem_idx = idx;
532 /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
533 for (right = next_idx(h, left); ; right = next_idx(h, right)) {
534 raw_dib = dibs[right];
535 if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
538 /* The buckets are not supposed to be all occupied and with DIB > 0.
539 * That would mean we could make everyone better off by shifting them
540 * backward. This scenario is impossible. */
541 assert(left != right);
544 if (h->type == HASHMAP_TYPE_ORDERED) {
545 OrderedHashmap *lh = (OrderedHashmap*) h;
546 struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);
548 if (le->iterate_next != IDX_NIL)
549 ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
551 lh->iterate_list_tail = le->iterate_previous;
553 if (le->iterate_previous != IDX_NIL)
554 ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
556 lh->iterate_list_head = le->iterate_next;
559 /* Now shift all buckets in the interval (left, right) one step backwards */
560 for (prev = left, left = next_idx(h, left); left != right;
561 prev = left, left = next_idx(h, left)) {
562 dib = bucket_calculate_dib(h, left, dibs[left]);
564 bucket_move_entry(h, NULL, left, prev);
565 bucket_set_dib(h, prev, dib - 1);
568 bucket_mark_free(h, prev);
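/* A small illustration of the backward shift above (hypothetical contents):
 * removing A from [ A(dib 0) | B(dib 1) | C(dib 1) | free ] shifts B and C one
 * bucket to the left with their DIBs decremented, leaving
 * [ B(dib 0) | C(dib 0) | free | free ]. No tombstone is left behind, so lookups
 * never have to skip over deleted slots. */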
572 #define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
574 static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
575 struct ordered_hashmap_entry *e;
581 if (i->idx == IDX_NIL)
584 if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
587 if (i->idx == IDX_FIRST) {
588 idx = h->iterate_list_head;
589 e = ordered_bucket_at(h, idx);
592 e = ordered_bucket_at(h, idx);
594 * We allow removing the current entry while iterating, but removal may cause
595 * a backward shift. The next entry may thus move one bucket to the left.
596 * To detect when it happens, we remember the key pointer of the entry we were
597 * going to iterate next. If it does not match, there was a backward shift.
599 if (e->p.b.key != i->next_key) {
600 idx = prev_idx(HASHMAP_BASE(h), idx);
601 e = ordered_bucket_at(h, idx);
603 assert(e->p.b.key == i->next_key);
606 #if ENABLE_DEBUG_HASHMAP
610 if (e->iterate_next != IDX_NIL) {
611 struct ordered_hashmap_entry *n;
612 i->idx = e->iterate_next;
613 n = ordered_bucket_at(h, i->idx);
614 i->next_key = n->p.b.key;
625 static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
631 if (i->idx == IDX_NIL)
634 if (i->idx == IDX_FIRST) {
635 /* fast forward to the first occupied bucket */
636 if (h->has_indirect) {
637 i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
638 h->indirect.idx_lowest_entry = i->idx;
640 i->idx = skip_free_buckets(h, 0);
642 if (i->idx == IDX_NIL)
645 struct hashmap_base_entry *e;
649 e = bucket_at(h, i->idx);
651 * We allow removing the current entry while iterating, but removal may cause
652 * a backward shift. The next entry may thus move one bucket to the left.
653 * To detect when it happens, we remember the key pointer of the entry we were
654 * going to iterate next. If it does not match, there was a backward shift.
656 if (e->key != i->next_key)
657 e = bucket_at(h, --i->idx);
659 assert(e->key == i->next_key);
663 #if ENABLE_DEBUG_HASHMAP
667 i->idx = skip_free_buckets(h, i->idx + 1);
668 if (i->idx != IDX_NIL)
669 i->next_key = bucket_at(h, i->idx)->key;
680 static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
686 #if ENABLE_DEBUG_HASHMAP
687 if (i->idx == IDX_FIRST) {
688 i->put_count = h->debug.put_count;
689 i->rem_count = h->debug.rem_count;
691 /* While iterating, must not add any new entries */
692 assert(i->put_count == h->debug.put_count);
693 /* ... or remove entries other than the current one */
694 assert(i->rem_count == h->debug.rem_count ||
695 (i->rem_count == h->debug.rem_count - 1 &&
696 i->prev_idx == h->debug.last_rem_idx));
697 /* Reset our removals counter */
698 i->rem_count = h->debug.rem_count;
702 return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
703 : hashmap_iterate_in_internal_order(h, i);
706 bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
707 struct hashmap_base_entry *e;
711 idx = hashmap_iterate_entry(h, i);
712 if (idx == IDX_NIL) {
721 e = bucket_at(h, idx);
722 data = entry_value(h, e);
731 bool set_iterate(Set *s, Iterator *i, void **value) {
732 return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
735 #define HASHMAP_FOREACH_IDX(idx, h, i) \
736 for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
738 (idx) = hashmap_iterate_entry((h), &(i)))
740 IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h) {
741 IteratedCache *cache;
749 cache = new0(IteratedCache, 1);
759 static void reset_direct_storage(HashmapBase *h) {
760 const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
763 assert(!h->has_indirect);
765 p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
766 memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
769 static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
771 const struct hashmap_type_info *hi = &hashmap_type_info[type];
774 use_pool = is_main_thread();
776 h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
782 h->from_pool = use_pool;
783 h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
785 if (type == HASHMAP_TYPE_ORDERED) {
786 OrderedHashmap *lh = (OrderedHashmap*)h;
787 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
790 reset_direct_storage(h);
792 if (!shared_hash_key_initialized) {
793 random_bytes(shared_hash_key, sizeof(shared_hash_key));
794 shared_hash_key_initialized = true;
797 #if ENABLE_DEBUG_HASHMAP
798 h->debug.func = func;
799 h->debug.file = file;
800 h->debug.line = line;
801 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
802 LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
803 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
809 Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
810 return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
813 OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
814 return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
817 Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
818 return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
821 static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
822 enum HashmapType type HASHMAP_DEBUG_PARAMS) {
830 q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
838 int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
839 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
842 int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
843 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
846 int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
847 return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
850 static void hashmap_free_no_clear(HashmapBase *h) {
851 assert(!h->has_indirect);
852 assert(!h->n_direct_entries);
854 #if ENABLE_DEBUG_HASHMAP
855 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
856 LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
857 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
861 mempool_free_tile(hashmap_type_info[h->type].mempool, h);
866 HashmapBase *internal_hashmap_free(HashmapBase *h) {
868 /* Free the hashmap, but nothing in it */
871 internal_hashmap_clear(h);
872 hashmap_free_no_clear(h);
878 HashmapBase *internal_hashmap_free_free(HashmapBase *h) {
880 /* Free the hashmap and all data objects in it, but not the
884 internal_hashmap_clear_free(h);
885 hashmap_free_no_clear(h);
891 Hashmap *hashmap_free_free_free(Hashmap *h) {
893 /* Free the hashmap and all data and key objects in it */
896 hashmap_clear_free_free(h);
897 hashmap_free_no_clear(HASHMAP_BASE(h));
903 void internal_hashmap_clear(HashmapBase *h) {
907 if (h->has_indirect) {
908 free(h->indirect.storage);
909 h->has_indirect = false;
912 h->n_direct_entries = 0;
913 reset_direct_storage(h);
915 if (h->type == HASHMAP_TYPE_ORDERED) {
916 OrderedHashmap *lh = (OrderedHashmap*) h;
917 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
923 void internal_hashmap_clear_free(HashmapBase *h) {
929 for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
930 idx = skip_free_buckets(h, idx + 1))
931 free(entry_value(h, bucket_at(h, idx)));
933 internal_hashmap_clear(h);
936 void hashmap_clear_free_free(Hashmap *h) {
942 for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
943 idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
944 struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
945 free((void*)e->b.key);
949 internal_hashmap_clear(HASHMAP_BASE(h));
952 static int resize_buckets(HashmapBase *h, unsigned entries_add);
955 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
956 * Performs Robin Hood swaps as it goes. The entry to put must be placed
957 * by the caller into swap slot IDX_PUT.
958 * If used for in-place resizing, may leave a displaced entry in swap slot
959 * IDX_PUT. Caller must rehash it next.
960 * Returns: true if it left a displaced entry to rehash next in IDX_PUT, false otherwise. */
963 static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
964 struct swap_entries *swap) {
965 dib_raw_t raw_dib, *dibs;
966 unsigned dib, distance;
968 #if ENABLE_DEBUG_HASHMAP
969 h->debug.put_count++;
972 dibs = dib_raw_ptr(h);
974 for (distance = 0; ; distance++) {
976 if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
977 if (raw_dib == DIB_RAW_REHASH)
978 bucket_move_entry(h, swap, idx, IDX_TMP);
980 if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
981 h->indirect.idx_lowest_entry = idx;
983 bucket_set_dib(h, idx, distance);
984 bucket_move_entry(h, swap, IDX_PUT, idx);
985 if (raw_dib == DIB_RAW_REHASH) {
986 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
993 dib = bucket_calculate_dib(h, idx, raw_dib);
995 if (dib < distance) {
996 /* Found a wealthier entry. Go Robin Hood! */
997 bucket_set_dib(h, idx, distance);
999 /* swap the entries */
1000 bucket_move_entry(h, swap, idx, IDX_TMP);
1001 bucket_move_entry(h, swap, IDX_PUT, idx);
1002 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
1007 idx = next_idx(h, idx);
1012 * Puts an entry into a hashmap, boldly: no check whether the key already exists.
1013 * The caller must place the entry (only its key and value, not link indexes)
1014 * in swap slot IDX_PUT.
1015 * Caller must ensure that the key does not exist yet in the hashmap,
1016 * and that no resize is needed if !may_resize.
1017 * Returns: 1 if entry was put successfully.
1018 * -ENOMEM if may_resize==true and resize failed with -ENOMEM.
1019 * Cannot return -ENOMEM if !may_resize.
1021 static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
1022 struct swap_entries *swap, bool may_resize) {
1023 struct ordered_hashmap_entry *new_entry;
1026 assert(idx < n_buckets(h));
1028 new_entry = bucket_at_swap(swap, IDX_PUT);
1031 r = resize_buckets(h, 1);
1035 idx = bucket_hash(h, new_entry->p.b.key);
1037 assert(n_entries(h) < n_buckets(h));
1039 if (h->type == HASHMAP_TYPE_ORDERED) {
1040 OrderedHashmap *lh = (OrderedHashmap*) h;
1042 new_entry->iterate_next = IDX_NIL;
1043 new_entry->iterate_previous = lh->iterate_list_tail;
1045 if (lh->iterate_list_tail != IDX_NIL) {
1046 struct ordered_hashmap_entry *old_tail;
1048 old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
1049 assert(old_tail->iterate_next == IDX_NIL);
1050 old_tail->iterate_next = IDX_PUT;
1053 lh->iterate_list_tail = IDX_PUT;
1054 if (lh->iterate_list_head == IDX_NIL)
1055 lh->iterate_list_head = IDX_PUT;
1058 assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
1061 #if ENABLE_DEBUG_HASHMAP
1062 h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
1069 #define hashmap_put_boldly(h, idx, swap, may_resize) \
1070 hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1073 * Returns 0 if resize is not needed.
1074 * 1 if successfully resized.
1075 * -ENOMEM on allocation failure.
1077 static int resize_buckets(HashmapBase *h, unsigned entries_add) {
1078 struct swap_entries swap;
1080 dib_raw_t *old_dibs, *new_dibs;
1081 const struct hashmap_type_info *hi;
1082 unsigned idx, optimal_idx;
1083 unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
1089 hi = &hashmap_type_info[h->type];
1090 new_n_entries = n_entries(h) + entries_add;
1093 if (_unlikely_(new_n_entries < entries_add))
1096 /* For direct storage we allow 100% load, because it's tiny. */
1097 if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
1101 * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
1102 * From this it follows that m = n + n/(INV_KEEP_FREE - 1).
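 * (Derivation: n/m = (INV_KEEP_FREE - 1)/INV_KEEP_FREE, hence
 * m = n * INV_KEEP_FREE / (INV_KEEP_FREE - 1) = n + n/(INV_KEEP_FREE - 1).)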
1104 new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
1106 if (_unlikely_(new_n_buckets < new_n_entries))
1109 if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
1112 old_n_buckets = n_buckets(h);
1114 if (_likely_(new_n_buckets <= old_n_buckets))
1117 new_shift = log2u_round_up(MAX(
1118 new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
1119 2 * sizeof(struct direct_storage)));
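/* For instance (illustrative, using the 64-bit sizes noted earlier): growing an
 * OrderedHashmap to new_n_buckets == 10 needs 10 * (24 + 1) == 250 bytes, which
 * log2u_round_up() rounds to an allocation of 1 << 8 == 256 bytes; n_buckets is
 * then recomputed below as 256 / 25 == 10 from the actual allocation size. */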
1121 /* Realloc storage (buckets and DIB array). */
1122 new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
1127 /* Must upgrade direct to indirect storage. */
1128 if (!h->has_indirect) {
1129 memcpy(new_storage, h->direct.storage,
1130 old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
1131 h->indirect.n_entries = h->n_direct_entries;
1132 h->indirect.idx_lowest_entry = 0;
1133 h->n_direct_entries = 0;
1136 /* Get a new hash key. If we've just upgraded to indirect storage,
1137 * allow reusing a previously generated key. It's still a different key
1138 * from the shared one that we used for direct storage. */
1139 get_hash_key(h->indirect.hash_key, !h->has_indirect);
1141 h->has_indirect = true;
1142 h->indirect.storage = new_storage;
1143 h->indirect.n_buckets = (1U << new_shift) /
1144 (hi->entry_size + sizeof(dib_raw_t));
1146 old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
1147 new_dibs = dib_raw_ptr(h);
1150 * Move the DIB array to the new place, replacing valid DIB values with
1151 * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
1152 * Note: Overlap is not possible, because we have at least doubled the
1153 * number of buckets and dib_raw_t is smaller than any entry type.
1155 for (idx = 0; idx < old_n_buckets; idx++) {
1156 assert(old_dibs[idx] != DIB_RAW_REHASH);
1157 new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
1161 /* Zero the area of newly added entries (including the old DIB area) */
1162 memzero(bucket_at(h, old_n_buckets),
1163 (n_buckets(h) - old_n_buckets) * hi->entry_size);
1165 /* The upper half of the new DIB array needs initialization */
1166 memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
1167 (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));
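/* E.g. (hypothetical contents) an old DIB array of [ 2 | FREE | 0 ] becomes
 * [ REHASH | FREE | REHASH ] in its new location, and the loop below then
 * re-inserts exactly the two marked entries under the new hash key and the new
 * bucket count. */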
1169 /* Rehash entries that need it */
1171 for (idx = 0; idx < old_n_buckets; idx++) {
1172 if (new_dibs[idx] != DIB_RAW_REHASH)
1175 optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);
1178 * Not much to do if by luck the entry hashes to its current
1179 * location. Just set its DIB.
1181 if (optimal_idx == idx) {
1187 new_dibs[idx] = DIB_RAW_FREE;
1188 bucket_move_entry(h, &swap, idx, IDX_PUT);
1189 /* bucket_move_entry does not clear the source */
1190 memzero(bucket_at(h, idx), hi->entry_size);
1194 * Find the new bucket for the current entry. This may make
1195 * another entry homeless and load it into IDX_PUT.
1197 rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
1200 /* Did the current entry displace another one? */
1202 optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
1203 } while (rehash_next);
1206 assert(n_rehashed == n_entries(h));
1212 * Finds an entry with a matching key
1213 * Returns: index of the found entry, or IDX_NIL if not found.
1215 static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
1216 struct hashmap_base_entry *e;
1217 unsigned dib, distance;
1218 dib_raw_t *dibs = dib_raw_ptr(h);
1220 assert(idx < n_buckets(h));
1222 for (distance = 0; ; distance++) {
1223 if (dibs[idx] == DIB_RAW_FREE)
1226 dib = bucket_calculate_dib(h, idx, dibs[idx]);
1230 if (dib == distance) {
1231 e = bucket_at(h, idx);
1232 if (h->hash_ops->compare(e->key, key) == 0)
1236 idx = next_idx(h, idx);
1239 #define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
1241 int hashmap_put(Hashmap *h, const void *key, void *value) {
1242 struct swap_entries swap;
1243 struct plain_hashmap_entry *e;
1248 hash = bucket_hash(h, key);
1249 idx = bucket_scan(h, hash, key);
1250 if (idx != IDX_NIL) {
1251 e = plain_bucket_at(h, idx);
1252 if (e->value == value)
1257 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1260 return hashmap_put_boldly(h, hash, &swap, true);
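/* Typical call-site handling, as an illustrative sketch assuming the usual return
 * convention of this function (0 when the exact key/value pair is already present,
 * -EEXIST when the key is mapped to a different value, 1 when newly inserted,
 * -ENOMEM when a needed resize fails):
 *
 *     r = hashmap_put(h, key, value);
 *     if (r < 0)
 *             return r;
 *     if (r > 0)
 *             log_debug("stored a new entry");
 */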
1263 int set_put(Set *s, const void *key) {
1264 struct swap_entries swap;
1265 struct hashmap_base_entry *e;
1270 hash = bucket_hash(s, key);
1271 idx = bucket_scan(s, hash, key);
1275 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1277 return hashmap_put_boldly(s, hash, &swap, true);
1280 int hashmap_replace(Hashmap *h, const void *key, void *value) {
1281 struct swap_entries swap;
1282 struct plain_hashmap_entry *e;
1287 hash = bucket_hash(h, key);
1288 idx = bucket_scan(h, hash, key);
1289 if (idx != IDX_NIL) {
1290 e = plain_bucket_at(h, idx);
1291 #if ENABLE_DEBUG_HASHMAP
1292 /* Although the key is equal, the key pointer may have changed,
1293 * and this would break our assumption for iterating. So count
1294 * this operation as incompatible with iteration. */
1295 if (e->b.key != key) {
1296 h->b.debug.put_count++;
1297 h->b.debug.rem_count++;
1298 h->b.debug.last_rem_idx = idx;
1303 hashmap_set_dirty(h);
1308 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1311 return hashmap_put_boldly(h, hash, &swap, true);
1314 int hashmap_update(Hashmap *h, const void *key, void *value) {
1315 struct plain_hashmap_entry *e;
1320 hash = bucket_hash(h, key);
1321 idx = bucket_scan(h, hash, key);
1325 e = plain_bucket_at(h, idx);
1327 hashmap_set_dirty(h);
1332 void *internal_hashmap_get(HashmapBase *h, const void *key) {
1333 struct hashmap_base_entry *e;
1339 hash = bucket_hash(h, key);
1340 idx = bucket_scan(h, hash, key);
1344 e = bucket_at(h, idx);
1345 return entry_value(h, e);
1348 void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
1349 struct plain_hashmap_entry *e;
1355 hash = bucket_hash(h, key);
1356 idx = bucket_scan(h, hash, key);
1360 e = plain_bucket_at(h, idx);
1362 *key2 = (void*) e->b.key;
1367 bool internal_hashmap_contains(HashmapBase *h, const void *key) {
1373 hash = bucket_hash(h, key);
1374 return bucket_scan(h, hash, key) != IDX_NIL;
1377 void *internal_hashmap_remove(HashmapBase *h, const void *key) {
1378 struct hashmap_base_entry *e;
1385 hash = bucket_hash(h, key);
1386 idx = bucket_scan(h, hash, key);
1390 e = bucket_at(h, idx);
1391 data = entry_value(h, e);
1392 remove_entry(h, idx);
1397 void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1398 struct plain_hashmap_entry *e;
1408 hash = bucket_hash(h, key);
1409 idx = bucket_scan(h, hash, key);
1410 if (idx == IDX_NIL) {
1416 e = plain_bucket_at(h, idx);
1419 *rkey = (void*) e->b.key;
1421 remove_entry(h, idx);
1426 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1427 struct swap_entries swap;
1428 struct plain_hashmap_entry *e;
1429 unsigned old_hash, new_hash, idx;
1434 old_hash = bucket_hash(h, old_key);
1435 idx = bucket_scan(h, old_hash, old_key);
1439 new_hash = bucket_hash(h, new_key);
1440 if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
1443 remove_entry(h, idx);
1445 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1448 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1453 #if 0 /// UNNEEDED by elogind
1454 int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
1455 struct swap_entries swap;
1456 struct hashmap_base_entry *e;
1457 unsigned old_hash, new_hash, idx;
1462 old_hash = bucket_hash(s, old_key);
1463 idx = bucket_scan(s, old_hash, old_key);
1467 new_hash = bucket_hash(s, new_key);
1468 if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
1471 remove_entry(s, idx);
1473 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1475 assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
1481 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1482 struct swap_entries swap;
1483 struct plain_hashmap_entry *e;
1484 unsigned old_hash, new_hash, idx_old, idx_new;
1489 old_hash = bucket_hash(h, old_key);
1490 idx_old = bucket_scan(h, old_hash, old_key);
1491 if (idx_old == IDX_NIL)
1494 old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
1496 new_hash = bucket_hash(h, new_key);
1497 idx_new = bucket_scan(h, new_hash, new_key);
1498 if (idx_new != IDX_NIL)
1499 if (idx_old != idx_new) {
1500 remove_entry(h, idx_new);
1501 /* Compensate for a possible backward shift. */
1502 if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
1503 idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
1504 assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
1507 remove_entry(h, idx_old);
1509 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1512 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1517 void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
1518 struct plain_hashmap_entry *e;
1524 hash = bucket_hash(h, key);
1525 idx = bucket_scan(h, hash, key);
1529 e = plain_bucket_at(h, idx);
1530 if (e->value != value)
1533 remove_entry(h, idx);
1538 static unsigned find_first_entry(HashmapBase *h) {
1539 Iterator i = ITERATOR_FIRST;
1541 if (!h || !n_entries(h))
1544 return hashmap_iterate_entry(h, &i);
1547 void *internal_hashmap_first(HashmapBase *h) {
1550 idx = find_first_entry(h);
1554 return entry_value(h, bucket_at(h, idx));
1557 void *internal_hashmap_first_key(HashmapBase *h) {
1558 struct hashmap_base_entry *e;
1561 idx = find_first_entry(h);
1565 e = bucket_at(h, idx);
1566 return (void*) e->key;
1569 void *internal_hashmap_steal_first(HashmapBase *h) {
1570 struct hashmap_base_entry *e;
1574 idx = find_first_entry(h);
1578 e = bucket_at(h, idx);
1579 data = entry_value(h, e);
1580 remove_entry(h, idx);
1585 void *internal_hashmap_steal_first_key(HashmapBase *h) {
1586 struct hashmap_base_entry *e;
1590 idx = find_first_entry(h);
1594 e = bucket_at(h, idx);
1595 key = (void*) e->key;
1596 remove_entry(h, idx);
1601 unsigned internal_hashmap_size(HashmapBase *h) {
1606 return n_entries(h);
1609 unsigned internal_hashmap_buckets(HashmapBase *h) {
1614 return n_buckets(h);
1617 int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
1623 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1624 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
1627 r = hashmap_put(h, pe->b.key, pe->value);
1628 if (r < 0 && r != -EEXIST)
1635 int set_merge(Set *s, Set *other) {
1641 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1642 struct set_entry *se = set_bucket_at(other, idx);
1645 r = set_put(s, se->b.key);
1653 int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
1658 r = resize_buckets(h, entries_add);
1666 * The same as hashmap_merge(), but every new item from other is moved to h.
1667 * Keys already in h are skipped and stay in other.
1668 * Returns: 0 on success.
1669 * -ENOMEM on alloc failure, in which case no move has been done.
1671 int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
1672 struct swap_entries swap;
1673 struct hashmap_base_entry *e, *n;
1683 assert(other->type == h->type);
1686 * This reserves buckets for the worst case, where none of other's
1687 * entries are yet present in h. This is preferable to risking
1688 * an allocation failure in the middle of the move and having to
1689 * roll back or return a partial result.
1691 r = resize_buckets(h, n_entries(other));
1695 HASHMAP_FOREACH_IDX(idx, other, i) {
1698 e = bucket_at(other, idx);
1699 h_hash = bucket_hash(h, e->key);
1700 if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
1703 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1705 if (h->type != HASHMAP_TYPE_SET)
1706 ((struct plain_hashmap_entry*) n)->value =
1707 ((struct plain_hashmap_entry*) e)->value;
1708 assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);
1710 remove_entry(other, idx);
1716 int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
1717 struct swap_entries swap;
1718 unsigned h_hash, other_hash, idx;
1719 struct hashmap_base_entry *e, *n;
1724 h_hash = bucket_hash(h, key);
1725 if (bucket_scan(h, h_hash, key) != IDX_NIL)
1731 assert(other->type == h->type);
1733 other_hash = bucket_hash(other, key);
1734 idx = bucket_scan(other, other_hash, key);
1738 e = bucket_at(other, idx);
1740 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1742 if (h->type != HASHMAP_TYPE_SET)
1743 ((struct plain_hashmap_entry*) n)->value =
1744 ((struct plain_hashmap_entry*) e)->value;
1745 r = hashmap_put_boldly(h, h_hash, &swap, true);
1749 remove_entry(other, idx);
1753 HashmapBase *internal_hashmap_copy(HashmapBase *h) {
1759 copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
1764 case HASHMAP_TYPE_PLAIN:
1765 case HASHMAP_TYPE_ORDERED:
1766 r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
1768 case HASHMAP_TYPE_SET:
1769 r = set_merge((Set*)copy, (Set*)h);
1772 assert_not_reached("Unknown hashmap type");
1776 internal_hashmap_free(copy);
1783 char **internal_hashmap_get_strv(HashmapBase *h) {
1788 sv = new(char*, n_entries(h)+1);
1793 HASHMAP_FOREACH_IDX(idx, h, i)
1794 sv[n++] = entry_value(h, bucket_at(h, idx));
1800 void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
1801 struct ordered_hashmap_entry *e;
1807 hash = bucket_hash(h, key);
1808 idx = bucket_scan(h, hash, key);
1812 e = ordered_bucket_at(h, idx);
1813 if (e->iterate_next == IDX_NIL)
1815 return ordered_bucket_at(h, e->iterate_next)->p.value;
1818 int set_consume(Set *s, void *value) {
1824 r = set_put(s, value);
1831 int set_put_strdup(Set *s, const char *p) {
1837 if (set_contains(s, (char*) p))
1844 return set_consume(s, c);
1847 #if 0 /// UNNEEDED by elogind
1848 int set_put_strdupv(Set *s, char **l) {
1854 STRV_FOREACH(i, l) {
1855 r = set_put_strdup(s, *i);
1865 int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
1875 r = extract_first_word(&p, &word, separators, flags);
1879 r = set_consume(s, word);
1886 /* Expand the cachemem if needed; returns true if it was newly (re)activated. */
1887 static int cachemem_maintain(CacheMem *mem, unsigned size) {
1890 if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) {
1903 int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
1904 bool sync_keys = false, sync_values = false;
1909 assert(cache->hashmap);
1911 size = n_entries(cache->hashmap);
1914 r = cachemem_maintain(&cache->keys, size);
1920 cache->keys.active = false;
1923 r = cachemem_maintain(&cache->values, size);
1929 cache->values.active = false;
1931 if (cache->hashmap->dirty) {
1932 if (cache->keys.active)
1934 if (cache->values.active)
1937 cache->hashmap->dirty = false;
1940 if (sync_keys || sync_values) {
1945 HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
1946 struct hashmap_base_entry *e;
1948 e = bucket_at(cache->hashmap, idx);
1951 cache->keys.ptr[i] = e->key;
1953 cache->values.ptr[i] = entry_value(cache->hashmap, e);
1959 *res_keys = cache->keys.ptr;
1961 *res_values = cache->values.ptr;
1963 *res_n_entries = size;
1968 IteratedCache *iterated_cache_free(IteratedCache *cache) {
1970 free(cache->keys.ptr);
1971 free(cache->values.ptr);