/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <stdlib.h>
#include <errno.h>

#include "util.h"
#include "hashmap.h"
#include "set.h"
#include "macro.h"
#include "siphash24.h"
#include "strv.h"
#include "mempool.h"

#ifdef ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif
/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */
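
/*
 * Worked example of the scheme above (illustrative, not taken from the
 * papers): insert a key K whose initial bucket is 0 into this toy table of
 * four buckets, where DIB is each entry's distance from its initial bucket:
 *
 *         bucket:  0    1    2    3
 *         entry:   A    B    C    free
 *         DIB:     0    0    1    -
 *
 * K probes bucket 0 with distance 0: A's DIB is not smaller, keep going.
 * At bucket 1 with distance 1, B's DIB 0 is smaller, so K is "poorer" and
 * robs the bucket: K is stored there with DIB 1 and B continues probing
 * with its old DIB. At bucket 2, C's DIB 1 is not smaller than B's current
 * distance 1, so B moves on and lands in the free bucket 3 with DIB 2:
 *
 *         bucket:  0    1    2    3
 *         entry:   A    K    C    B
 *         DIB:     0    1    1    2
 *
 * See hashmap_put_robin_hood() below for the real implementation.
 */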
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};
/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU) /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU) /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU) /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
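
/* Example (illustrative): an entry that ends up 300 buckets past its initial
 * bucket cannot represent its DIB in one raw byte; DIB_RAW_OVERFLOW is stored
 * instead and the true distance is recomputed from the key's hash when it is
 * needed, see bucket_calculate_dib() below. */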
#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */
enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};
struct _packed_ indirect_storage {
        char *storage;                   /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;              /* number of stored entries */
        unsigned n_buckets;              /* number of buckets */

        unsigned idx_lowest_entry;       /* Index below which all buckets are free.
                                            Makes "while (hashmap_steal_first())" loops
                                            O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};
struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;
/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};
/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};
DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));
struct hashmap_type_info {
        unsigned head_size;
        unsigned entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, strlen(p), hash_key);
        return (unsigned long) u;
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};

unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
        return (unsigned long) u;
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};

unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
        return (unsigned long) u;
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;
        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};
#if SIZEOF_DEV_T != 8
unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
        return (unsigned long) u;
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif
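
/* Additional hash_ops for other fixed-size key types can be built the same
 * way. A hypothetical sketch (not part of this file), modeled on
 * uint64_hash_ops above, for a two-int struct key:
 *
 *         struct two_ints { int a; int b; };
 *
 *         static unsigned long two_ints_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
 *                 uint64_t u;
 *                 siphash24((uint8_t*) &u, p, sizeof(struct two_ints), hash_key);
 *                 return (unsigned long) u;
 *         }
 *
 *         static int two_ints_compare_func(const void *a, const void *b) {
 *                 return memcmp(a, b, sizeof(struct two_ints));
 *         }
 *
 *         static const struct hash_ops two_ints_hash_ops = {
 *                 .hash = two_ints_hash_func,
 *                 .compare = two_ints_compare_func,
 *         };
 *
 * Hashing the raw bytes like this is only sound for key structs without
 * padding holes. */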
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}
static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        return (unsigned) (h->hash_ops->hash(p, hash_key(h)) % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level that triggered a resize. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}
static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}
static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
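
/* E.g. (illustrative): in a table of 8 buckets, an entry whose initial bucket
 * is 6 but that ended up in bucket 1 has wrapped around the end of the table;
 * its distance is 8 + 1 - 6 = 3. */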
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}
static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}
static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}
static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}
static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}

#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
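
/*
 * Worked example of the backward shift (illustrative): remove the entry at
 * bucket 2 in this toy table of six buckets:
 *
 *         bucket:  0    1    2    3    4    5
 *         DIB:     0    1    2    1    2    0
 *
 * The scan for the stop bucket starts at 3 and halts at 5 (DIB == 0).
 * Buckets 3 and 4 shift one step backwards with their DIBs decremented,
 * and bucket 4 is then marked free:
 *
 *         bucket:  0    1    2    3    4    5
 *         DIB:     0    1    0    1   free  0
 *
 * No tombstones are left behind, so probe sequences stay short.
 */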
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}
void *internal_hashmap_iterate(HashmapBase *h, Iterator *i, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (key)
                        *key = NULL;

                return NULL;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (key)
                *key = e->key;

        return data;
}

void *set_iterate(Set *s, Iterator *i) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, NULL);
}
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}
static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
#endif

        return h;
}
Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_DEBUG_HASHMAP
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}
HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}
void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}
void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}
void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}
static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap,
 *                     and that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}

#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        unsigned new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         * E.g. with INV_KEEP_FREE == 5 we ask for 100 + 100/4 = 125 buckets
         * to hold 100 entries, i.e. a load factor of 0.8.
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);

        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;

                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
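
/* Illustrative usage of the put/get API (hypothetical caller, not part of
 * this file; hashmap_new(), hashmap_get() and hashmap_free() are the public
 * wrappers from hashmap.h, and "obj" stands in for any value pointer):
 *
 *         Hashmap *m;
 *         int r;
 *
 *         m = hashmap_new(&string_hash_ops);
 *         if (!m)
 *                 return -ENOMEM;
 *
 *         r = hashmap_put(m, "foo", obj);
 *         if (r < 0)        // -ENOMEM, or -EEXIST if "foo" already maps
 *                 return r; // to a different value
 *
 *         assert_se(hashmap_get(m, "foo") == obj);
 *         hashmap_free(m);
 */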
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        return 0;
}
void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}
void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}
bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}
void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}
static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}
void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}
void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}
void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}
unsigned internal_hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}
int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}
int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}
int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}
int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}
HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}
char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}
void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;
        int r;

        assert(s);
        assert(p);

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        r = set_consume(s, c);
        if (r == -EEXIST)
                return 0;

        return r;
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}
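
/* Illustrative usage of the string-set helpers above (hypothetical caller,
 * not part of this file; set_new(), set_size() and set_free_free() are the
 * public wrappers from set.h):
 *
 *         char *words[] = { "foo", "bar", "foo", NULL };
 *         Set *s;
 *
 *         s = set_new(&string_hash_ops);
 *         if (!s)
 *                 return -ENOMEM;
 *
 *         if (set_put_strdupv(s, words) < 0) {
 *                 set_free_free(s);
 *                 return -ENOMEM;
 *         }
 *
 *         assert_se(set_size(s) == 2);      // the duplicate "foo" collapsed
 *         set_free_free(s);
 */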