/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "util.h"
#include "hashmap.h"
#include "set.h"
#include "macro.h"
#include "siphash24.h"
#include "strv.h"
#include "mempool.h"
#include "random-util.h"

#ifdef ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif
/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (the number of
 *   probes in a successful search), while Janson writes about displacement d;
 *   C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
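/* To make the effect of INV_KEEP_FREE concrete (illustration only, not used
 * by the code): resize_buckets() below sizes the table as
 *         m = n + n / (INV_KEEP_FREE - 1)
 * so for n = 1000 entries it allocates m = 1000 + 1000/4 = 1250 buckets,
 * leaving 250 of 1250 buckets (one fifth) free, i.e. a load factor of 0.8. */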
/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};
/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT         (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP         (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END   (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL   UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
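/* A small illustration of DIBs (not part of the original code): with linear
 * probing, an entry whose key hashes to bucket 2 but that was pushed to
 * bucket 4 by collisions has DIB 2. E.g., in a table of 8 buckets:
 *
 *         bucket:  0    1    2    3    4    5    6    7
 *         entry:  (A)  (-)  (B)  (C)  (D)  (-)  (-)  (-)
 *         DIB:     0    -    0    1    2    -    -    -
 *
 * Here C and D also hash to bucket 2; the Robin Hood invariant keeps the
 * DIBs along a probe run non-decreasing, which both lookup and removal
 * below rely on. */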
#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */
enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};
struct _packed_ indirect_storage {
        char    *storage;                  /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while (hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};
struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};
#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
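/* Worked example of the arithmetic above (illustration only): on 64bit,
 * sizeof(struct indirect_storage) is 8 + 16 + 4 + 4 + 4 + 3 = 39 bytes, and
 * a set_entry is a single 8-byte pointer, so
 *         DIRECT_BUCKETS(struct set_entry) = 39 / (8 + 1) = 4,
 * which matches the "4 set_entries + 4 DIB bytes + 3 unused bytes" remark
 * in direct_storage and satisfies both compile-time assertions. */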
/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;
/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};
/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};
DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));
struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};
static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, strlen(p), hash_key);
        return (unsigned long) u;
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};
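/* Sketch of typical caller-side use of these ops (illustration only; the
 * hashmap_new() and related wrappers are declared in hashmap.h):
 *
 *         Hashmap *h = hashmap_new(&string_hash_ops);
 *         if (!h)
 *                 return -ENOMEM;
 *         r = hashmap_put(h, "foo", some_object);   (key pointers are stored,
 *         ...                                        not copied)
 *         some_object = hashmap_get(h, "foo");
 *         hashmap_free(h);
 */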
unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
        return (unsigned long) u;
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};

unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
        return (unsigned long) u;
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;
        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};
#if SIZEOF_DEV_T != 8
unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
        return (unsigned long) u;
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}
static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        return (unsigned) (h->hash_ops->hash(p, hash_key(h)) % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}
/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);
        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;
        assert_not_reached("Invalid index");
}
static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
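/* E.g. (illustration only): with n_buckets(h) == 8, the distance from
 * bucket 6 to bucket 1 is 8 + 1 - 6 = 3, i.e. the probe sequence wrapped
 * around the end of the table (6 -> 7 -> 0 -> 1). */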
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}
static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}
static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}
static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
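/* A small before/after picture of the backward shift above (illustration
 * only), reusing the notation from the DIB sketch near the top of the file.
 * Removing B from bucket 2, with C and D also hashing to bucket 2:
 *
 *         before:  ...  (B,0)  (C,1)  (D,2)  (free)  ...
 *         after:   ...  (C,0)  (D,1)  (free) (free)  ...
 *
 * Every shifted entry ends one bucket closer to its initial bucket (its DIB
 * decreases by 1), so no tombstones are needed; see the Goossaert reference
 * above for a longer explanation. */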
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}
bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
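/* Typical in-file use of this macro (taken from internal_hashmap_merge()
 * and friends below; shown here just as a usage sketch):
 *
 *         Iterator i;
 *         unsigned idx;
 *         HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
 *                 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
 *                 ...
 *         }
 */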
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}
static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);

        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
#endif

        return h;
}
Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_DEBUG_HASHMAP
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}
HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}
void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}
void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}
static int resize_buckets(HashmapBase *h, unsigned entries_add);
/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
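/* Sketch of one Robin Hood displacement in the loop above (illustration
 * only), again in (entry,DIB) notation. Inserting X, which hashes to
 * bucket 2, into:
 *
 *         bucket:  2      3      4      5
 *         before: (B,0)  (C,0)  (D,1)  (free)
 *
 * X probes bucket 2 (distance 0, equal to B's DIB, so no swap), then
 * bucket 3: distance 1 > C's DIB 0, so C is "wealthier" and gets robbed.
 * X settles in bucket 3 with DIB 1 and the loop continues placing C, with
 * distance reset to C's old DIB. C then passes D (equal DIBs at distance 1)
 * and lands in the free bucket 5 with DIB 2. */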
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap,
 *                     and that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}

#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
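        /* One-line derivation of the formula above (illustration only):
         * with k = INV_KEEP_FREE, n/m = (k-1)/k gives m = n*k/(k-1)
         * = n + n/(k-1). E.g. k = 5, n = 1000 => m = 1000 + 250 = 1250. */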
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        /* overflow? */
        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
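/* Caller-side sketch of the put contract above (illustration only):
 *
 *         r = hashmap_put(h, key, value);
 *         if (r == -EEXIST)
 *                 ...  key already present with a different value  ...
 *         else if (r == 0)
 *                 ...  key already mapped to this exact value  ...
 *         else if (r < 0)
 *                 return r;    (-ENOMEM from a failed resize)
 *         (r == 1: a new entry was inserted)
 */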
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;

        return 0;
}
void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}
void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}
bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}
void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}
static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}
void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}
void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}
unsigned internal_hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}
int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}
int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}
int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}
HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}
char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}
void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}
int set_put_strdup(Set *s, const char *p) {
        char *c;
        int r;

        assert(s);
        assert(p);

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        r = set_consume(s, c);
        if (r == -EEXIST)
                return 0;

        return r;
}
1847 STRV_FOREACH(i, l) {
1848 r = set_put_strdup(s, *i);
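/* Caller-side sketch for the set helpers above (illustration only; the
 * set_new() and set_free_free() wrappers are declared in set.h):
 *
 *         Set *s = set_new(&string_hash_ops);
 *         if (!s)
 *                 return -ENOMEM;
 *         r = set_put_strdup(s, "foo");        (stores a copy of "foo")
 *         r = set_put_strdupv(s, some_strv);   (stores copies of all strings)
 *         ...
 *         set_free_free(s);                    (frees the stored strings, too)
 */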