/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <stdlib.h>
#include <errno.h>

#include "util.h"
#include "hashmap.h"
#include "set.h"
#include "macro.h"
#include "siphash24.h"
#include "strv.h"
#include "mempool.h"

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U

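/* A quick worked example of the relation above (illustrative only): with
 * INV_KEEP_FREE == 5 the maximum load factor is 1 - 1/5 = 0.8, so a table
 * asked to hold n = 100 entries is sized to at least
 *         m = n + n / (INV_KEEP_FREE - 1) = 100 + 25 = 125 buckets,
 * leaving one fifth of the buckets free. resize_buckets() below uses exactly
 * this formula before rounding the allocation up to a power of two. */
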
/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN  (UINT_MAX - 3)
#define IDX_PUT          (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP          (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END    (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST        (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL          UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

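/* Example of the DIB encoding (illustrative only): an entry whose key hashes
 * to bucket 5 but is stored in bucket 7 has DIB 2, stored directly as the
 * raw byte 0x02. Raw values 0x00..0xfc are literal distances; anything
 * larger is clamped to DIB_RAW_OVERFLOW and the true distance is recomputed
 * from the key on demand, see bucket_calculate_dib() below. */
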
#ifdef ENABLE_HASHMAP_DEBUG
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_HASHMAP_DEBUG */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_HASHMAP_DEBUG */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        char *storage;                   /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;              /* number of stored entries */
        unsigned n_buckets;              /* number of buckets */

        unsigned idx_lowest_entry;       /* Index below which all buckets are free.
                                            Makes "while(hashmap_steal_first())" loops
                                            O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

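/* Worked example of DIRECT_BUCKETS (illustrative, 64bit sizes assumed):
 * sizeof(struct direct_storage) is 39, a set_entry is a single pointer
 * (8 bytes), and each bucket also needs one DIB byte, so
 *         DIRECT_BUCKETS(struct set_entry) = 39 / (8 + 1) = 4,
 * matching the "4 set_entries + 4 DIB bytes + 3 unused bytes" comment above.
 * Likewise a plain_hashmap_entry (16 bytes) yields 2 direct buckets and an
 * ordered_hashmap_entry (24 bytes) yields 1. */
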
/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops; /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, strlen(p), hash_key);
        return (unsigned long) u;
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};

unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
        return (unsigned long) u;
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};

unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
        return (unsigned long) u;
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;
        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};

#if SIZEOF_DEV_T != 8
unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
        return (unsigned long) u;
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif

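/* Example (illustrative): the hash_ops above are what callers hand to the
 * constructors when allocating a map, e.g. a map keyed by strings:
 *
 *         Hashmap *h = hashmap_new(&string_hash_ops);
 *         Set *s = set_new(NULL);   // NULL falls back to trivial_hash_ops,
 *                                   // which hashes the pointer value itself
 *
 * hashmap_new()/set_new() are the wrapper macros declared in hashmap.h/set.h
 * around the internal_*_new() constructors defined below. */
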
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        return (unsigned) (h->hash_ops->hash(p, hash_key(h)) % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level limit. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

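/* Example (illustrative): with n_buckets(h) == 8, an entry whose initial
 * bucket is 6 but which ended up at index 1 after wrapping around has
 * bucket_distance(h, 1, 6) == 8 + 1 - 6 == 3, i.e. the linear probe
 * sequence visited buckets 6, 7, 0, 1. */
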
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memset(bucket_at(h, idx), 0, hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_HASHMAP_DEBUG
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                /* Unlink the entry from the iteration list */
                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;
                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)

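/* Worked example of backward shift deletion (illustrative only). Buckets
 * shown as key/DIB pairs, removing the entry at index 2:
 *
 *     idx:   0    1    2    3    4    5
 *     pre:  A/0  B/0  C/1  D/1  E/2  free
 *     post: A/0  B/0  D/0  E/1  free free
 *
 * The scan from "left" (2) stops at "right" (5), the first bucket that is
 * free or has DIB == 0; D and E shift one step back with their DIBs
 * decremented. No tombstones are left behind, which keeps later lookups
 * short. */
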
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_HASHMAP_DEBUG
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_HASHMAP_DEBUG
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_HASHMAP_DEBUG
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

void *internal_hashmap_iterate(HashmapBase *h, Iterator *i, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (key)
                        *key = NULL;

                return NULL;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (key)
                *key = e->key;

        return data;
}

void *set_iterate(Set *s, Iterator *i) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, NULL);
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

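/* Example (illustrative): HASHMAP_FOREACH_IDX drives the internal loops
 * below, e.g. when merging one map into another:
 *
 *         Iterator i;
 *         unsigned idx;
 *
 *         HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
 *                 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
 *                 ...
 *         }
 *
 * External callers use the HASHMAP_FOREACH/SET_FOREACH macros from
 * hashmap.h/set.h instead, which yield entry values rather than bucket
 * indexes. */
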
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_HASHMAP_DEBUG
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_HASHMAP_DEBUG
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

void internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (!h)
                return;

        internal_hashmap_clear(h);
        hashmap_free_no_clear(h);
}

void internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (!h)
                return;

        internal_hashmap_clear_free(h);
        hashmap_free_no_clear(h);
}

void hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (!h)
                return;

        hashmap_clear_free_free(h);
        hashmap_free_no_clear(HASHMAP_BASE(h));
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*) e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_HASHMAP_DEBUG
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

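/* Worked example of a Robin Hood insertion (illustrative only). Inserting
 * key F, which hashes to bucket 1, into:
 *
 *     idx:   0    1    2    3
 *     pre:  A/0  B/0  C/1  free
 *
 * F probes bucket 1 (distance 0, B has DIB 0: not poorer, keep going),
 * then bucket 2 (distance 1, C has DIB 1: tie again), then bucket 3, which
 * is free, so F lands there with DIB 2. Had C's DIB been 0 instead
 * ("wealthier" than F's running distance of 1), F would have taken bucket 2
 * and C would have continued the scan from swap slot IDX_PUT. */
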
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_HASHMAP_DEBUG
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}

#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memset(bucket_at(h, old_n_buckets), 0,
               (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memset(bucket_at(h, idx), 0, hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;

                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

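/* Example (illustrative) of the hashmap_put() return convention:
 *
 *         r = hashmap_put(h, "foo", v);
 *         // r == 1:       newly inserted
 *         // r == 0:       "foo" already mapped to this same value
 *         // r == -EEXIST: "foo" already mapped to a different value
 *         // r == -ENOMEM: resize failed
 */
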
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_HASHMAP_DEBUG
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}

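/* Example (illustrative): the idx_lowest_entry optimization described in
 * struct indirect_storage is what keeps drain loops like this one linear
 * for unordered hashmaps:
 *
 *         void *v;
 *         while ((v = hashmap_steal_first(h)))
 *                 free(v);
 */
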
unsigned internal_hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        assert(key);

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;
        int r;

        assert(s);
        assert(p);

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        r = set_consume(s, c);
        if (r == -EEXIST)
                return 0;

        return r;
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}