/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <errno.h>
#include <stdlib.h>
#include <pthread.h>

#include "util.h"
#include "macro.h"
#include "hashmap.h"
#include "set.h"
#include "mempool.h"
#include "siphash24.h"
#include "strv.h"
#include "random-util.h"

#ifdef ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif
/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 *
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */
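/*
 * A worked example of the scheme above (illustrative only, indices made up):
 * DIB = distance from initial bucket; linear probing; 8 buckets.
 *
 *   before:  [A dib=0][B dib=1][C dib=0][ free ] ...
 *   insert D, whose initial bucket is 0:
 *     bucket 0: A has DIB 0, D has walked 0 -> not poorer, probe on
 *     bucket 1: B has DIB 1, D has walked 1 -> not poorer, probe on
 *     bucket 2: C has DIB 0, D has walked 2 -> C is wealthier, so D takes
 *               bucket 2 with DIB 2 and C is displaced ("Go Robin Hood!")
 *     bucket 3: free -> C settles there with DIB 1
 *
 * It is the displaced entry that continues probing, which is what evens out
 * displacements. See hashmap_put_robin_hood() below.
 */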
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};
/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT         (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP         (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END   (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL   UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
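/* Illustrative only: how displacements map to the one-byte raw DIB store.
 *
 *     real DIB 0 .. 0xfc  -> stored verbatim
 *     real DIB >= 0xfd    -> stored as DIB_RAW_OVERFLOW; the real value is
 *                            recomputed on demand by bucket_calculate_dib()
 *     free bucket         -> DIB_RAW_FREE
 *     during resize       -> DIB_RAW_REHASH marks entries awaiting rehash
 */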
#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        const char *func;
        const char *file;
        int line;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */
enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};
struct _packed_ indirect_storage {
        char *storage;                    /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;               /* number of stored entries */
        unsigned n_buckets;               /* number of buckets */

        unsigned idx_lowest_entry;        /* Index below which all buckets are free.
                                             Makes "while (hashmap_steal_first())" loops
                                             O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};
struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};
#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;
/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};
/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};
DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));
struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};
static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
void string_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, strlen(p) + 1, state);
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};

void trivial_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(&p, sizeof(p), state);
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};
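/* Illustrative only: how a caller picks key semantics via these ops tables
 * (hashmap_new() is the public constructor macro from hashmap.h):
 *
 *     Hashmap *by_name = hashmap_new(&string_hash_ops);  // keys are C strings,
 *                                                        // compared with strcmp()
 *     Hashmap *by_addr = hashmap_new(&trivial_hash_ops); // keys are the pointer
 *                                                        // values themselves
 *
 * Note that string_hash_func() hashes the bytes pointed to (including the
 * terminating NUL), while trivial_hash_func() hashes the pointer itself. */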
void uint64_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(uint64_t), state);
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;

        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};

#if SIZEOF_DEV_T != 8
void devt_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(dev_t), state);
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;

        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}
static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        siphash24_finalize((uint8_t*)&hash, &state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
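/* Illustrative only: hashing is keyed, so bucket positions are not stable
 * across tables or resizes. Two tables with indirect storage carry distinct
 * siphash keys (see get_hash_key() below), hence for the same pointer p:
 *
 *     base_bucket_hash(h1, p);  // keyed with h1->indirect.hash_key
 *     base_bucket_hash(h2, p);  // independent result
 *
 * which hardens the tables against collision-flooding attacks. */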
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}
/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}
static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
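/* Illustrative only: bucket_distance() accounts for probe-sequence wraparound.
 * With n_buckets(h) == 8, an entry sitting at index 1 whose initial bucket is
 * 6 has distance 8 + 1 - 6 == 3, matching the probes 6 -> 7 -> 0 -> 1. */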
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}
static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}
static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}
static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
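/* Illustrative only: backward shift deletion, step by step. Removing B from
 *
 *     [A dib=0][B dib=0][C dib=1][D dib=1][free]...
 *
 * finds the stop bucket (the first free or DIB-0 bucket to the right), then
 * shifts C and D one bucket back, decrementing their DIBs:
 *
 *     [A dib=0][C dib=0][D dib=0][free][free]...
 *
 * No tombstones are left behind, so later lookups still stop at the first
 * free bucket they meet. */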
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}
bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
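/* Illustrative only: the public HASHMAP_FOREACH macro from hashmap.h drives
 * this machinery, and removing the *current* entry is the one mutation that
 * is safe mid-iteration:
 *
 *     Iterator i;
 *     void *v;
 *     HASHMAP_FOREACH(v, h, i)
 *             if (should_drop(v))                   // should_drop() and
 *                     hashmap_remove(h, key_of(v)); // key_of() are hypothetical
 *
 * The iterator remembers the next entry's key (i.next_key), detects the
 * backward shift such a removal causes, and compensates by one bucket. */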
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}
static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);

        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}
Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}
HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}
void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}
void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}
static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
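        /* e.g. making room for 8 entries with INV_KEEP_FREE == 5 requests
         * 8 + 8/4 == 10 buckets, i.e. at most 80% load after the resize.
         * (The power-of-two rounding of the allocation below may grant more
         * buckets than requested.) */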
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
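/* Illustrative only: the dib < distance early return above is the Robin Hood
 * payoff for unsuccessful lookups. If the scan for key K has already walked
 * distance 2 and meets a resident with DIB 1, K cannot be stored any further
 * to the right: inserting K would have displaced that wealthier resident at
 * this bucket (see hashmap_put_robin_hood()), so the scan can stop without
 * reaching a free bucket. */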
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
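/* Illustrative only: the three outcomes of hashmap_put() from the caller's
 * side (the key and value here are hypothetical):
 *
 *     r = hashmap_put(h, "eth0", link);
 *     // r == 1:       stored as a new entry
 *     // r == 0:       "eth0" already mapped to this exact value; no-op
 *     // r == -EEXIST: "eth0" maps to a different value; nothing changed
 *     //               (hashmap_replace() would overwrite instead)
 */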
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        return 0;
}
void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}
void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}
bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}
void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
/// UNNEEDED by elogind
#if 0
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
#endif // 0
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}
static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}
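/* Illustrative only: the drain loop these helpers exist for, using the public
 * hashmap_steal_first() wrapper from hashmap.h (Unit and unit_free() are
 * stand-ins):
 *
 *     Unit *u;
 *     while ((u = hashmap_steal_first(h)))
 *             unit_free(u);
 *
 * For unordered hashmaps, indirect.idx_lowest_entry is what keeps this loop
 * O(n): each search for the first entry resumes where the previous one left
 * off instead of rescanning from bucket 0. */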
unsigned internal_hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}
int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}
int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}
int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}
HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}
char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}
/// UNNEEDED by elogind
#if 0
void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;

        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
#endif // 0
int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;
        int r;

        assert(s);
        assert(p);

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        r = set_consume(s, c);
        if (r == -EEXIST)
                return 0;

        return r;
}
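/* Illustrative only: set_put_strdup() layers string-copy ownership on top of
 * the pointer-based Set API:
 *
 *     Set *seen = set_new(&string_hash_ops);
 *     r = set_put_strdup(seen, name);  // 1 = newly added (copy owned by set),
 *                                      // 0 = already present (copy freed)
 */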
/// UNNEEDED by elogind
#if 0
int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}
#endif // 0