/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "hashmap.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "strv.h"
#include "util.h"

#ifdef ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif
/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 *
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */
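/*
 * Illustrative walkthrough (a sketch, not part of the build; indices and
 * keys are made up): how Robin Hood insertion with linear probing equalizes
 * displacement. Suppose we insert K, whose optimal bucket is 2, and buckets
 * 2 and 3 are occupied by entries with DIB 0 and 1 respectively. By bucket 4,
 * K has probed a distance of 2; if bucket 4 holds an entry with DIB 0, that
 * entry is "richer" than K, so K takes its place and the evicted entry
 * continues probing with its own distance. Rich (low-DIB) entries yield to
 * poor (high-DIB) ones, keeping the variance of probe lengths low.
 */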
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};
/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT         (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP         (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END   (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL   UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
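/*
 * Example of the raw DIB encoding (illustrative values): distances 0..0xfc
 * fit in one byte and are stored verbatim; anything larger is clamped to
 * DIB_RAW_OVERFLOW and recomputed from the key on demand, see
 * bucket_calculate_dib() below:
 *
 *   dib = 5      ->  raw = 0x05
 *   dib = 0xfc   ->  raw = 0xfc   (largest representable distance)
 *   dib = 300    ->  raw = 0xfd   (DIB_RAW_OVERFLOW; recompute when read)
 *   free bucket  ->  raw = 0xff   (DIB_RAW_FREE)
 */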
#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */
enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};
struct _packed_ indirect_storage {
        char    *storage;                  /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while(hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};
struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         *             or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};
#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
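/*
 * Worked example of DIRECT_BUCKETS (numbers assume a 64-bit build, where
 * sizeof(struct direct_storage) == 39 and sizeof(struct set_entry) == 8,
 * per the comment in struct direct_storage above):
 *
 *   DIRECT_BUCKETS(struct set_entry) == 39 / (8 + 1) == 4
 *
 * i.e. a Set can hold up to 4 entries before needing a heap allocation.
 */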
/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;
/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};
/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};
DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));
struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};
static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
void string_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, strlen(p) + 1, state);
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};
void trivial_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(&p, sizeof(p), state);
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};
void uint64_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(uint64_t), state);
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;

        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};
#if SIZEOF_DEV_T != 8
void devt_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(dev_t), state);
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;

        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;

        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}
static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level limit. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}
/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}
static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
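/*
 * Example (illustrative numbers): with n_buckets(h) == 8, an entry whose
 * optimal bucket is 6 but which ended up at index 1 has wrapped around the
 * end of the array:
 *
 *   bucket_distance(h, 1, 6) == 8 + 1 - 6 == 3
 *
 * while the non-wrapping case is the plain difference:
 *
 *   bucket_distance(h, 5, 2) == 5 - 2 == 3
 */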
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}
static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}
static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}
static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}
static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
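/*
 * Backward shift deletion, worked example (made-up indices): suppose buckets
 * 3..6 hold entries with DIBs 0, 1, 1, 0 and the entry at index 4 is removed.
 * The scan for the stop bucket ("right") finds index 6, whose DIB is 0. The
 * entry at index 5 then shifts one step left with its DIB decremented:
 *
 *   before:  [3: dib 0] [4: removed] [5: dib 1] [6: dib 0]
 *   after:   [3: dib 0] [4: dib 0]   [5: free]  [6: dib 0]
 *
 * Unlike tombstone-based deletion, this keeps probe sequences short, at the
 * cost of moving entries; that is why the iterate functions below must
 * compensate for a possible backward shift of the next entry.
 */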
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}
bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
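/*
 * Usage sketch for HASHMAP_FOREACH_IDX (hypothetical variables, mirroring
 * how internal_hashmap_merge() below uses it):
 *
 *   Iterator i;
 *   unsigned idx;
 *
 *   HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(h), i) {
 *           struct hashmap_base_entry *e = bucket_at(HASHMAP_BASE(h), idx);
 *           ... use e->key ...
 *   }
 */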
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}
static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}
Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}
HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}
void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}
void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}
static int resize_buckets(HashmapBase *h, unsigned entries_add);
/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}

#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
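        /*
         * Worked example: with INV_KEEP_FREE == 5 the target load factor is
         * 1 - 1/5 = 0.8, so for new_n_entries == 100 this computes
         * 100 + 100/4 == 125 buckets, i.e. one fifth of them kept free.
         */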
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;

                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
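/*
 * Why the "dib < distance" check above can terminate unsuccessful lookups
 * early (a sketch with made-up values): searching for a key whose optimal
 * bucket is 2, we probe index 4 at distance 2. If the entry stored there has
 * DIB 1, Robin Hood ordering guarantees our key cannot sit at or beyond this
 * bucket - had it been inserted, it would have displaced this richer entry.
 * So we can return IDX_NIL without probing all the way to a free bucket.
 */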
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
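/*
 * Usage sketch for hashmap_put() return values ("m", "v1" and "v2" are
 * hypothetical):
 *
 *   Hashmap *m = hashmap_new(&string_hash_ops);
 *
 *   r = hashmap_put(m, "foo", v1);   // 1: newly stored
 *   r = hashmap_put(m, "foo", v1);   // 0: same key and value, nothing to do
 *   r = hashmap_put(m, "foo", v2);   // -EEXIST: key taken by another value
 */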
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;

        return 0;
}
void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}
void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}
bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}
void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
#if 0 /// UNNEEDED by elogind
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
#endif // 0
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}
static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}
void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}
void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}
unsigned internal_hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}
int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}
int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}
int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}
HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}
char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}
#if 0 /// UNNEEDED by elogind
void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
#endif // 0
int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;
        int r;

        assert(s);
        assert(p);

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        r = set_consume(s, c);
        if (r == -EEXIST)
                return 0;

        return r;
}
#if 0 /// UNNEEDED by elogind
int set_put_strdupv(Set *s, char **l) {
        char **i;
        int r;

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;
        }

        return 0;
}