/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

#include "util.h"
#include "hashmap.h"
#include "set.h"
#include "macro.h"
#include "siphash24.h"
#include "strv.h"
#include "mempool.h"
#include "random-util.h"

#ifdef ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif
/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */
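/*
 * Illustrative sketch of Robin Hood insertion (not part of the
 * implementation; a made-up example): the incoming entry steals the
 * bucket of any resident whose displacement (DIB) is smaller than the
 * incoming entry's current probe distance. Starting from
 *
 *   bucket:  0          1          2          3
 *   entry:  [A dib=0]  [B dib=0]  [C dib=1]  [free]
 *
 * an insert of X hashing to bucket 0 probes bucket 0 (distance 0, A's
 * dib 0 is not smaller: no steal), then bucket 1 (distance 1 > B's
 * dib 0: X takes bucket 1 with dib 1). The displaced B continues the
 * scan and lands in bucket 3 with dib 2, giving [A 0][X 1][C 1][B 2].
 * The "rich" (entries near their optimal bucket) yield to the "poor",
 * which equalizes displacements across the table.
 */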
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};
/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */
enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        char *storage;                   /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;              /* number of stored entries */
        unsigned n_buckets;              /* number of buckets */

        unsigned idx_lowest_entry;       /* Index below which all buckets are free.
                                            Makes "while(hashmap_steal_first())" loops
                                            O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};
struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
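/*
 * Worked example of the sizes above (an illustration, assuming 8-byte
 * pointers and HASH_KEY_SIZE == 16): sizeof(struct indirect_storage) =
 * 8 (storage) + 16 (hash_key) + 4 + 4 + 4 (counters) + 3 (_pad) = 39
 * bytes, hence DIRECT_BUCKETS(struct set_entry) = 39 / (8 + 1) = 4.
 * With 4-byte pointers: 35 / (4 + 1) = 7. Both match the comment in
 * struct direct_storage.
 */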
/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;
/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops; /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};
/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};
DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));
struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, strlen(p), hash_key);
        return (unsigned long) u;
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};

unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
        return (unsigned long) u;
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};

unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
        return (unsigned long) u;
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;
        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};

#if SIZEOF_DEV_T != 8
unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
        return (unsigned long) u;
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif
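/*
 * A minimal sketch of plugging in custom hash_ops (illustrative only;
 * "struct endpoint" and its ops are hypothetical, not part of this
 * file). Any key type works as long as the hash function feeds a
 * stable byte representation of the key to siphash24() and the
 * comparator agrees with it:
 *
 *     struct endpoint { uint32_t addr; uint16_t port; };
 *
 *     static unsigned long endpoint_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
 *             const struct endpoint *e = p;
 *             uint64_t u;
 *             uint8_t buf[6];
 *
 *             memcpy(buf, &e->addr, 4);      // hash the fields, not the
 *             memcpy(buf + 4, &e->port, 2);  // struct, to skip padding
 *             siphash24((uint8_t*) &u, buf, sizeof(buf), hash_key);
 *             return (unsigned long) u;
 *     }
 *
 *     static int endpoint_compare_func(const void *a, const void *b) {
 *             const struct endpoint *x = a, *y = b;
 *
 *             if (x->addr != y->addr)
 *                     return x->addr < y->addr ? -1 : 1;
 *             if (x->port != y->port)
 *                     return x->port < y->port ? -1 : 1;
 *             return 0;
 *     }
 *
 *     static const struct hash_ops endpoint_hash_ops = {
 *             .hash = endpoint_hash_func,
 *             .compare = endpoint_compare_func
 *     };
 */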
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}
static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        return (unsigned) (h->hash_ops->hash(p, hash_key(h)) % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}
/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}
static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}
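/*
 * Worked example of the raw DIB encoding (illustrative): an entry whose
 * key hashes to bucket 6 but sits in bucket 1 of an 8-bucket table has
 * DIB = bucket_distance(h, 1, 6) = 8 + 1 - 6 = 3, because the linear
 * probe sequence wraps around the end of the table. bucket_set_dib()
 * below stores DIBs up to 0xfc verbatim in one byte; anything larger is
 * clamped to DIB_RAW_OVERFLOW, and bucket_calculate_dib() then falls
 * back to recomputing the hash as above.
 */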
static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}
static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
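/*
 * Illustrative sketch of backward shift deletion (a made-up example,
 * not executable code): removing B from
 *
 *   bucket:  0          1          2          3          4
 *   entry:  [A dib=0]  [B dib=0]  [C dib=1]  [D dib=1]  [free]
 *
 * scans right from bucket 1 until a free bucket or a DIB-0 entry marks
 * the stop bucket ("right" = 4 here), then shifts C and D one bucket
 * left, decrementing each DIB: [A 0][C 0][D 0][free][free]. Unlike
 * tombstones, this leaves the table exactly as if B had never been
 * inserted, so lookups never degrade after many removals.
 */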
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}
bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}
static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}
Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}
HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}
void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}
static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        /* detect overflow of the storage size computed below */
        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
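/*
 * Worked example of the sizing above (illustrative numbers): growing to
 * hold n = 100 entries with INV_KEEP_FREE = 5 gives new_n_buckets =
 * 100 + 100/4 = 125. With 16-byte plain entries plus 1 DIB byte that is
 * 125 * 17 = 2125 bytes, so new_shift = log2u_round_up(2125) = 12 and
 * the table is allocated as 4096 bytes holding 4096 / 17 = 240 buckets.
 * Because allocations are rounded up to a power of two, the effective
 * load factor right after a resize lands at or below the 0.8 target.
 */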
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
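/*
 * A minimal usage sketch for the API above (illustrative; assumes the
 * public wrappers from hashmap.h, e.g. hashmap_new()/hashmap_free(),
 * and the INT_TO_PTR()/PTR_TO_INT() helpers):
 *
 *     Hashmap *m;
 *     int r;
 *
 *     m = hashmap_new(&string_hash_ops);
 *     if (!m)
 *             return -ENOMEM;
 *
 *     r = hashmap_put(m, "answer", INT_TO_PTR(42));
 *     if (r < 0)            // -ENOMEM, or -EEXIST if "answer" already
 *             goto finish;  // maps to a different value
 *
 *     assert_se(PTR_TO_INT(hashmap_get(m, "answer")) == 42);
 *
 * finish:
 *     m = hashmap_free(m);  // frees the table, not keys or values
 */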
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
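/*
 * Sets deduplicate by key alone; a sketch (illustrative, using the
 * set_new()/set_free() wrappers from set.h):
 *
 *     Set *s = set_new(&string_hash_ops);
 *
 *     assert_se(set_put(s, "a") == 1);  // newly added
 *     assert_se(set_put(s, "a") == 0);  // already present
 *     assert_se(set_contains(s, "a"));
 *
 *     set_free(s);
 */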
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        return 0;
}
void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}
void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}
bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}
void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
/// UNNEEDED by elogind
#if 0
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
#endif // 0
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}
static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}
void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}
void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
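/*
 * The idx_lowest_entry bookkeeping in indirect_storage is what keeps
 * the common drain idiom linear for unordered hashmaps, e.g.
 * (illustrative, using the hashmap_steal_first() wrapper):
 *
 *     void *p;
 *
 *     while ((p = hashmap_steal_first(m)))
 *             free(p);
 *     m = hashmap_free(m);
 */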
void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}
unsigned internal_hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}
int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}
int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}
int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}
HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}
char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}
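/*
 * Usage sketch (illustrative, via the hashmap_get_strv() wrapper): the
 * returned vector borrows the stored values, so free only the array
 * unless ownership of the strings was transferred out of the map:
 *
 *     char **values = hashmap_get_strv(m);
 *     if (!values)
 *             return -ENOMEM;
 *     ...
 *     free(values);  // not strv_free(): the strings still live in m
 */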
/// UNNEEDED by elogind
#if 0
void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
#endif // 0
int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;
        int r;

        assert(s);
        assert(p);

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        r = set_consume(s, c);
        if (r == -EEXIST)
                return 0;

        return r;
}

/// UNNEEDED by elogind
#if 0
int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}
#endif // 0