/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "hashmap.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "strv.h"
#include "util.h"

#ifdef ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

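/*
 * Illustrative sketch (not part of the build): Robin Hood insertion with
 * linear probing on a tiny table. "X(n)" is an entry with DIB n.
 *
 *   bucket:  0     1     2     3     4
 *   entry:   A(0)  B(0)  C(1)  -     -
 *
 * Inserting D whose optimal bucket is 1: at bucket 1 the distance is 0 and
 * B's DIB is 0, so no steal; at bucket 2 the distance is 1 and C's DIB is 1,
 * so no steal; bucket 3 is free, so D lands there with DIB 2. Had C's DIB
 * been 0, D (distance 1 > 0) would have taken C's bucket and C would have
 * continued probing from there with its old DIB. This is what
 * hashmap_put_robin_hood() below implements.
 */
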
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U

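/* Example: to store n = 100 entries at this load factor we need
 * m = n + n/(INV_KEEP_FREE - 1) = 100 + 100/4 = 125 buckets (see
 * resize_buckets() below); 125/INV_KEEP_FREE = 25 of them stay free. */
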
/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

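/* Example: an entry whose key hashes to bucket 10 but is stored in bucket 13
 * has DIB 3. Raw DIBs 0..0xfc are stored directly in the dib_raw_t array;
 * anything larger is stored as DIB_RAW_OVERFLOW and recomputed from the key
 * on demand (see bucket_calculate_dib() below). */
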
#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        char    *storage;                  /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while(hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

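/* Worked example for the numbers above, assuming HASH_KEY_SIZE == 16:
 * on 64bit, sizeof(struct indirect_storage) = 8 + 16 + 3*4 + 3 = 39 bytes
 * (storage pointer + hash key + three unsigneds + padding), so
 * DIRECT_BUCKETS(struct set_entry) = 39 / (8 + 1) = 4, with
 * 39 - 4*9 = 3 bytes left unused, matching the comment in
 * struct direct_storage. */
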
/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

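/* Example: in a table of 8 buckets, an entry with initial bucket 6 that is
 * stored at index 1 has distance 8 + 1 - 6 = 3, i.e. its probe sequence
 * wrapped around the end of the array. */
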
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)

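/*
 * Illustrative sketch (not part of the build) of the backward shift above:
 *
 *   before:   A(0)  B(1)  C(1)  D(2)  E(0)
 *   remove B:       ^
 *   after:    A(0)  C(0)  D(1)  -     E(0)
 *
 * C and D move one bucket to the left and their DIBs drop by one; the shift
 * stops at E, whose DIB is already 0. No tombstones are left behind, which
 * keeps probe sequences short.
 */
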
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

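/* Typical use of the macro above (sketch): walk all entries by internal
 * index, as internal_hashmap_merge() and internal_hashmap_move() below do:
 *
 *         Iterator i;
 *         unsigned idx;
 *
 *         HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
 *                 struct plain_hashmap_entry *e = plain_bucket_at(other, idx);
 *                 ...
 *         }
 */
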
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

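/* Usage sketch (not compiled): callers go through the wrappers in hashmap.h,
 * which pass the allocation site when debugging is enabled. Assuming the
 * usual hashmap_new() macro and the stock string_hash_ops:
 *
 *         Hashmap *h = hashmap_new(&string_hash_ops);
 *         if (!h)
 *                 return -ENOMEM;
 *         r = hashmap_put(h, "foo", value);
 *
 * Passing NULL hash_ops selects trivial_hash_ops (pointer identity), as
 * hashmap_base_new() above shows. */
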
static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        unsigned new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);

        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        /* overflow? */
        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}

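/* Sizing example for the logic above, assuming 64bit and a plain hashmap
 * (entry_size 16, one DIB byte per bucket): growing to hold 100 entries
 * requests new_n_buckets = 125, i.e. 125 * 17 = 2125 bytes of storage;
 * log2u_round_up() rounds the allocation up to 4096 bytes, so n_buckets
 * becomes 4096 / 17 = 240 and no further resize is needed until the map
 * would exceed 240 - 240/INV_KEEP_FREE = 192 entries. */
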
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

#if 0 /// UNNEEDED by elogind
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
#endif // 0

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}

unsigned internal_hashmap_size(HashmapBase *h) {

        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {

        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

#if 0 /// UNNEEDED by elogind
void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
#endif // 0

int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;
        int r;

        assert(s);
        assert(p);

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        r = set_consume(s, c);
        if (r == -EEXIST)
                return 0;

        return r;
}

#if 0 /// UNNEEDED by elogind
int set_put_strdupv(Set *s, char **l) {
        char **i;
        int r;

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);