2 This file is part of systemd.
4 Copyright 2010 Lennart Poettering
5 Copyright 2014 Michal Schmidt
7 systemd is free software; you can redistribute it and/or modify it
8 under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or
10 (at your option) any later version.
12 systemd is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with systemd; If not, see <http://www.gnu.org/licenses/>.
26 #include "alloc-util.h"
30 #include "process-util.h"
31 #include "random-util.h"
33 #include "siphash24.h"
37 #ifdef ENABLE_DEBUG_HASHMAP
43 * Implementation of hashmaps.
44 * Addressing: open
45 * - uses less RAM compared to closed addressing (chaining), because
46 * our entries are small (especially in Sets, which tend to contain
47 * the majority of entries in systemd).
48 * Collision resolution: Robin Hood
49 * - tends to equalize displacement of entries from their optimal buckets.
50 * Probe sequence: linear
51 * - though theoretically worse than random probing/uniform hashing/double
52 * hashing, it is good for cache locality.
55 * Celis, P. 1986. Robin Hood Hashing.
56 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
57 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
58 * - The results are derived for random probing. Suggests deletion with
59 * tombstones and two mean-centered search methods. None of that works
60 * well for linear probing.
62 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
63 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
64 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
65 * http://www.math.uu.se/~svante/papers/sj157.pdf
66 * - Applies to Robin Hood with linear probing. Contains remarks on
67 * the unsuitability of mean-centered search with linear probing.
69 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
70 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
71 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
72 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
73 * in a successful search), and Janson writes about displacement. C = d + 1.
75 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
76 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
77 * - Explanation of backward shift deletion with pictures.
79 * Khuong, P. 2013. The Other Robin Hood Hashing.
80 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
81 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
85 * XXX Ideas for improvement:
86 * For unordered hashmaps, randomize iteration order, similarly to Perl:
87 * http://blog.booking.com/hardening-perls-hash-function.html
88 */
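/*
 * Illustrative usage sketch (not part of the original source): the machinery
 * in this file is normally reached through the inline wrappers and macros
 * declared in hashmap.h. Assuming those wrappers (hashmap_new(),
 * hashmap_put(), hashmap_get(), hashmap_free()), string_hash_ops, and the
 * usual helper macros (assert_se(), streq()) from the shared headers, a
 * minimal caller could look roughly like this:
 */
#if 0 /* example only, not compiled */
static int example_basic_usage(void) {
        Hashmap *h;
        int r;

        h = hashmap_new(&string_hash_ops);
        if (!h)
                return -ENOMEM;

        r = hashmap_put(h, "foo", (void*) "bar");
        if (r < 0) {
                hashmap_free(h);
                return r;
        }

        /* Keys are hashed and compared by string contents here, so any
         * pointer to an equal string finds the same entry. */
        assert_se(streq(hashmap_get(h, "foo"), "bar"));

        hashmap_free(h);
        return 0;
}
#endif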
90 /* INV_KEEP_FREE = 1 / (1 - max_load_factor)
91 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
92 #define INV_KEEP_FREE 5U
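/* Worked example (illustrative): sizing a table for 100 entries with
 * INV_KEEP_FREE == 5 yields 100 + 100/(5-1) = 125 buckets, i.e. 25 buckets
 * (one fifth) stay free and the load factor tops out at 100/125 = 0.8. */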
94 /* Fields common to entries of all hashmap/set types */
95 struct hashmap_base_entry {
99 /* Entry types for specific hashmap/set types
100 * hashmap_base_entry must be at the beginning of each entry struct. */
102 struct plain_hashmap_entry {
103 struct hashmap_base_entry b;
107 struct ordered_hashmap_entry {
108 struct plain_hashmap_entry p;
109 unsigned iterate_next, iterate_previous;
113 struct hashmap_base_entry b;
116 /* In several functions it is advantageous to have the hash table extended
117 * virtually by a couple of additional buckets. We reserve special index values
118 * for these "swap" buckets. */
119 #define _IDX_SWAP_BEGIN (UINT_MAX - 3)
120 #define IDX_PUT (_IDX_SWAP_BEGIN + 0)
121 #define IDX_TMP (_IDX_SWAP_BEGIN + 1)
122 #define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)
124 #define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
125 #define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */
127 assert_cc(IDX_FIRST == _IDX_SWAP_END);
128 assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
130 /* Storage space for the "swap" buckets.
131 * All entry types can fit into an ordered_hashmap_entry. */
132 struct swap_entries {
133 struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
136 /* Distance from Initial Bucket */
137 typedef uint8_t dib_raw_t;
138 #define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU) /* indicates DIB value is greater than representable */
139 #define DIB_RAW_REHASH ((dib_raw_t)0xfeU) /* entry yet to be rehashed during in-place resize */
140 #define DIB_RAW_FREE ((dib_raw_t)0xffU) /* a free bucket */
141 #define DIB_RAW_INIT ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */
143 #define DIB_FREE UINT_MAX
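/* Illustrative note on the encoding above: a DIB of 0..0xfc fits into the
 * dib_raw_t byte verbatim; anything larger is clamped to DIB_RAW_OVERFLOW
 * and recomputed from the key on demand (see bucket_calculate_dib()).
 * DIB_FREE is the corresponding "no entry" value on the unsigned, non-raw
 * side, matching DIB_RAW_FREE in the raw byte array. */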
145 #ifdef ENABLE_DEBUG_HASHMAP
146 struct hashmap_debug_info {
147 LIST_FIELDS(struct hashmap_debug_info, debug_list);
148 unsigned max_entries; /* high watermark of n_entries */
150 /* who allocated this hashmap */
155 /* fields to detect modification while iterating */
156 unsigned put_count; /* counts puts into the hashmap */
157 unsigned rem_count; /* counts removals from hashmap */
158 unsigned last_rem_idx; /* remembers last removal index */
161 /* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
162 static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
163 static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
165 #define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;
167 #else /* !ENABLE_DEBUG_HASHMAP */
168 #define HASHMAP_DEBUG_FIELDS
169 #endif /* ENABLE_DEBUG_HASHMAP */
173 HASHMAP_TYPE_ORDERED,
178 struct _packed_ indirect_storage {
179 void *storage; /* where buckets and DIBs are stored */
180 uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */
182 unsigned n_entries; /* number of stored entries */
183 unsigned n_buckets; /* number of buckets */
185 unsigned idx_lowest_entry; /* Index below which all buckets are free.
186 Makes "while(hashmap_steal_first())" loops
187 O(n) instead of O(n^2) for unordered hashmaps. */
188 uint8_t _pad[3]; /* padding for the whole HashmapBase */
189 /* The bitfields in HashmapBase complete the alignment of the whole thing. */
192 struct direct_storage {
193 /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
194 * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
195 * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
196 uint8_t storage[sizeof(struct indirect_storage)];
199 #define DIRECT_BUCKETS(entry_t) \
200 (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
202 /* We should be able to store at least one entry directly. */
203 assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);
205 /* We have 3 bits for n_direct_entries. */
206 assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
208 /* Hashmaps with directly stored entries all use this shared hash key.
209 * It's no big deal if the key is guessed, because there can be only
210 * a handful of directly stored entries in a hashmap. When a hashmap
211 * outgrows direct storage, it gets its own key for indirect storage. */
212 static uint8_t shared_hash_key[HASH_KEY_SIZE];
213 static bool shared_hash_key_initialized;
215 /* Fields that all hashmap/set types must have */
217 const struct hash_ops *hash_ops; /* hash and compare ops to use */
220 struct indirect_storage indirect; /* if has_indirect */
221 struct direct_storage direct; /* if !has_indirect */
224 enum HashmapType type:2; /* HASHMAP_TYPE_* */
225 bool has_indirect:1; /* whether indirect storage is used */
226 unsigned n_direct_entries:3; /* Number of entries in direct storage.
227 * Only valid if !has_indirect. */
228 bool from_pool:1; /* whether allocated from a mempool */
229 HASHMAP_DEBUG_FIELDS /* optional hashmap_debug_info */
232 /* Specific hash types
233 * HashmapBase must be at the beginning of each hashmap struct. */
236 struct HashmapBase b;
239 struct OrderedHashmap {
240 struct HashmapBase b;
241 unsigned iterate_list_head, iterate_list_tail;
245 struct HashmapBase b;
248 DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
249 DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
250 /* No need for a separate Set pool */
251 assert_cc(sizeof(Hashmap) == sizeof(Set));
253 struct hashmap_type_info {
256 struct mempool *mempool;
257 unsigned n_direct_buckets;
260 static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
261 [HASHMAP_TYPE_PLAIN] = {
262 .head_size = sizeof(Hashmap),
263 .entry_size = sizeof(struct plain_hashmap_entry),
264 .mempool = &hashmap_pool,
265 .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
267 [HASHMAP_TYPE_ORDERED] = {
268 .head_size = sizeof(OrderedHashmap),
269 .entry_size = sizeof(struct ordered_hashmap_entry),
270 .mempool = &ordered_hashmap_pool,
271 .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
273 [HASHMAP_TYPE_SET] = {
274 .head_size = sizeof(Set),
275 .entry_size = sizeof(struct set_entry),
276 .mempool = &hashmap_pool,
277 .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
281 static unsigned n_buckets(HashmapBase *h) {
282 return h->has_indirect ? h->indirect.n_buckets
283 : hashmap_type_info[h->type].n_direct_buckets;
286 static unsigned n_entries(HashmapBase *h) {
287 return h->has_indirect ? h->indirect.n_entries
288 : h->n_direct_entries;
291 static void n_entries_inc(HashmapBase *h) {
293 h->indirect.n_entries++;
295 h->n_direct_entries++;
298 static void n_entries_dec(HashmapBase *h) {
300 h->indirect.n_entries--;
302 h->n_direct_entries--;
305 static void *storage_ptr(HashmapBase *h) {
306 return h->has_indirect ? h->indirect.storage
310 static uint8_t *hash_key(HashmapBase *h) {
311 return h->has_indirect ? h->indirect.hash_key
315 static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
316 struct siphash state;
319 siphash24_init(&state, hash_key(h));
321 h->hash_ops->hash(p, &state);
323 hash = siphash24_finalize(&state);
325 return (unsigned) (hash % n_buckets(h));
327 #define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
329 static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
330 static uint8_t current[HASH_KEY_SIZE];
331 static bool current_initialized = false;
333 /* Returns a hash function key to use. In order to keep things
334 * fast we will not generate a new key each time we allocate a
335 * new hash table. Instead, we'll just reuse the most recently
336 * generated one, except if we never generated one or when we
337 * are rehashing an entire hash table because we reached a fill level. */
340 if (!current_initialized || !reuse_is_ok) {
341 random_bytes(current, sizeof(current));
342 current_initialized = true;
345 memcpy(hash_key, current, sizeof(current));
348 static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
349 return (struct hashmap_base_entry*)
350 ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
353 static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
354 return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
357 static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
358 return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
361 static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
362 return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
365 static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
366 return &swap->e[idx - _IDX_SWAP_BEGIN];
369 /* Returns a pointer to the bucket at index idx.
370 * Understands real indexes and swap indexes, hence "_virtual". */
371 static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
373 if (idx < _IDX_SWAP_BEGIN)
374 return bucket_at(h, idx);
376 if (idx < _IDX_SWAP_END)
377 return &bucket_at_swap(swap, idx)->p.b;
379 assert_not_reached("Invalid index");
382 static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
384 ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
387 static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
388 return idx >= from ? idx - from
389 : n_buckets(h) + idx - from;
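/* Worked example (illustrative): with n_buckets(h) == 8, an entry whose
 * initial bucket ("from") is 6 but which currently sits at idx == 1 has
 * wrapped around the end of the array, so its distance is 8 + 1 - 6 = 3. */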
392 static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
393 unsigned initial_bucket;
395 if (raw_dib == DIB_RAW_FREE)
398 if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
402 * Having an overflow DIB value is very unlikely. The hash function
403 * would have to be bad. For example, in a table of size 2^24 filled
404 * to load factor 0.9 the maximum observed DIB is only about 60.
405 * In theory (assuming I used Maxima correctly), for an infinite size
406 * hash table with load factor 0.8 the probability of a given entry
407 * having DIB > 40 is 1.9e-8.
408 * This returns the correct DIB value by recomputing the hash value in
409 * the unlikely case. XXX Hitting this case could be a hint to rehash.
411 initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
412 return bucket_distance(h, idx, initial_bucket);
415 static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
416 dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
419 static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
422 dibs = dib_raw_ptr(h);
424 for ( ; idx < n_buckets(h); idx++)
425 if (dibs[idx] != DIB_RAW_FREE)
431 static void bucket_mark_free(HashmapBase *h, unsigned idx) {
432 memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
433 bucket_set_dib(h, idx, DIB_FREE);
436 static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
437 unsigned from, unsigned to) {
438 struct hashmap_base_entry *e_from, *e_to;
442 e_from = bucket_at_virtual(h, swap, from);
443 e_to = bucket_at_virtual(h, swap, to);
445 memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);
447 if (h->type == HASHMAP_TYPE_ORDERED) {
448 OrderedHashmap *lh = (OrderedHashmap*) h;
449 struct ordered_hashmap_entry *le, *le_to;
451 le_to = (struct ordered_hashmap_entry*) e_to;
453 if (le_to->iterate_next != IDX_NIL) {
454 le = (struct ordered_hashmap_entry*)
455 bucket_at_virtual(h, swap, le_to->iterate_next);
456 le->iterate_previous = to;
459 if (le_to->iterate_previous != IDX_NIL) {
460 le = (struct ordered_hashmap_entry*)
461 bucket_at_virtual(h, swap, le_to->iterate_previous);
462 le->iterate_next = to;
465 if (lh->iterate_list_head == from)
466 lh->iterate_list_head = to;
467 if (lh->iterate_list_tail == from)
468 lh->iterate_list_tail = to;
472 static unsigned next_idx(HashmapBase *h, unsigned idx) {
473 return (idx + 1U) % n_buckets(h);
476 static unsigned prev_idx(HashmapBase *h, unsigned idx) {
477 return (n_buckets(h) + idx - 1U) % n_buckets(h);
480 static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
483 case HASHMAP_TYPE_PLAIN:
484 case HASHMAP_TYPE_ORDERED:
485 return ((struct plain_hashmap_entry*)e)->value;
487 case HASHMAP_TYPE_SET:
488 return (void*) e->key;
491 assert_not_reached("Unknown hashmap type");
495 static void base_remove_entry(HashmapBase *h, unsigned idx) {
496 unsigned left, right, prev, dib;
497 dib_raw_t raw_dib, *dibs;
499 dibs = dib_raw_ptr(h);
500 assert(dibs[idx] != DIB_RAW_FREE);
502 #ifdef ENABLE_DEBUG_HASHMAP
503 h->debug.rem_count++;
504 h->debug.last_rem_idx = idx;
508 /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
509 for (right = next_idx(h, left); ; right = next_idx(h, right)) {
510 raw_dib = dibs[right];
511 if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
514 /* The buckets are not supposed to be all occupied and with DIB > 0.
515 * That would mean we could make everyone better off by shifting them
516 * backward. This scenario is impossible. */
517 assert(left != right);
520 if (h->type == HASHMAP_TYPE_ORDERED) {
521 OrderedHashmap *lh = (OrderedHashmap*) h;
522 struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);
524 if (le->iterate_next != IDX_NIL)
525 ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
527 lh->iterate_list_tail = le->iterate_previous;
529 if (le->iterate_previous != IDX_NIL)
530 ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
532 lh->iterate_list_head = le->iterate_next;
535 /* Now shift all buckets in the interval (left, right) one step backwards */
536 for (prev = left, left = next_idx(h, left); left != right;
537 prev = left, left = next_idx(h, left)) {
538 dib = bucket_calculate_dib(h, left, dibs[left]);
540 bucket_move_entry(h, NULL, left, prev);
541 bucket_set_dib(h, prev, dib - 1);
544 bucket_mark_free(h, prev);
547 #define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
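/*
 * Illustrative example of the backward shift above: suppose four consecutive
 * buckets hold entries with DIBs 1, 2, 1, 0 and the first of them is removed.
 * The stop bucket ("right") is the one with DIB 0; the two entries in between
 * are each copied one slot to the left, their DIBs dropping to 1 and 0, and
 * the bucket just before "right" is marked free. No tombstones are needed and
 * no entry ends up farther from its initial bucket than it was before.
 */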
549 static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
550 struct ordered_hashmap_entry *e;
556 if (i->idx == IDX_NIL)
559 if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
562 if (i->idx == IDX_FIRST) {
563 idx = h->iterate_list_head;
564 e = ordered_bucket_at(h, idx);
567 e = ordered_bucket_at(h, idx);
569 * We allow removing the current entry while iterating, but removal may cause
570 * a backward shift. The next entry may thus move one bucket to the left.
571 * To detect when it happens, we remember the key pointer of the entry we were
572 * going to iterate next. If it does not match, there was a backward shift.
574 if (e->p.b.key != i->next_key) {
575 idx = prev_idx(HASHMAP_BASE(h), idx);
576 e = ordered_bucket_at(h, idx);
578 assert(e->p.b.key == i->next_key);
581 #ifdef ENABLE_DEBUG_HASHMAP
585 if (e->iterate_next != IDX_NIL) {
586 struct ordered_hashmap_entry *n;
587 i->idx = e->iterate_next;
588 n = ordered_bucket_at(h, i->idx);
589 i->next_key = n->p.b.key;
600 static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
606 if (i->idx == IDX_NIL)
609 if (i->idx == IDX_FIRST) {
610 /* fast forward to the first occupied bucket */
611 if (h->has_indirect) {
612 i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
613 h->indirect.idx_lowest_entry = i->idx;
615 i->idx = skip_free_buckets(h, 0);
617 if (i->idx == IDX_NIL)
620 struct hashmap_base_entry *e;
624 e = bucket_at(h, i->idx);
626 * We allow removing the current entry while iterating, but removal may cause
627 * a backward shift. The next entry may thus move one bucket to the left.
628 * To detect when it happens, we remember the key pointer of the entry we were
629 * going to iterate next. If it does not match, there was a backward shift.
631 if (e->key != i->next_key)
632 e = bucket_at(h, --i->idx);
634 assert(e->key == i->next_key);
638 #ifdef ENABLE_DEBUG_HASHMAP
642 i->idx = skip_free_buckets(h, i->idx + 1);
643 if (i->idx != IDX_NIL)
644 i->next_key = bucket_at(h, i->idx)->key;
655 static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
661 #ifdef ENABLE_DEBUG_HASHMAP
662 if (i->idx == IDX_FIRST) {
663 i->put_count = h->debug.put_count;
664 i->rem_count = h->debug.rem_count;
666 /* While iterating, must not add any new entries */
667 assert(i->put_count == h->debug.put_count);
668 /* ... or remove entries other than the current one */
669 assert(i->rem_count == h->debug.rem_count ||
670 (i->rem_count == h->debug.rem_count - 1 &&
671 i->prev_idx == h->debug.last_rem_idx));
672 /* Reset our removals counter */
673 i->rem_count = h->debug.rem_count;
677 return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
678 : hashmap_iterate_in_internal_order(h, i);
681 bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
682 struct hashmap_base_entry *e;
686 idx = hashmap_iterate_entry(h, i);
687 if (idx == IDX_NIL) {
696 e = bucket_at(h, idx);
697 data = entry_value(h, e);
706 bool set_iterate(Set *s, Iterator *i, void **value) {
707 return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
710 #define HASHMAP_FOREACH_IDX(idx, h, i) \
711 for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
713 (idx) = hashmap_iterate_entry((h), &(i)))
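/*
 * Illustrative sketch (assuming the public HASHMAP_FOREACH_KEY macro and
 * hashmap_iterate() wrapper from hashmap.h, plus log_debug() from log.h),
 * which sit on top of the same iterator machinery as HASHMAP_FOREACH_IDX.
 * As documented above, removing the entry currently being visited is the one
 * modification that is allowed while iterating.
 */
#if 0 /* example only, not compiled */
static void example_iteration(Hashmap *h) {
        Iterator i;
        const char *k;
        void *v;

        HASHMAP_FOREACH_KEY(v, k, h, i)
                log_debug("key=%s value=%p", k, v);
}
#endif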
715 static void reset_direct_storage(HashmapBase *h) {
716 const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
719 assert(!h->has_indirect);
721 p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
722 memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
725 static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
727 const struct hashmap_type_info *hi = &hashmap_type_info[type];
730 use_pool = is_main_thread();
732 h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
738 h->from_pool = use_pool;
739 h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
741 if (type == HASHMAP_TYPE_ORDERED) {
742 OrderedHashmap *lh = (OrderedHashmap*)h;
743 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
746 reset_direct_storage(h);
748 if (!shared_hash_key_initialized) {
749 random_bytes(shared_hash_key, sizeof(shared_hash_key));
750 shared_hash_key_initialized = true;
753 #ifdef ENABLE_DEBUG_HASHMAP
754 h->debug.func = func;
755 h->debug.file = file;
756 h->debug.line = line;
757 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
758 LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
759 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
765 Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
766 return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
769 OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
770 return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
773 Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
774 return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
777 static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
778 enum HashmapType type HASHMAP_DEBUG_PARAMS) {
786 q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
794 int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
795 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
798 int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
799 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
802 int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
803 return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
806 static void hashmap_free_no_clear(HashmapBase *h) {
807 assert(!h->has_indirect);
808 assert(!h->n_direct_entries);
810 #ifdef ENABLE_DEBUG_HASHMAP
811 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
812 LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
813 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
817 mempool_free_tile(hashmap_type_info[h->type].mempool, h);
822 HashmapBase *internal_hashmap_free(HashmapBase *h) {
824 /* Free the hashmap, but nothing in it */
827 internal_hashmap_clear(h);
828 hashmap_free_no_clear(h);
834 HashmapBase *internal_hashmap_free_free(HashmapBase *h) {
836 /* Free the hashmap and all data objects in it, but not the
837 * keys */
840 internal_hashmap_clear_free(h);
841 hashmap_free_no_clear(h);
847 Hashmap *hashmap_free_free_free(Hashmap *h) {
849 /* Free the hashmap and all data and key objects in it */
852 hashmap_clear_free_free(h);
853 hashmap_free_no_clear(HASHMAP_BASE(h));
859 void internal_hashmap_clear(HashmapBase *h) {
863 if (h->has_indirect) {
864 free(h->indirect.storage);
865 h->has_indirect = false;
868 h->n_direct_entries = 0;
869 reset_direct_storage(h);
871 if (h->type == HASHMAP_TYPE_ORDERED) {
872 OrderedHashmap *lh = (OrderedHashmap*) h;
873 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
877 void internal_hashmap_clear_free(HashmapBase *h) {
883 for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
884 idx = skip_free_buckets(h, idx + 1))
885 free(entry_value(h, bucket_at(h, idx)));
887 internal_hashmap_clear(h);
890 void hashmap_clear_free_free(Hashmap *h) {
896 for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
897 idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
898 struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
899 free((void*)e->b.key);
903 internal_hashmap_clear(HASHMAP_BASE(h));
906 static int resize_buckets(HashmapBase *h, unsigned entries_add);
909 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
910 * Performs Robin Hood swaps as it goes. The entry to put must be placed
911 * by the caller into swap slot IDX_PUT.
912 * If used for in-place resizing, may leave a displaced entry in swap slot
913 * IDX_PUT. Caller must rehash it next.
914 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
915 * false otherwise.
917 static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
918 struct swap_entries *swap) {
919 dib_raw_t raw_dib, *dibs;
920 unsigned dib, distance;
922 #ifdef ENABLE_DEBUG_HASHMAP
923 h->debug.put_count++;
926 dibs = dib_raw_ptr(h);
928 for (distance = 0; ; distance++) {
930 if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
931 if (raw_dib == DIB_RAW_REHASH)
932 bucket_move_entry(h, swap, idx, IDX_TMP);
934 if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
935 h->indirect.idx_lowest_entry = idx;
937 bucket_set_dib(h, idx, distance);
938 bucket_move_entry(h, swap, IDX_PUT, idx);
939 if (raw_dib == DIB_RAW_REHASH) {
940 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
947 dib = bucket_calculate_dib(h, idx, raw_dib);
949 if (dib < distance) {
950 /* Found a wealthier entry. Go Robin Hood! */
951 bucket_set_dib(h, idx, distance);
953 /* swap the entries */
954 bucket_move_entry(h, swap, idx, IDX_TMP);
955 bucket_move_entry(h, swap, IDX_PUT, idx);
956 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
961 idx = next_idx(h, idx);
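/*
 * Illustrative example of the loop above: if the entry being placed has
 * already probed three buckets (distance == 3) and lands on a resident whose
 * own DIB is 1, the resident is "wealthier" (closer to its initial bucket),
 * so the new entry takes the bucket with DIB 3 and the displaced resident
 * continues the scan from the next bucket, its distance resuming from its
 * old DIB of 1 rather than from zero. Averaged over many insertions this is
 * what keeps displacements roughly equal across entries.
 */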
966 * Puts an entry into a hashmap, boldly - no check whether key already exists.
967 * The caller must place the entry (only its key and value, not link indexes)
968 * in swap slot IDX_PUT.
969 * Caller must ensure: the key does not exist yet in the hashmap,
970 * and that resize is not needed if !may_resize.
971 * Returns: 1 if entry was put successfully.
972 * -ENOMEM if may_resize==true and resize failed with -ENOMEM.
973 * Cannot return -ENOMEM if !may_resize.
975 static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
976 struct swap_entries *swap, bool may_resize) {
977 struct ordered_hashmap_entry *new_entry;
980 assert(idx < n_buckets(h));
982 new_entry = bucket_at_swap(swap, IDX_PUT);
985 r = resize_buckets(h, 1);
989 idx = bucket_hash(h, new_entry->p.b.key);
991 assert(n_entries(h) < n_buckets(h));
993 if (h->type == HASHMAP_TYPE_ORDERED) {
994 OrderedHashmap *lh = (OrderedHashmap*) h;
996 new_entry->iterate_next = IDX_NIL;
997 new_entry->iterate_previous = lh->iterate_list_tail;
999 if (lh->iterate_list_tail != IDX_NIL) {
1000 struct ordered_hashmap_entry *old_tail;
1002 old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
1003 assert(old_tail->iterate_next == IDX_NIL);
1004 old_tail->iterate_next = IDX_PUT;
1007 lh->iterate_list_tail = IDX_PUT;
1008 if (lh->iterate_list_head == IDX_NIL)
1009 lh->iterate_list_head = IDX_PUT;
1012 assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
1015 #ifdef ENABLE_DEBUG_HASHMAP
1016 h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
1021 #define hashmap_put_boldly(h, idx, swap, may_resize) \
1022 hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1025 * Returns 0 if resize is not needed.
1026 * 1 if successfully resized.
1027 * -ENOMEM on allocation failure.
1029 static int resize_buckets(HashmapBase *h, unsigned entries_add) {
1030 struct swap_entries swap;
1032 dib_raw_t *old_dibs, *new_dibs;
1033 const struct hashmap_type_info *hi;
1034 unsigned idx, optimal_idx;
1035 unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
1041 hi = &hashmap_type_info[h->type];
1042 new_n_entries = n_entries(h) + entries_add;
1045 if (_unlikely_(new_n_entries < entries_add))
1048 /* For direct storage we allow 100% load, because it's tiny. */
1049 if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
1053 * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
1054 * From this it follows: m = n + n/(INV_KEEP_FREE - 1)
1056 new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
1058 if (_unlikely_(new_n_buckets < new_n_entries))
1061 if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
1064 old_n_buckets = n_buckets(h);
1066 if (_likely_(new_n_buckets <= old_n_buckets))
1069 new_shift = log2u_round_up(MAX(
1070 new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
1071 2 * sizeof(struct direct_storage)));
1073 /* Realloc storage (buckets and DIB array). */
1074 new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
1079 /* Must upgrade direct to indirect storage. */
1080 if (!h->has_indirect) {
1081 memcpy(new_storage, h->direct.storage,
1082 old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
1083 h->indirect.n_entries = h->n_direct_entries;
1084 h->indirect.idx_lowest_entry = 0;
1085 h->n_direct_entries = 0;
1088 /* Get a new hash key. If we've just upgraded to indirect storage,
1089 * allow reusing a previously generated key. It's still a different key
1090 * from the shared one that we used for direct storage. */
1091 get_hash_key(h->indirect.hash_key, !h->has_indirect);
1093 h->has_indirect = true;
1094 h->indirect.storage = new_storage;
1095 h->indirect.n_buckets = (1U << new_shift) /
1096 (hi->entry_size + sizeof(dib_raw_t));
1098 old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
1099 new_dibs = dib_raw_ptr(h);
1102 * Move the DIB array to the new place, replacing valid DIB values with
1103 * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
1104 * Note: Overlap is not possible, because we have at least doubled the
1105 * number of buckets and dib_raw_t is smaller than any entry type.
1107 for (idx = 0; idx < old_n_buckets; idx++) {
1108 assert(old_dibs[idx] != DIB_RAW_REHASH);
1109 new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
1113 /* Zero the area of newly added entries (including the old DIB area) */
1114 memzero(bucket_at(h, old_n_buckets),
1115 (n_buckets(h) - old_n_buckets) * hi->entry_size);
1117 /* The upper half of the new DIB array needs initialization */
1118 memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
1119 (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));
1121 /* Rehash entries that need it */
1123 for (idx = 0; idx < old_n_buckets; idx++) {
1124 if (new_dibs[idx] != DIB_RAW_REHASH)
1127 optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);
1130 * Not much to do if by luck the entry hashes to its current
1131 * location. Just set its DIB.
1133 if (optimal_idx == idx) {
1139 new_dibs[idx] = DIB_RAW_FREE;
1140 bucket_move_entry(h, &swap, idx, IDX_PUT);
1141 /* bucket_move_entry does not clear the source */
1142 memzero(bucket_at(h, idx), hi->entry_size);
1146 * Find the new bucket for the current entry. This may make
1147 * another entry homeless and load it into IDX_PUT.
1149 rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
1152 /* Did the current entry displace another one? */
1154 optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
1155 } while (rehash_next);
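/* Illustrative walk-through of the in-place rehash above: every old bucket
 * was first tagged DIB_RAW_REHASH. The outer loop picks one such bucket,
 * marks it free, loads its entry into swap slot IDX_PUT and re-inserts it at
 * its new optimal position via hashmap_put_robin_hood(). If that insertion
 * lands on a bucket still holding a not-yet-rehashed entry, the stale entry
 * comes back in IDX_PUT and the do/while loop re-inserts it next, so every
 * entry is rehashed exactly once (cf. the assertion below). */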
1158 assert(n_rehashed == n_entries(h));
1164 * Finds an entry with a matching key.
1165 * Returns: index of the found entry, or IDX_NIL if not found.
1167 static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
1168 struct hashmap_base_entry *e;
1169 unsigned dib, distance;
1170 dib_raw_t *dibs = dib_raw_ptr(h);
1172 assert(idx < n_buckets(h));
1174 for (distance = 0; ; distance++) {
1175 if (dibs[idx] == DIB_RAW_FREE)
1178 dib = bucket_calculate_dib(h, idx, dibs[idx]);
1182 if (dib == distance) {
1183 e = bucket_at(h, idx);
1184 if (h->hash_ops->compare(e->key, key) == 0)
1188 idx = next_idx(h, idx);
1191 #define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
1193 int hashmap_put(Hashmap *h, const void *key, void *value) {
1194 struct swap_entries swap;
1195 struct plain_hashmap_entry *e;
1200 hash = bucket_hash(h, key);
1201 idx = bucket_scan(h, hash, key);
1202 if (idx != IDX_NIL) {
1203 e = plain_bucket_at(h, idx);
1204 if (e->value == value)
1209 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1212 return hashmap_put_boldly(h, hash, &swap, true);
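/*
 * Illustrative caller sketch: hashmap_put() returns 1 when a new entry was
 * stored, 0 when this exact key/value pair was already present, -EEXIST when
 * the key maps to a different value, and -ENOMEM when growing the table
 * failed. A typical call site (assuming log_debug() from log.h) therefore
 * treats only negative values other than -EEXIST as fatal:
 */
#if 0 /* example only, not compiled */
static int example_put(Hashmap *h, const char *name, void *obj) {
        int r;

        r = hashmap_put(h, name, obj);
        if (r == -EEXIST)
                log_debug("'%s' is already mapped to a different object", name);
        else if (r < 0)
                return r;

        return 0;
}
#endif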
1215 int set_put(Set *s, const void *key) {
1216 struct swap_entries swap;
1217 struct hashmap_base_entry *e;
1222 hash = bucket_hash(s, key);
1223 idx = bucket_scan(s, hash, key);
1227 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1229 return hashmap_put_boldly(s, hash, &swap, true);
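/* Illustrative note: unlike hashmap_put() above, set_put() has no value to
 * compare against, so an already-present key simply yields 0; 1 means the
 * key was newly added, and negative values (-ENOMEM from resizing) are
 * errors. */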
1232 int hashmap_replace(Hashmap *h, const void *key, void *value) {
1233 struct swap_entries swap;
1234 struct plain_hashmap_entry *e;
1239 hash = bucket_hash(h, key);
1240 idx = bucket_scan(h, hash, key);
1241 if (idx != IDX_NIL) {
1242 e = plain_bucket_at(h, idx);
1243 #ifdef ENABLE_DEBUG_HASHMAP
1244 /* Although the key is equal, the key pointer may have changed,
1245 * and this would break our assumption for iterating. So count
1246 * this operation as incompatible with iteration. */
1247 if (e->b.key != key) {
1248 h->b.debug.put_count++;
1249 h->b.debug.rem_count++;
1250 h->b.debug.last_rem_idx = idx;
1258 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1261 return hashmap_put_boldly(h, hash, &swap, true);
1264 int hashmap_update(Hashmap *h, const void *key, void *value) {
1265 struct plain_hashmap_entry *e;
1270 hash = bucket_hash(h, key);
1271 idx = bucket_scan(h, hash, key);
1275 e = plain_bucket_at(h, idx);
1280 void *internal_hashmap_get(HashmapBase *h, const void *key) {
1281 struct hashmap_base_entry *e;
1287 hash = bucket_hash(h, key);
1288 idx = bucket_scan(h, hash, key);
1292 e = bucket_at(h, idx);
1293 return entry_value(h, e);
1296 void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
1297 struct plain_hashmap_entry *e;
1303 hash = bucket_hash(h, key);
1304 idx = bucket_scan(h, hash, key);
1308 e = plain_bucket_at(h, idx);
1310 *key2 = (void*) e->b.key;
1315 bool internal_hashmap_contains(HashmapBase *h, const void *key) {
1321 hash = bucket_hash(h, key);
1322 return bucket_scan(h, hash, key) != IDX_NIL;
1325 void *internal_hashmap_remove(HashmapBase *h, const void *key) {
1326 struct hashmap_base_entry *e;
1333 hash = bucket_hash(h, key);
1334 idx = bucket_scan(h, hash, key);
1338 e = bucket_at(h, idx);
1339 data = entry_value(h, e);
1340 remove_entry(h, idx);
1345 void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1346 struct plain_hashmap_entry *e;
1356 hash = bucket_hash(h, key);
1357 idx = bucket_scan(h, hash, key);
1358 if (idx == IDX_NIL) {
1364 e = plain_bucket_at(h, idx);
1367 *rkey = (void*) e->b.key;
1369 remove_entry(h, idx);
1374 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1375 struct swap_entries swap;
1376 struct plain_hashmap_entry *e;
1377 unsigned old_hash, new_hash, idx;
1382 old_hash = bucket_hash(h, old_key);
1383 idx = bucket_scan(h, old_hash, old_key);
1387 new_hash = bucket_hash(h, new_key);
1388 if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
1391 remove_entry(h, idx);
1393 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1396 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1401 #if 0 /// UNNEEDED by elogind
1402 int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
1403 struct swap_entries swap;
1404 struct hashmap_base_entry *e;
1405 unsigned old_hash, new_hash, idx;
1410 old_hash = bucket_hash(s, old_key);
1411 idx = bucket_scan(s, old_hash, old_key);
1415 new_hash = bucket_hash(s, new_key);
1416 if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
1419 remove_entry(s, idx);
1421 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1423 assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
1429 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1430 struct swap_entries swap;
1431 struct plain_hashmap_entry *e;
1432 unsigned old_hash, new_hash, idx_old, idx_new;
1437 old_hash = bucket_hash(h, old_key);
1438 idx_old = bucket_scan(h, old_hash, old_key);
1439 if (idx_old == IDX_NIL)
1442 old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
1444 new_hash = bucket_hash(h, new_key);
1445 idx_new = bucket_scan(h, new_hash, new_key);
1446 if (idx_new != IDX_NIL)
1447 if (idx_old != idx_new) {
1448 remove_entry(h, idx_new);
1449 /* Compensate for a possible backward shift. */
1450 if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
1451 idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
1452 assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
1455 remove_entry(h, idx_old);
1457 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1460 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1465 void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
1466 struct plain_hashmap_entry *e;
1472 hash = bucket_hash(h, key);
1473 idx = bucket_scan(h, hash, key);
1477 e = plain_bucket_at(h, idx);
1478 if (e->value != value)
1481 remove_entry(h, idx);
1486 static unsigned find_first_entry(HashmapBase *h) {
1487 Iterator i = ITERATOR_FIRST;
1489 if (!h || !n_entries(h))
1492 return hashmap_iterate_entry(h, &i);
1495 void *internal_hashmap_first(HashmapBase *h) {
1498 idx = find_first_entry(h);
1502 return entry_value(h, bucket_at(h, idx));
1505 void *internal_hashmap_first_key(HashmapBase *h) {
1506 struct hashmap_base_entry *e;
1509 idx = find_first_entry(h);
1513 e = bucket_at(h, idx);
1514 return (void*) e->key;
1517 void *internal_hashmap_steal_first(HashmapBase *h) {
1518 struct hashmap_base_entry *e;
1522 idx = find_first_entry(h);
1526 e = bucket_at(h, idx);
1527 data = entry_value(h, e);
1528 remove_entry(h, idx);
1533 void *internal_hashmap_steal_first_key(HashmapBase *h) {
1534 struct hashmap_base_entry *e;
1538 idx = find_first_entry(h);
1542 e = bucket_at(h, idx);
1543 key = (void*) e->key;
1544 remove_entry(h, idx);
1549 unsigned internal_hashmap_size(HashmapBase *h) {
1554 return n_entries(h);
1557 unsigned internal_hashmap_buckets(HashmapBase *h) {
1562 return n_buckets(h);
1565 int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
1571 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1572 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
1575 r = hashmap_put(h, pe->b.key, pe->value);
1576 if (r < 0 && r != -EEXIST)
1583 int set_merge(Set *s, Set *other) {
1589 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1590 struct set_entry *se = set_bucket_at(other, idx);
1593 r = set_put(s, se->b.key);
1601 int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
1606 r = resize_buckets(h, entries_add);
1614 * The same as hashmap_merge(), but every new item from other is moved to h.
1615 * Keys already in h are skipped and stay in other.
1616 * Returns: 0 on success.
1617 * -ENOMEM on alloc failure, in which case no move has been done.
1619 int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
1620 struct swap_entries swap;
1621 struct hashmap_base_entry *e, *n;
1631 assert(other->type == h->type);
1634 * This reserves buckets for the worst case, where none of other's
1635 * entries are yet present in h. This is preferable to risking
1636 * an allocation failure in the middle of the move and having to
1637 * roll back or return a partial result.
1639 r = resize_buckets(h, n_entries(other));
1643 HASHMAP_FOREACH_IDX(idx, other, i) {
1646 e = bucket_at(other, idx);
1647 h_hash = bucket_hash(h, e->key);
1648 if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
1651 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1653 if (h->type != HASHMAP_TYPE_SET)
1654 ((struct plain_hashmap_entry*) n)->value =
1655 ((struct plain_hashmap_entry*) e)->value;
1656 assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);
1658 remove_entry(other, idx);
1664 int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
1665 struct swap_entries swap;
1666 unsigned h_hash, other_hash, idx;
1667 struct hashmap_base_entry *e, *n;
1672 h_hash = bucket_hash(h, key);
1673 if (bucket_scan(h, h_hash, key) != IDX_NIL)
1679 assert(other->type == h->type);
1681 other_hash = bucket_hash(other, key);
1682 idx = bucket_scan(other, other_hash, key);
1686 e = bucket_at(other, idx);
1688 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1690 if (h->type != HASHMAP_TYPE_SET)
1691 ((struct plain_hashmap_entry*) n)->value =
1692 ((struct plain_hashmap_entry*) e)->value;
1693 r = hashmap_put_boldly(h, h_hash, &swap, true);
1697 remove_entry(other, idx);
1701 HashmapBase *internal_hashmap_copy(HashmapBase *h) {
1707 copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
1712 case HASHMAP_TYPE_PLAIN:
1713 case HASHMAP_TYPE_ORDERED:
1714 r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
1716 case HASHMAP_TYPE_SET:
1717 r = set_merge((Set*)copy, (Set*)h);
1720 assert_not_reached("Unknown hashmap type");
1724 internal_hashmap_free(copy);
1731 char **internal_hashmap_get_strv(HashmapBase *h) {
1736 sv = new(char*, n_entries(h)+1);
1741 HASHMAP_FOREACH_IDX(idx, h, i)
1742 sv[n++] = entry_value(h, bucket_at(h, idx));
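/*
 * Illustrative sketch (assuming the hashmap_get_strv() wrapper from hashmap.h,
 * STRV_FOREACH() from strv.h, _cleanup_free_ from alloc-util.h and log_debug()
 * from log.h): the returned array is newly allocated, but it is a shallow
 * copy, so the values it points to are still owned by the hashmap.
 */
#if 0 /* example only, not compiled */
static void example_get_strv(Hashmap *h_of_strings) {
        _cleanup_free_ char **sv = NULL;
        char **s;

        sv = hashmap_get_strv(h_of_strings);
        if (!sv)
                return;

        STRV_FOREACH(s, sv)
                log_debug("value: %s", *s);
}
#endif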
1748 #if 0 /// UNNEEDED by elogind
1749 void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
1750 struct ordered_hashmap_entry *e;
1756 hash = bucket_hash(h, key);
1757 idx = bucket_scan(h, hash, key);
1761 e = ordered_bucket_at(h, idx);
1762 if (e->iterate_next == IDX_NIL)
1764 return ordered_bucket_at(h, e->iterate_next)->p.value;
1768 int set_consume(Set *s, void *value) {
1771 r = set_put(s, value);
1778 int set_put_strdup(Set *s, const char *p) {
1789 r = set_consume(s, c);
1796 #if 0 /// UNNEEDED by elogind
1797 int set_put_strdupv(Set *s, char **l) {
1801 STRV_FOREACH(i, l) {
1802 r = set_put_strdup(s, *i);