[elogind.git] / src / basic / hashmap.c
1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4   This file is part of systemd.
5
6   Copyright 2010 Lennart Poettering
7   Copyright 2014 Michal Schmidt
8
9   systemd is free software; you can redistribute it and/or modify it
10   under the terms of the GNU Lesser General Public License as published by
11   the Free Software Foundation; either version 2.1 of the License, or
12   (at your option) any later version.
13
14   systemd is distributed in the hope that it will be useful, but
15   WITHOUT ANY WARRANTY; without even the implied warranty of
16   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17   Lesser General Public License for more details.
18
19   You should have received a copy of the GNU Lesser General Public License
20   along with systemd; If not, see <http://www.gnu.org/licenses/>.
21 ***/
22
23 #include <errno.h>
24 #include <stdint.h>
25 #include <stdlib.h>
26 #include <string.h>
27
28 #include "alloc-util.h"
29 #include "hashmap.h"
30 #include "macro.h"
31 #include "mempool.h"
32 #include "process-util.h"
33 #include "random-util.h"
34 #include "set.h"
35 #include "siphash24.h"
36 #include "strv.h"
37 #include "util.h"
38
39 #ifdef ENABLE_DEBUG_HASHMAP
40 #include <pthread.h>
41 #include "list.h"
42 #endif
43
44 /*
45  * Implementation of hashmaps.
46  * Addressing: open
47  *   - uses less RAM compared to closed addressing (chaining), because
48  *     our entries are small (especially in Sets, which tend to contain
49  *     the majority of entries in systemd).
50  * Collision resolution: Robin Hood
51  *   - tends to equalize displacement of entries from their optimal buckets.
52  * Probe sequence: linear
53  *   - though theoretically worse than random probing/uniform hashing/double
54  *     hashing, it is good for cache locality.
55  *
56  * References:
57  * Celis, P. 1986. Robin Hood Hashing.
58  * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
59  * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
60  * - The results are derived for random probing. Suggests deletion with
61  *   tombstones and two mean-centered search methods. None of that works
62  *   well for linear probing.
63  *
64  * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
65  * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
66  * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
67  * http://www.math.uu.se/~svante/papers/sj157.pdf
68  * - Applies to Robin Hood with linear probing. Contains remarks on
69  *   the unsuitability of mean-centered search with linear probing.
70  *
71  * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
72  * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
73  * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
74  * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
75  *   in a successful search), and Janson writes about displacement. C = d + 1.
76  *
77  * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
78  * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
79  * - Explanation of backward shift deletion with pictures.
80  *
81  * Khuong, P. 2013. The Other Robin Hood Hashing.
82  * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
83  * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
84  */
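/*
 * Illustrative example of the Robin Hood rule used below: a probing
 * entry that reaches a bucket whose resident is "richer" (has a smaller
 * DIB, i.e. sits closer to its initial bucket) evicts that resident and
 * takes its place; the evicted entry then continues probing. E.g. an
 * entry with initial bucket 5 probing buckets 5, 6, 7 with resident DIBs
 * 0, 1, 0 settles in bucket 7 with DIB 2, because the resident there had
 * DIB 0 < 2; that resident is then re-inserted starting from bucket 8.
 * This equalizes displacements instead of letting a few unlucky entries
 * probe very far.
 */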
85
86 /*
87  * XXX Ideas for improvement:
88  * For unordered hashmaps, randomize iteration order, similarly to Perl:
89  * http://blog.booking.com/hardening-perls-hash-function.html
90  */
91
92 /* INV_KEEP_FREE = 1 / (1 - max_load_factor)
93  * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
94 #define INV_KEEP_FREE            5U
95
96 /* Fields common to entries of all hashmap/set types */
97 struct hashmap_base_entry {
98         const void *key;
99 };
100
101 /* Entry types for specific hashmap/set types
102  * hashmap_base_entry must be at the beginning of each entry struct. */
103
104 struct plain_hashmap_entry {
105         struct hashmap_base_entry b;
106         void *value;
107 };
108
109 struct ordered_hashmap_entry {
110         struct plain_hashmap_entry p;
111         unsigned iterate_next, iterate_previous;
112 };
113
114 struct set_entry {
115         struct hashmap_base_entry b;
116 };
117
118 /* In several functions it is advantageous to have the hash table extended
119  * virtually by a couple of additional buckets. We reserve special index values
120  * for these "swap" buckets. */
121 #define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
122 #define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
123 #define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
124 #define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)
125
126 #define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
127 #define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */
128
129 assert_cc(IDX_FIRST == _IDX_SWAP_END);
130 assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
131
132 /* Storage space for the "swap" buckets.
133  * All entry types can fit into an ordered_hashmap_entry. */
134 struct swap_entries {
135         struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
136 };
137
138 /* Distance from Initial Bucket */
139 typedef uint8_t dib_raw_t;
140 #define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
141 #define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
142 #define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
143 #define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */
144
145 #define DIB_FREE UINT_MAX
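/*
 * Example of the raw DIB encoding (values are from the defines above):
 * raw bytes 0x00..0xfc store the DIB exactly; 0xfd means "253 or more,
 * recompute it from the key's hash"; 0xfe marks an entry still awaiting
 * rehash during an in-place resize; 0xff marks a free bucket. One byte
 * per bucket thus suffices even though displacements are unbounded.
 */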
146
147 #ifdef ENABLE_DEBUG_HASHMAP
148 struct hashmap_debug_info {
149         LIST_FIELDS(struct hashmap_debug_info, debug_list);
150         unsigned max_entries;  /* high watermark of n_entries */
151
152         /* who allocated this hashmap */
153         int line;
154         const char *file;
155         const char *func;
156
157         /* fields to detect modification while iterating */
158         unsigned put_count;    /* counts puts into the hashmap */
159         unsigned rem_count;    /* counts removals from hashmap */
160         unsigned last_rem_idx; /* remembers last removal index */
161 };
162
163 /* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
164 static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
165 static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
166
167 #define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;
168
169 #else /* !ENABLE_DEBUG_HASHMAP */
170 #define HASHMAP_DEBUG_FIELDS
171 #endif /* ENABLE_DEBUG_HASHMAP */
172
173 enum HashmapType {
174         HASHMAP_TYPE_PLAIN,
175         HASHMAP_TYPE_ORDERED,
176         HASHMAP_TYPE_SET,
177         _HASHMAP_TYPE_MAX
178 };
179
180 struct _packed_ indirect_storage {
181         char    *storage;                  /* where buckets and DIBs are stored */
182         uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */
183
184         unsigned n_entries;                /* number of stored entries */
185         unsigned n_buckets;                /* number of buckets */
186
187         unsigned idx_lowest_entry;         /* Index below which all buckets are free.
188                                               Makes "while(hashmap_steal_first())" loops
189                                               O(n) instead of O(n^2) for unordered hashmaps. */
190         uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
191         /* The bitfields in HashmapBase complete the alignment of the whole thing. */
192 };
193
194 struct direct_storage {
195         /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
196          * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
197          *              or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
198         char storage[sizeof(struct indirect_storage)];
199 };
200
201 #define DIRECT_BUCKETS(entry_t) \
202         (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
203
204 /* We should be able to store at least one entry directly. */
205 assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);
206
207 /* We have 3 bits for n_direct_entries. */
208 assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
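/*
 * Worked example (illustrative, 64bit, assuming HASH_KEY_SIZE == 16):
 * direct storage is 39 bytes; a set_entry is a single 8-byte pointer and
 * each bucket needs one extra DIB byte, so DIRECT_BUCKETS(struct
 * set_entry) = 39 / (8 + 1) = 4, and an ordered_hashmap_entry
 * (8 + 8 + 4 + 4 = 24 bytes) yields 39 / (24 + 1) = 1, satisfying both
 * asserts above.
 */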
209
210 /* Hashmaps with directly stored entries all use this shared hash key.
211  * It's no big deal if the key is guessed, because there can be only
212  * a handful of directly stored entries in a hashmap. When a hashmap
213  * outgrows direct storage, it gets its own key for indirect storage. */
214 static uint8_t shared_hash_key[HASH_KEY_SIZE];
215 static bool shared_hash_key_initialized;
216
217 /* Fields that all hashmap/set types must have */
218 struct HashmapBase {
219         const struct hash_ops *hash_ops;  /* hash and compare ops to use */
220
221         union _packed_ {
222                 struct indirect_storage indirect; /* if  has_indirect */
223                 struct direct_storage direct;     /* if !has_indirect */
224         };
225
226         enum HashmapType type:2;     /* HASHMAP_TYPE_* */
227         bool has_indirect:1;         /* whether indirect storage is used */
228         unsigned n_direct_entries:3; /* Number of entries in direct storage.
229                                       * Only valid if !has_indirect. */
230         bool from_pool:1;            /* whether was allocated from mempool */
231         HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
232 };
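/*
 * Size check (illustrative, 64bit, no debug fields): 8 bytes of ops
 * pointer + 39 bytes of packed union + 1 byte of bitfields = 48 bytes,
 * i.e. the _pad[3] in indirect_storage rounds the whole struct up to
 * 8-byte alignment, as promised there.
 */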
233
234 /* Specific hash types
235  * HashmapBase must be at the beginning of each hashmap struct. */
236
237 struct Hashmap {
238         struct HashmapBase b;
239 };
240
241 struct OrderedHashmap {
242         struct HashmapBase b;
243         unsigned iterate_list_head, iterate_list_tail;
244 };
245
246 struct Set {
247         struct HashmapBase b;
248 };
249
250 DEFINE_MEMPOOL(hashmap_pool,         Hashmap,        8);
251 DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
252 /* No need for a separate Set pool */
253 assert_cc(sizeof(Hashmap) == sizeof(Set));
254
255 struct hashmap_type_info {
256         size_t head_size;
257         size_t entry_size;
258         struct mempool *mempool;
259         unsigned n_direct_buckets;
260 };
261
262 static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
263         [HASHMAP_TYPE_PLAIN] = {
264                 .head_size        = sizeof(Hashmap),
265                 .entry_size       = sizeof(struct plain_hashmap_entry),
266                 .mempool          = &hashmap_pool,
267                 .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
268         },
269         [HASHMAP_TYPE_ORDERED] = {
270                 .head_size        = sizeof(OrderedHashmap),
271                 .entry_size       = sizeof(struct ordered_hashmap_entry),
272                 .mempool          = &ordered_hashmap_pool,
273                 .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
274         },
275         [HASHMAP_TYPE_SET] = {
276                 .head_size        = sizeof(Set),
277                 .entry_size       = sizeof(struct set_entry),
278                 .mempool          = &hashmap_pool,
279                 .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
280         },
281 };
282
283 void string_hash_func(const void *p, struct siphash *state) {
284         siphash24_compress(p, strlen(p) + 1, state);
285 }
286
287 int string_compare_func(const void *a, const void *b) {
288         return strcmp(a, b);
289 }
290
291 const struct hash_ops string_hash_ops = {
292         .hash = string_hash_func,
293         .compare = string_compare_func
294 };
295
296 void trivial_hash_func(const void *p, struct siphash *state) {
297         siphash24_compress(&p, sizeof(p), state);
298 }
299
300 int trivial_compare_func(const void *a, const void *b) {
301         return a < b ? -1 : (a > b ? 1 : 0);
302 }
303
304 const struct hash_ops trivial_hash_ops = {
305         .hash = trivial_hash_func,
306         .compare = trivial_compare_func
307 };
308
309 void uint64_hash_func(const void *p, struct siphash *state) {
310         siphash24_compress(p, sizeof(uint64_t), state);
311 }
312
313 int uint64_compare_func(const void *_a, const void *_b) {
314         uint64_t a, b;
315         a = *(const uint64_t*) _a;
316         b = *(const uint64_t*) _b;
317         return a < b ? -1 : (a > b ? 1 : 0);
318 }
319
320 const struct hash_ops uint64_hash_ops = {
321         .hash = uint64_hash_func,
322         .compare = uint64_compare_func
323 };
324
325 #if SIZEOF_DEV_T != 8
326 void devt_hash_func(const void *p, struct siphash *state) {
327         siphash24_compress(p, sizeof(dev_t), state);
328 }
329
330 int devt_compare_func(const void *_a, const void *_b) {
331         dev_t a, b;
332         a = *(const dev_t*) _a;
333         b = *(const dev_t*) _b;
334         return a < b ? -1 : (a > b ? 1 : 0);
335 }
336
337 const struct hash_ops devt_hash_ops = {
338         .hash = devt_hash_func,
339         .compare = devt_compare_func
340 };
341 #endif
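/*
 * Callers can index by any key type by supplying their own ops in this
 * same pattern. A hypothetical sketch (not part of this file; the struct
 * and function names below are made up for illustration):
 */
#if 0 /* example only */
struct addr_key {
        uint32_t ip;
        uint16_t port;
};

static void addr_key_hash_func(const void *p, struct siphash *state) {
        const struct addr_key *k = p;

        /* Compress each field separately so padding bytes never leak
         * into the hash. */
        siphash24_compress(&k->ip, sizeof(k->ip), state);
        siphash24_compress(&k->port, sizeof(k->port), state);
}

static int addr_key_compare_func(const void *_a, const void *_b) {
        const struct addr_key *a = _a, *b = _b;

        if (a->ip != b->ip)
                return a->ip < b->ip ? -1 : 1;
        if (a->port != b->port)
                return a->port < b->port ? -1 : 1;
        return 0;
}

static const struct hash_ops addr_key_hash_ops = {
        .hash = addr_key_hash_func,
        .compare = addr_key_compare_func
};
#endif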
342
343 static unsigned n_buckets(HashmapBase *h) {
344         return h->has_indirect ? h->indirect.n_buckets
345                                : hashmap_type_info[h->type].n_direct_buckets;
346 }
347
348 static unsigned n_entries(HashmapBase *h) {
349         return h->has_indirect ? h->indirect.n_entries
350                                : h->n_direct_entries;
351 }
352
353 static void n_entries_inc(HashmapBase *h) {
354         if (h->has_indirect)
355                 h->indirect.n_entries++;
356         else
357                 h->n_direct_entries++;
358 }
359
360 static void n_entries_dec(HashmapBase *h) {
361         if (h->has_indirect)
362                 h->indirect.n_entries--;
363         else
364                 h->n_direct_entries--;
365 }
366
367 static char *storage_ptr(HashmapBase *h) {
368         return h->has_indirect ? h->indirect.storage
369                                : h->direct.storage;
370 }
371
372 static uint8_t *hash_key(HashmapBase *h) {
373         return h->has_indirect ? h->indirect.hash_key
374                                : shared_hash_key;
375 }
376
377 static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
378         struct siphash state;
379         uint64_t hash;
380
381         siphash24_init(&state, hash_key(h));
382
383         h->hash_ops->hash(p, &state);
384
385         hash = siphash24_finalize(&state);
386
387         return (unsigned) (hash % n_buckets(h));
388 }
389 #define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
390
391 static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
392         static uint8_t current[HASH_KEY_SIZE];
393         static bool current_initialized = false;
394
395         /* Returns a hash function key to use. In order to keep things
396          * fast we will not generate a new key each time we allocate a
397          * new hash table. Instead, we'll just reuse the most recently
398          * generated one, except if we never generated one or when we
399          * are rehashing an entire hash table because it reached the
400          * fill limit. */
401
402         if (!current_initialized || !reuse_is_ok) {
403                 random_bytes(current, sizeof(current));
404                 current_initialized = true;
405         }
406
407         memcpy(hash_key, current, sizeof(current));
408 }
409
410 static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
411         return (struct hashmap_base_entry*)
412                 (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
413 }
414
415 static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
416         return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
417 }
418
419 static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
420         return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
421 }
422
423 static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
424         return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
425 }
426
427 static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
428         return &swap->e[idx - _IDX_SWAP_BEGIN];
429 }
430
431 /* Returns a pointer to the bucket at index idx.
432  * Understands real indexes and swap indexes, hence "_virtual". */
433 static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
434                                                     unsigned idx) {
435         if (idx < _IDX_SWAP_BEGIN)
436                 return bucket_at(h, idx);
437
438         if (idx < _IDX_SWAP_END)
439                 return &bucket_at_swap(swap, idx)->p.b;
440
441         assert_not_reached("Invalid index");
442 }
443
444 static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
445         return (dib_raw_t*)
446                 (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
447 }
448
449 static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
450         return idx >= from ? idx - from
451                            : n_buckets(h) + idx - from;
452 }
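/*
 * E.g. with n_buckets == 8, an entry whose initial bucket is 6 but that
 * ended up in bucket 1 has distance 8 + 1 - 6 = 3, i.e. its probe
 * sequence wrapped around the end of the table.
 */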
453
454 static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
455         unsigned initial_bucket;
456
457         if (raw_dib == DIB_RAW_FREE)
458                 return DIB_FREE;
459
460         if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
461                 return raw_dib;
462
463         /*
464          * Having an overflow DIB value is very unlikely. The hash function
465          * would have to be bad. For example, in a table of size 2^24 filled
466          * to load factor 0.9 the maximum observed DIB is only about 60.
467          * In theory (assuming I used Maxima correctly), for an infinite size
468          * hash table with load factor 0.8 the probability of a given entry
469          * having DIB > 40 is 1.9e-8.
470          * This returns the correct DIB value by recomputing the hash value in
471          * the unlikely case. XXX Hitting this case could be a hint to rehash.
472          */
473         initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
474         return bucket_distance(h, idx, initial_bucket);
475 }
476
477 static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
478         dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
479 }
480
481 static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
482         dib_raw_t *dibs;
483
484         dibs = dib_raw_ptr(h);
485
486         for ( ; idx < n_buckets(h); idx++)
487                 if (dibs[idx] != DIB_RAW_FREE)
488                         return idx;
489
490         return IDX_NIL;
491 }
492
493 static void bucket_mark_free(HashmapBase *h, unsigned idx) {
494         memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
495         bucket_set_dib(h, idx, DIB_FREE);
496 }
497
498 static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
499                               unsigned from, unsigned to) {
500         struct hashmap_base_entry *e_from, *e_to;
501
502         assert(from != to);
503
504         e_from = bucket_at_virtual(h, swap, from);
505         e_to   = bucket_at_virtual(h, swap, to);
506
507         memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);
508
509         if (h->type == HASHMAP_TYPE_ORDERED) {
510                 OrderedHashmap *lh = (OrderedHashmap*) h;
511                 struct ordered_hashmap_entry *le, *le_to;
512
513                 le_to = (struct ordered_hashmap_entry*) e_to;
514
515                 if (le_to->iterate_next != IDX_NIL) {
516                         le = (struct ordered_hashmap_entry*)
517                              bucket_at_virtual(h, swap, le_to->iterate_next);
518                         le->iterate_previous = to;
519                 }
520
521                 if (le_to->iterate_previous != IDX_NIL) {
522                         le = (struct ordered_hashmap_entry*)
523                              bucket_at_virtual(h, swap, le_to->iterate_previous);
524                         le->iterate_next = to;
525                 }
526
527                 if (lh->iterate_list_head == from)
528                         lh->iterate_list_head = to;
529                 if (lh->iterate_list_tail == from)
530                         lh->iterate_list_tail = to;
531         }
532 }
533
534 static unsigned next_idx(HashmapBase *h, unsigned idx) {
535         return (idx + 1U) % n_buckets(h);
536 }
537
538 static unsigned prev_idx(HashmapBase *h, unsigned idx) {
539         return (n_buckets(h) + idx - 1U) % n_buckets(h);
540 }
541
542 static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
543         switch (h->type) {
544
545         case HASHMAP_TYPE_PLAIN:
546         case HASHMAP_TYPE_ORDERED:
547                 return ((struct plain_hashmap_entry*)e)->value;
548
549         case HASHMAP_TYPE_SET:
550                 return (void*) e->key;
551
552         default:
553                 assert_not_reached("Unknown hashmap type");
554         }
555 }
556
557 static void base_remove_entry(HashmapBase *h, unsigned idx) {
558         unsigned left, right, prev, dib;
559         dib_raw_t raw_dib, *dibs;
560
561         dibs = dib_raw_ptr(h);
562         assert(dibs[idx] != DIB_RAW_FREE);
563
564 #ifdef ENABLE_DEBUG_HASHMAP
565         h->debug.rem_count++;
566         h->debug.last_rem_idx = idx;
567 #endif
568
569         left = idx;
570         /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
571         for (right = next_idx(h, left); ; right = next_idx(h, right)) {
572                 raw_dib = dibs[right];
573                 if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
574                         break;
575
576                 /* The buckets cannot all be occupied with DIB > 0 at once:
577                  * then shifting every entry backward would make everyone
578                  * better off, which Robin Hood insertion never allows. So this loop terminates. */
579                 assert(left != right);
580         }
581
582         if (h->type == HASHMAP_TYPE_ORDERED) {
583                 OrderedHashmap *lh = (OrderedHashmap*) h;
584                 struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);
585
586                 if (le->iterate_next != IDX_NIL)
587                         ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
588                 else
589                         lh->iterate_list_tail = le->iterate_previous;
590
591                 if (le->iterate_previous != IDX_NIL)
592                         ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
593                 else
594                         lh->iterate_list_head = le->iterate_next;
595         }
596
597         /* Now shift all buckets in the interval (left, right) one step backwards */
598         for (prev = left, left = next_idx(h, left); left != right;
599              prev = left, left = next_idx(h, left)) {
600                 dib = bucket_calculate_dib(h, left, dibs[left]);
601                 assert(dib != 0);
602                 bucket_move_entry(h, NULL, left, prev);
603                 bucket_set_dib(h, prev, dib - 1);
604         }
605
606         bucket_mark_free(h, prev);
607         n_entries_dec(h);
608 }
609 #define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
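/*
 * Worked example of the backward shift (illustrative): removing the
 * entry in bucket 4 of
 *
 *     bucket:  4    5    6    7
 *     DIB:     1    1    2    0
 *
 * finds the stop bucket at 7 (DIB == 0), then shifts buckets 5 and 6 one
 * step to the left while decrementing their DIBs:
 *
 *     bucket:  4    5    6    7
 *     DIB:     0    1   free  0
 *
 * No tombstones are left behind, so later scans never wade through
 * stale markers.
 */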
610
611 static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
612         struct ordered_hashmap_entry *e;
613         unsigned idx;
614
615         assert(h);
616         assert(i);
617
618         if (i->idx == IDX_NIL)
619                 goto at_end;
620
621         if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
622                 goto at_end;
623
624         if (i->idx == IDX_FIRST) {
625                 idx = h->iterate_list_head;
626                 e = ordered_bucket_at(h, idx);
627         } else {
628                 idx = i->idx;
629                 e = ordered_bucket_at(h, idx);
630                 /*
631                  * We allow removing the current entry while iterating, but removal may cause
632                  * a backward shift. The next entry may thus move one bucket to the left.
633                  * To detect when it happens, we remember the key pointer of the entry we were
634                  * going to iterate next. If it does not match, there was a backward shift.
635                  */
636                 if (e->p.b.key != i->next_key) {
637                         idx = prev_idx(HASHMAP_BASE(h), idx);
638                         e = ordered_bucket_at(h, idx);
639                 }
640                 assert(e->p.b.key == i->next_key);
641         }
642
643 #ifdef ENABLE_DEBUG_HASHMAP
644         i->prev_idx = idx;
645 #endif
646
647         if (e->iterate_next != IDX_NIL) {
648                 struct ordered_hashmap_entry *n;
649                 i->idx = e->iterate_next;
650                 n = ordered_bucket_at(h, i->idx);
651                 i->next_key = n->p.b.key;
652         } else
653                 i->idx = IDX_NIL;
654
655         return idx;
656
657 at_end:
658         i->idx = IDX_NIL;
659         return IDX_NIL;
660 }
661
662 static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
663         unsigned idx;
664
665         assert(h);
666         assert(i);
667
668         if (i->idx == IDX_NIL)
669                 goto at_end;
670
671         if (i->idx == IDX_FIRST) {
672                 /* fast forward to the first occupied bucket */
673                 if (h->has_indirect) {
674                         i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
675                         h->indirect.idx_lowest_entry = i->idx;
676                 } else
677                         i->idx = skip_free_buckets(h, 0);
678
679                 if (i->idx == IDX_NIL)
680                         goto at_end;
681         } else {
682                 struct hashmap_base_entry *e;
683
684                 assert(i->idx > 0);
685
686                 e = bucket_at(h, i->idx);
687                 /*
688                  * We allow removing the current entry while iterating, but removal may cause
689                  * a backward shift. The next entry may thus move one bucket to the left.
690                  * To detect when it happens, we remember the key pointer of the entry we were
691                  * going to iterate next. If it does not match, there was a backward shift.
692                  */
693                 if (e->key != i->next_key)
694                         e = bucket_at(h, --i->idx);
695
696                 assert(e->key == i->next_key);
697         }
698
699         idx = i->idx;
700 #ifdef ENABLE_DEBUG_HASHMAP
701         i->prev_idx = idx;
702 #endif
703
704         i->idx = skip_free_buckets(h, i->idx + 1);
705         if (i->idx != IDX_NIL)
706                 i->next_key = bucket_at(h, i->idx)->key;
707         else
708                 i->idx = IDX_NIL;
709
710         return idx;
711
712 at_end:
713         i->idx = IDX_NIL;
714         return IDX_NIL;
715 }
716
717 static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
718         if (!h) {
719                 i->idx = IDX_NIL;
720                 return IDX_NIL;
721         }
722
723 #ifdef ENABLE_DEBUG_HASHMAP
724         if (i->idx == IDX_FIRST) {
725                 i->put_count = h->debug.put_count;
726                 i->rem_count = h->debug.rem_count;
727         } else {
728                 /* While iterating, must not add any new entries */
729                 assert(i->put_count == h->debug.put_count);
730                 /* ... or remove entries other than the current one */
731                 assert(i->rem_count == h->debug.rem_count ||
732                        (i->rem_count == h->debug.rem_count - 1 &&
733                         i->prev_idx == h->debug.last_rem_idx));
734                 /* Reset our removals counter */
735                 i->rem_count = h->debug.rem_count;
736         }
737 #endif
738
739         return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
740                                                : hashmap_iterate_in_internal_order(h, i);
741 }
742
743 bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
744         struct hashmap_base_entry *e;
745         void *data;
746         unsigned idx;
747
748         idx = hashmap_iterate_entry(h, i);
749         if (idx == IDX_NIL) {
750                 if (value)
751                         *value = NULL;
752                 if (key)
753                         *key = NULL;
754
755                 return false;
756         }
757
758         e = bucket_at(h, idx);
759         data = entry_value(h, e);
760         if (value)
761                 *value = data;
762         if (key)
763                 *key = e->key;
764
765         return true;
766 }
767
768 bool set_iterate(Set *s, Iterator *i, void **value) {
769         return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
770 }
771
772 #define HASHMAP_FOREACH_IDX(idx, h, i) \
773         for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
774              (idx != IDX_NIL); \
775              (idx) = hashmap_iterate_entry((h), &(i)))
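/*
 * Outside this file, iteration normally goes through thin wrappers in
 * hashmap.h such as hashmap_iterate(), or the FOREACH macros built on
 * them. A minimal sketch of direct use (illustrative; 'dump_all' is a
 * made-up name):
 */
#if 0 /* example only */
static void dump_all(Hashmap *h) {
        Iterator i = ITERATOR_FIRST;
        const void *key;
        void *value;

        /* Removing the *current* entry while iterating is allowed (see
         * the backward-shift detection above); adding entries or
         * removing other entries is not. */
        while (hashmap_iterate(h, &i, &value, &key))
                log_debug("%s -> %p", (const char*) key, value);
}
#endif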
776
777 static void reset_direct_storage(HashmapBase *h) {
778         const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
779         void *p;
780
781         assert(!h->has_indirect);
782
783         p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
784         memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
785 }
786
787 static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
788         HashmapBase *h;
789         const struct hashmap_type_info *hi = &hashmap_type_info[type];
790         bool use_pool;
791
792         use_pool = is_main_thread();
793
794         h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
795
796         if (!h)
797                 return NULL;
798
799         h->type = type;
800         h->from_pool = use_pool;
801         h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
802
803         if (type == HASHMAP_TYPE_ORDERED) {
804                 OrderedHashmap *lh = (OrderedHashmap*)h;
805                 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
806         }
807
808         reset_direct_storage(h);
809
810         if (!shared_hash_key_initialized) {
811                 random_bytes(shared_hash_key, sizeof(shared_hash_key));
812                 shared_hash_key_initialized = true;
813         }
814
815 #ifdef ENABLE_DEBUG_HASHMAP
816         h->debug.func = func;
817         h->debug.file = file;
818         h->debug.line = line;
819         assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
820         LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
821         assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
822 #endif
823
824         return h;
825 }
826
827 Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
828         return (Hashmap*)        hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
829 }
830
831 OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
832         return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
833 }
834
835 Set *internal_set_new(const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
836         return (Set*)            hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
837 }
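/*
 * External code uses the hashmap_new()/ordered_hashmap_new()/set_new()
 * macros from hashmap.h, which append the debug arguments automatically
 * when ENABLE_DEBUG_HASHMAP is enabled. Minimal sketch (illustrative):
 */
#if 0 /* example only */
static int example(void) {
        Hashmap *h;

        h = hashmap_new(&string_hash_ops);
        if (!h)
                return -ENOMEM;

        hashmap_free(h);
        return 0;
}
#endif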
838
839 static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
840                                          enum HashmapType type HASHMAP_DEBUG_PARAMS) {
841         HashmapBase *q;
842
843         assert(h);
844
845         if (*h)
846                 return 0;
847
848         q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
849         if (!q)
850                 return -ENOMEM;
851
852         *h = q;
853         return 0;
854 }
855
856 int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
857         return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
858 }
859
860 int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
861         return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
862 }
863
864 int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
865         return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
866 }
867
868 static void hashmap_free_no_clear(HashmapBase *h) {
869         assert(!h->has_indirect);
870         assert(!h->n_direct_entries);
871
872 #ifdef ENABLE_DEBUG_HASHMAP
873         assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
874         LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
875         assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
876 #endif
877
878         if (h->from_pool)
879                 mempool_free_tile(hashmap_type_info[h->type].mempool, h);
880         else
881                 free(h);
882 }
883
884 HashmapBase *internal_hashmap_free(HashmapBase *h) {
885
886         /* Free the hashmap, but nothing in it */
887
888         if (h) {
889                 internal_hashmap_clear(h);
890                 hashmap_free_no_clear(h);
891         }
892
893         return NULL;
894 }
895
896 HashmapBase *internal_hashmap_free_free(HashmapBase *h) {
897
898         /* Free the hashmap and all data objects in it, but not the
899          * keys */
900
901         if (h) {
902                 internal_hashmap_clear_free(h);
903                 hashmap_free_no_clear(h);
904         }
905
906         return NULL;
907 }
908
909 Hashmap *hashmap_free_free_free(Hashmap *h) {
910
911         /* Free the hashmap and all data and key objects in it */
912
913         if (h) {
914                 hashmap_clear_free_free(h);
915                 hashmap_free_no_clear(HASHMAP_BASE(h));
916         }
917
918         return NULL;
919 }
920
921 void internal_hashmap_clear(HashmapBase *h) {
922         if (!h)
923                 return;
924
925         if (h->has_indirect) {
926                 free(h->indirect.storage);
927                 h->has_indirect = false;
928         }
929
930         h->n_direct_entries = 0;
931         reset_direct_storage(h);
932
933         if (h->type == HASHMAP_TYPE_ORDERED) {
934                 OrderedHashmap *lh = (OrderedHashmap*) h;
935                 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
936         }
937 }
938
939 void internal_hashmap_clear_free(HashmapBase *h) {
940         unsigned idx;
941
942         if (!h)
943                 return;
944
945         for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
946              idx = skip_free_buckets(h, idx + 1))
947                 free(entry_value(h, bucket_at(h, idx)));
948
949         internal_hashmap_clear(h);
950 }
951
952 void hashmap_clear_free_free(Hashmap *h) {
953         unsigned idx;
954
955         if (!h)
956                 return;
957
958         for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
959              idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
960                 struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
961                 free((void*)e->b.key);
962                 free(e->value);
963         }
964
965         internal_hashmap_clear(HASHMAP_BASE(h));
966 }
967
968 static int resize_buckets(HashmapBase *h, unsigned entries_add);
969
970 /*
971  * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
972  * Performs Robin Hood swaps as it goes. The entry to put must be placed
973  * by the caller into swap slot IDX_PUT.
974  * If used for in-place resizing, may leave a displaced entry in swap slot
975  * IDX_PUT. Caller must rehash it next.
976  * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
977  *          false otherwise.
978  */
979 static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
980                                    struct swap_entries *swap) {
981         dib_raw_t raw_dib, *dibs;
982         unsigned dib, distance;
983
984 #ifdef ENABLE_DEBUG_HASHMAP
985         h->debug.put_count++;
986 #endif
987
988         dibs = dib_raw_ptr(h);
989
990         for (distance = 0; ; distance++) {
991                 raw_dib = dibs[idx];
992                 if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
993                         if (raw_dib == DIB_RAW_REHASH)
994                                 bucket_move_entry(h, swap, idx, IDX_TMP);
995
996                         if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
997                                 h->indirect.idx_lowest_entry = idx;
998
999                         bucket_set_dib(h, idx, distance);
1000                         bucket_move_entry(h, swap, IDX_PUT, idx);
1001                         if (raw_dib == DIB_RAW_REHASH) {
1002                                 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
1003                                 return true;
1004                         }
1005
1006                         return false;
1007                 }
1008
1009                 dib = bucket_calculate_dib(h, idx, raw_dib);
1010
1011                 if (dib < distance) {
1012                         /* Found a wealthier entry. Go Robin Hood! */
1013                         bucket_set_dib(h, idx, distance);
1014
1015                         /* swap the entries */
1016                         bucket_move_entry(h, swap, idx, IDX_TMP);
1017                         bucket_move_entry(h, swap, IDX_PUT, idx);
1018                         bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
1019
1020                         distance = dib;
1021                 }
1022
1023                 idx = next_idx(h, idx);
1024         }
1025 }
1026
1027 /*
1028  * Puts an entry into a hashmap, boldly - no check whether key already exists.
1029  * The caller must place the entry (only its key and value, not link indexes)
1030  * in swap slot IDX_PUT.
1031  * Caller must ensure: the key does not exist yet in the hashmap,
1032  *                     and that a resize is not needed if !may_resize.
1033  * Returns: 1 if the entry was put successfully.
1034  *          -ENOMEM if may_resize==true and the resize failed.
1035  *          Cannot return -ENOMEM if !may_resize.
1036  */
1037 static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
1038                                    struct swap_entries *swap, bool may_resize) {
1039         struct ordered_hashmap_entry *new_entry;
1040         int r;
1041
1042         assert(idx < n_buckets(h));
1043
1044         new_entry = bucket_at_swap(swap, IDX_PUT);
1045
1046         if (may_resize) {
1047                 r = resize_buckets(h, 1);
1048                 if (r < 0)
1049                         return r;
1050                 if (r > 0)
1051                         idx = bucket_hash(h, new_entry->p.b.key);
1052         }
1053         assert(n_entries(h) < n_buckets(h));
1054
1055         if (h->type == HASHMAP_TYPE_ORDERED) {
1056                 OrderedHashmap *lh = (OrderedHashmap*) h;
1057
1058                 new_entry->iterate_next = IDX_NIL;
1059                 new_entry->iterate_previous = lh->iterate_list_tail;
1060
1061                 if (lh->iterate_list_tail != IDX_NIL) {
1062                         struct ordered_hashmap_entry *old_tail;
1063
1064                         old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
1065                         assert(old_tail->iterate_next == IDX_NIL);
1066                         old_tail->iterate_next = IDX_PUT;
1067                 }
1068
1069                 lh->iterate_list_tail = IDX_PUT;
1070                 if (lh->iterate_list_head == IDX_NIL)
1071                         lh->iterate_list_head = IDX_PUT;
1072         }
1073
1074         assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
1075
1076         n_entries_inc(h);
1077 #ifdef ENABLE_DEBUG_HASHMAP
1078         h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
1079 #endif
1080
1081         return 1;
1082 }
1083 #define hashmap_put_boldly(h, idx, swap, may_resize) \
1084         hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1085
1086 /*
1087  * Returns 0 if resize is not needed.
1088  *         1 if successfully resized.
1089  *         -ENOMEM on allocation failure.
1090  */
1091 static int resize_buckets(HashmapBase *h, unsigned entries_add) {
1092         struct swap_entries swap;
1093         char *new_storage;
1094         dib_raw_t *old_dibs, *new_dibs;
1095         const struct hashmap_type_info *hi;
1096         unsigned idx, optimal_idx;
1097         unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
1098         uint8_t new_shift;
1099         bool rehash_next;
1100
1101         assert(h);
1102
1103         hi = &hashmap_type_info[h->type];
1104         new_n_entries = n_entries(h) + entries_add;
1105
1106         /* overflow? */
1107         if (_unlikely_(new_n_entries < entries_add))
1108                 return -ENOMEM;
1109
1110         /* For direct storage we allow 100% load, because it's tiny. */
1111         if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
1112                 return 0;
1113
1114         /*
1115          * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
1116          * From it follows: m = n + n/(INV_KEEP_FREE - 1)
1117          */
1118         new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
1119         /* overflow? */
1120         if (_unlikely_(new_n_buckets < new_n_entries))
1121                 return -ENOMEM;
1122
1123         if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
1124                 return -ENOMEM;
1125
1126         old_n_buckets = n_buckets(h);
1127
1128         if (_likely_(new_n_buckets <= old_n_buckets))
1129                 return 0;
1130
1131         new_shift = log2u_round_up(MAX(
1132                         new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
1133                         2 * sizeof(struct direct_storage)));
1134
1135         /* Realloc storage (buckets and DIB array). */
1136         new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
1137                               1U << new_shift);
1138         if (!new_storage)
1139                 return -ENOMEM;
1140
1141         /* Must upgrade direct to indirect storage. */
1142         if (!h->has_indirect) {
1143                 memcpy(new_storage, h->direct.storage,
1144                        old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
1145                 h->indirect.n_entries = h->n_direct_entries;
1146                 h->indirect.idx_lowest_entry = 0;
1147                 h->n_direct_entries = 0;
1148         }
1149
1150         /* Get a new hash key. If we've just upgraded to indirect storage,
1151          * allow reusing a previously generated key. It's still a different key
1152          * from the shared one that we used for direct storage. */
1153         get_hash_key(h->indirect.hash_key, !h->has_indirect);
1154
1155         h->has_indirect = true;
1156         h->indirect.storage = new_storage;
1157         h->indirect.n_buckets = (1U << new_shift) /
1158                                 (hi->entry_size + sizeof(dib_raw_t));
1159
1160         old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
1161         new_dibs = dib_raw_ptr(h);
1162
1163         /*
1164          * Move the DIB array to the new place, replacing valid DIB values with
1165          * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
1166          * Note: Overlap is not possible, because we have at least doubled the
1167          * number of buckets and dib_raw_t is smaller than any entry type.
1168          */
1169         for (idx = 0; idx < old_n_buckets; idx++) {
1170                 assert(old_dibs[idx] != DIB_RAW_REHASH);
1171                 new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
1172                                                               : DIB_RAW_REHASH;
1173         }
1174
1175         /* Zero the area of newly added entries (including the old DIB area) */
1176         memzero(bucket_at(h, old_n_buckets),
1177                (n_buckets(h) - old_n_buckets) * hi->entry_size);
1178
1179         /* The upper half of the new DIB array needs initialization */
1180         memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
1181                (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));
1182
1183         /* Rehash entries that need it */
1184         n_rehashed = 0;
1185         for (idx = 0; idx < old_n_buckets; idx++) {
1186                 if (new_dibs[idx] != DIB_RAW_REHASH)
1187                         continue;
1188
1189                 optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);
1190
1191                 /*
1192                  * Not much to do if by luck the entry hashes to its current
1193                  * location. Just set its DIB.
1194                  */
1195                 if (optimal_idx == idx) {
1196                         new_dibs[idx] = 0;
1197                         n_rehashed++;
1198                         continue;
1199                 }
1200
1201                 new_dibs[idx] = DIB_RAW_FREE;
1202                 bucket_move_entry(h, &swap, idx, IDX_PUT);
1203                 /* bucket_move_entry does not clear the source */
1204                 memzero(bucket_at(h, idx), hi->entry_size);
1205
1206                 do {
1207                         /*
1208                          * Find the new bucket for the current entry. This may make
1209                          * another entry homeless and load it into IDX_PUT.
1210                          */
1211                         rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
1212                         n_rehashed++;
1213
1214                         /* Did the current entry displace another one? */
1215                         if (rehash_next)
1216                                 optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
1217                 } while (rehash_next);
1218         }
1219
1220         assert(n_rehashed == n_entries(h));
1221
1222         return 1;
1223 }
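/*
 * Worked example of the growth arithmetic (illustrative, 64bit Set with
 * 8-byte entries): growing past the 4 direct buckets to hold 5 entries
 * needs new_n_buckets = 5 + 5/4 = 6. Storage is rounded up to a power of
 * two: MAX(6 * (8 + 1), 2 * 39) = 78 bytes -> new_shift = 7, i.e. 128
 * bytes, which then actually provides 128 / 9 = 14 buckets.
 */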
1224
1225 /*
1226  * Finds an entry with a matching key.
1227  * Returns: index of the found entry, or IDX_NIL if not found.
1228  */
1229 static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
1230         struct hashmap_base_entry *e;
1231         unsigned dib, distance;
1232         dib_raw_t *dibs = dib_raw_ptr(h);
1233
1234         assert(idx < n_buckets(h));
1235
1236         for (distance = 0; ; distance++) {
1237                 if (dibs[idx] == DIB_RAW_FREE)
1238                         return IDX_NIL;
1239
1240                 dib = bucket_calculate_dib(h, idx, dibs[idx]);
1241
1242                 if (dib < distance)
1243                         return IDX_NIL;
1244                 if (dib == distance) {
1245                         e = bucket_at(h, idx);
1246                         if (h->hash_ops->compare(e->key, key) == 0)
1247                                 return idx;
1248                 }
1249
1250                 idx = next_idx(h, idx);
1251         }
1252 }
1253 #define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
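/*
 * Note the early exit above: thanks to the Robin Hood invariant, once we
 * reach a bucket whose DIB is smaller than our current probe distance,
 * the key cannot be stored at or beyond this point (insertion would have
 * displaced that "richer" entry first), so the scan stops without
 * touching the rest of the table.
 */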
1254
1255 int hashmap_put(Hashmap *h, const void *key, void *value) {
1256         struct swap_entries swap;
1257         struct plain_hashmap_entry *e;
1258         unsigned hash, idx;
1259
1260         assert(h);
1261
1262         hash = bucket_hash(h, key);
1263         idx = bucket_scan(h, hash, key);
1264         if (idx != IDX_NIL) {
1265                 e = plain_bucket_at(h, idx);
1266                 if (e->value == value)
1267                         return 0;
1268                 return -EEXIST;
1269         }
1270
1271         e = &bucket_at_swap(&swap, IDX_PUT)->p;
1272         e->b.key = key;
1273         e->value = value;
1274         return hashmap_put_boldly(h, hash, &swap, true);
1275 }
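/*
 * Three outcomes above: 1 if a new entry was stored, 0 if the exact same
 * key/value pair was already present, -EEXIST if the key is taken with a
 * different value (and -ENOMEM if growing the table failed). A typical
 * caller (illustrative; 'r', 'h', 'name', 'dev' are made-up names):
 */
#if 0 /* example only */
r = hashmap_put(h, name, dev);
if (r == -EEXIST)
        log_warning("Duplicate entry for '%s', keeping the old one.", name);
else if (r < 0)
        return r;
#endif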
1276
1277 int set_put(Set *s, const void *key) {
1278         struct swap_entries swap;
1279         struct hashmap_base_entry *e;
1280         unsigned hash, idx;
1281
1282         assert(s);
1283
1284         hash = bucket_hash(s, key);
1285         idx = bucket_scan(s, hash, key);
1286         if (idx != IDX_NIL)
1287                 return 0;
1288
1289         e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1290         e->key = key;
1291         return hashmap_put_boldly(s, hash, &swap, true);
1292 }
1293
1294 int hashmap_replace(Hashmap *h, const void *key, void *value) {
1295         struct swap_entries swap;
1296         struct plain_hashmap_entry *e;
1297         unsigned hash, idx;
1298
1299         assert(h);
1300
1301         hash = bucket_hash(h, key);
1302         idx = bucket_scan(h, hash, key);
1303         if (idx != IDX_NIL) {
1304                 e = plain_bucket_at(h, idx);
1305 #ifdef ENABLE_DEBUG_HASHMAP
1306                 /* Although the key is equal, the key pointer may have changed,
1307                  * and this would break our assumption for iterating. So count
1308                  * this operation as incompatible with iteration. */
1309                 if (e->b.key != key) {
1310                         h->b.debug.put_count++;
1311                         h->b.debug.rem_count++;
1312                         h->b.debug.last_rem_idx = idx;
1313                 }
1314 #endif
1315                 e->b.key = key;
1316                 e->value = value;
1317                 return 0;
1318         }
1319
1320         e = &bucket_at_swap(&swap, IDX_PUT)->p;
1321         e->b.key = key;
1322         e->value = value;
1323         return hashmap_put_boldly(h, hash, &swap, true);
1324 }
1325
1326 int hashmap_update(Hashmap *h, const void *key, void *value) {
1327         struct plain_hashmap_entry *e;
1328         unsigned hash, idx;
1329
1330         assert(h);
1331
1332         hash = bucket_hash(h, key);
1333         idx = bucket_scan(h, hash, key);
1334         if (idx == IDX_NIL)
1335                 return -ENOENT;
1336
1337         e = plain_bucket_at(h, idx);
1338         e->value = value;
1339         return 0;
1340 }
1341
1342 void *internal_hashmap_get(HashmapBase *h, const void *key) {
1343         struct hashmap_base_entry *e;
1344         unsigned hash, idx;
1345
1346         if (!h)
1347                 return NULL;
1348
1349         hash = bucket_hash(h, key);
1350         idx = bucket_scan(h, hash, key);
1351         if (idx == IDX_NIL)
1352                 return NULL;
1353
1354         e = bucket_at(h, idx);
1355         return entry_value(h, e);
1356 }
1357
1358 void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
1359         struct plain_hashmap_entry *e;
1360         unsigned hash, idx;
1361
1362         if (!h)
1363                 return NULL;
1364
1365         hash = bucket_hash(h, key);
1366         idx = bucket_scan(h, hash, key);
1367         if (idx == IDX_NIL)
1368                 return NULL;
1369
1370         e = plain_bucket_at(h, idx);
1371         if (key2)
1372                 *key2 = (void*) e->b.key;
1373
1374         return e->value;
1375 }
1376
1377 bool internal_hashmap_contains(HashmapBase *h, const void *key) {
1378         unsigned hash;
1379
1380         if (!h)
1381                 return false;
1382
1383         hash = bucket_hash(h, key);
1384         return bucket_scan(h, hash, key) != IDX_NIL;
1385 }
1386
1387 void *internal_hashmap_remove(HashmapBase *h, const void *key) {
1388         struct hashmap_base_entry *e;
1389         unsigned hash, idx;
1390         void *data;
1391
1392         if (!h)
1393                 return NULL;
1394
1395         hash = bucket_hash(h, key);
1396         idx = bucket_scan(h, hash, key);
1397         if (idx == IDX_NIL)
1398                 return NULL;
1399
1400         e = bucket_at(h, idx);
1401         data = entry_value(h, e);
1402         remove_entry(h, idx);
1403
1404         return data;
1405 }
1406
1407 void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1408         struct plain_hashmap_entry *e;
1409         unsigned hash, idx;
1410         void *data;
1411
1412         if (!h) {
1413                 if (rkey)
1414                         *rkey = NULL;
1415                 return NULL;
1416         }
1417
1418         hash = bucket_hash(h, key);
1419         idx = bucket_scan(h, hash, key);
1420         if (idx == IDX_NIL) {
1421                 if (rkey)
1422                         *rkey = NULL;
1423                 return NULL;
1424         }
1425
1426         e = plain_bucket_at(h, idx);
1427         data = e->value;
1428         if (rkey)
1429                 *rkey = (void*) e->b.key;
1430
1431         remove_entry(h, idx);
1432
1433         return data;
1434 }
1435
1436 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1437         struct swap_entries swap;
1438         struct plain_hashmap_entry *e;
1439         unsigned old_hash, new_hash, idx;
1440
1441         if (!h)
1442                 return -ENOENT;
1443
1444         old_hash = bucket_hash(h, old_key);
1445         idx = bucket_scan(h, old_hash, old_key);
1446         if (idx == IDX_NIL)
1447                 return -ENOENT;
1448
1449         new_hash = bucket_hash(h, new_key);
1450         if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
1451                 return -EEXIST;
1452
1453         remove_entry(h, idx);
1454
1455         e = &bucket_at_swap(&swap, IDX_PUT)->p;
1456         e->b.key = new_key;
1457         e->value = value;
1458         assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1459
1460         return 0;
1461 }
1462
1463 #if 0 /// UNNEEDED by elogind
1464 int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
1465         struct swap_entries swap;
1466         struct hashmap_base_entry *e;
1467         unsigned old_hash, new_hash, idx;
1468
1469         if (!s)
1470                 return -ENOENT;
1471
1472         old_hash = bucket_hash(s, old_key);
1473         idx = bucket_scan(s, old_hash, old_key);
1474         if (idx == IDX_NIL)
1475                 return -ENOENT;
1476
1477         new_hash = bucket_hash(s, new_key);
1478         if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
1479                 return -EEXIST;
1480
1481         remove_entry(s, idx);
1482
1483         e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1484         e->key = new_key;
1485         assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
1486
1487         return 0;
1488 }
1489 #endif // 0
1490
1491 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1492         struct swap_entries swap;
1493         struct plain_hashmap_entry *e;
1494         unsigned old_hash, new_hash, idx_old, idx_new;
1495
1496         if (!h)
1497                 return -ENOENT;
1498
1499         old_hash = bucket_hash(h, old_key);
1500         idx_old = bucket_scan(h, old_hash, old_key);
1501         if (idx_old == IDX_NIL)
1502                 return -ENOENT;
1503
1504         old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
1505
1506         new_hash = bucket_hash(h, new_key);
1507         idx_new = bucket_scan(h, new_hash, new_key);
1508         if (idx_new != IDX_NIL && idx_old != idx_new) {
1509                 remove_entry(h, idx_new);
1510                 /* The removal may have triggered a backward shift that
1511                  * moved the old entry one bucket back; compensate. */
1512                 if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
1513                         idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
1514                 assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
1515         }
1516
1517         remove_entry(h, idx_old);
1518
1519         e = &bucket_at_swap(&swap, IDX_PUT)->p;
1520         e->b.key = new_key;
1521         e->value = value;
1522         assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1523
1524         return 0;
1525 }
1526
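/*
 * Conditional removal: the entry for key is removed only if its stored
 * value is identical to value (pointer comparison). Returns value on
 * success, NULL otherwise.
 */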
1527 void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
1528         struct plain_hashmap_entry *e;
1529         unsigned hash, idx;
1530
1531         if (!h)
1532                 return NULL;
1533
1534         hash = bucket_hash(h, key);
1535         idx = bucket_scan(h, hash, key);
1536         if (idx == IDX_NIL)
1537                 return NULL;
1538
1539         e = plain_bucket_at(h, idx);
1540         if (e->value != value)
1541                 return NULL;
1542
1543         remove_entry(h, idx);
1544
1545         return value;
1546 }
1547
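/*
 * Returns the index of the entry an iteration over h would visit first, or
 * IDX_NIL if the map is empty. For ordered hashmaps that is the oldest
 * entry (insertion order); for the other types it is simply whichever
 * entry the iterator happens to reach first.
 */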
1548 static unsigned find_first_entry(HashmapBase *h) {
1549         Iterator i = ITERATOR_FIRST;
1550
1551         if (!h || !n_entries(h))
1552                 return IDX_NIL;
1553
1554         return hashmap_iterate_entry(h, &i);
1555 }
1556
1557 void *internal_hashmap_first(HashmapBase *h) {
1558         unsigned idx;
1559
1560         idx = find_first_entry(h);
1561         if (idx == IDX_NIL)
1562                 return NULL;
1563
1564         return entry_value(h, bucket_at(h, idx));
1565 }
1566
1567 void *internal_hashmap_first_key(HashmapBase *h) {
1568         struct hashmap_base_entry *e;
1569         unsigned idx;
1570
1571         idx = find_first_entry(h);
1572         if (idx == IDX_NIL)
1573                 return NULL;
1574
1575         e = bucket_at(h, idx);
1576         return (void*) e->key;
1577 }
1578
1579 void *internal_hashmap_steal_first(HashmapBase *h) {
1580         struct hashmap_base_entry *e;
1581         void *data;
1582         unsigned idx;
1583
1584         idx = find_first_entry(h);
1585         if (idx == IDX_NIL)
1586                 return NULL;
1587
1588         e = bucket_at(h, idx);
1589         data = entry_value(h, e);
1590         remove_entry(h, idx);
1591
1592         return data;
1593 }
1594
1595 void *internal_hashmap_steal_first_key(HashmapBase *h) {
1596         struct hashmap_base_entry *e;
1597         void *key;
1598         unsigned idx;
1599
1600         idx = find_first_entry(h);
1601         if (idx == IDX_NIL)
1602                 return NULL;
1603
1604         e = bucket_at(h, idx);
1605         key = (void*) e->key;
1606         remove_entry(h, idx);
1607
1608         return key;
1609 }
1610
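/*
 * The *_steal_first() helpers above are the building block for draining a
 * map while transferring ownership of each entry to the caller. A minimal
 * sketch (hypothetical caller code, using the hashmap_steal_first() wrapper
 * from hashmap.h and assuming the map owns malloc'd values):
 *
 *         void *v;
 *
 *         while ((v = hashmap_steal_first(h)))
 *                 free(v);
 *
 *         hashmap_free(h);
 */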
1611 unsigned internal_hashmap_size(HashmapBase *h) {
1612
1613         if (!h)
1614                 return 0;
1615
1616         return n_entries(h);
1617 }
1618
1619 unsigned internal_hashmap_buckets(HashmapBase *h) {
1620
1621         if (!h)
1622                 return 0;
1623
1624         return n_buckets(h);
1625 }
1626
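/*
 * Copies all entries from other into h. Keys that already exist in h are
 * skipped and keep their current value; any other failure aborts the merge
 * and may leave h partially updated.
 */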
1627 int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
1628         Iterator i;
1629         unsigned idx;
1630
1631         assert(h);
1632
1633         HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1634                 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
1635                 int r;
1636
1637                 r = hashmap_put(h, pe->b.key, pe->value);
1638                 if (r < 0 && r != -EEXIST)
1639                         return r;
1640         }
1641
1642         return 0;
1643 }
1644
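/*
 * The Set counterpart of the merge above: copies every element of other
 * into s. Elements already present are harmless, since set_put() returns 0
 * rather than -EEXIST for them.
 */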
1645 int set_merge(Set *s, Set *other) {
1646         Iterator i;
1647         unsigned idx;
1648
1649         assert(s);
1650
1651         HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1652                 struct set_entry *se = set_bucket_at(other, idx);
1653                 int r;
1654
1655                 r = set_put(s, se->b.key);
1656                 if (r < 0)
1657                         return r;
1658         }
1659
1660         return 0;
1661 }
1662
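/*
 * Grows the bucket array in advance so that entries_add further entries fit
 * without another resize. This makes a subsequent batch of puts atomic with
 * respect to allocation failure, e.g. (hypothetical caller code, using the
 * hashmap_reserve() wrapper from hashmap.h, keys assumed new and distinct):
 *
 *         r = hashmap_reserve(h, n);
 *         if (r < 0)
 *                 return r;
 *         for (i = 0; i < n; i++)
 *                 assert_se(hashmap_put(h, key[i], val[i]) > 0);
 */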
1663 int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
1664         int r;
1665
1666         assert(h);
1667
1668         r = resize_buckets(h, entries_add);
1669         if (r < 0)
1670                 return r;
1671
1672         return 0;
1673 }
1674
1675 /*
1676  * The same as hashmap_merge(), but every new item from other is moved to h.
1677  * Keys already in h are skipped and stay in other.
1678  * Returns: 0 on success.
1679  *          -ENOMEM on alloc failure, in which case no move has been done.
1680  */
1681 int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
1682         struct swap_entries swap;
1683         struct hashmap_base_entry *e, *n;
1684         Iterator i;
1685         unsigned idx;
1686         int r;
1687
1688         assert(h);
1689
1690         if (!other)
1691                 return 0;
1692
1693         assert(other->type == h->type);
1694
1695         /*
1696          * This reserves buckets for the worst case, where none of other's
1697          * entries are yet present in h. This is preferable to risking
1698          * an allocation failure in the middle of the moving and having to
1699          * an allocation failure in the middle of the move and having to
1700          * roll back or return a partial result.
1701         r = resize_buckets(h, n_entries(other));
1702         if (r < 0)
1703                 return r;
1704
1705         HASHMAP_FOREACH_IDX(idx, other, i) {
1706                 unsigned h_hash;
1707
1708                 e = bucket_at(other, idx);
1709                 h_hash = bucket_hash(h, e->key);
1710                 if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
1711                         continue;
1712
1713                 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1714                 n->key = e->key;
1715                 if (h->type != HASHMAP_TYPE_SET)
1716                         ((struct plain_hashmap_entry*) n)->value =
1717                                 ((struct plain_hashmap_entry*) e)->value;
1718                 assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);
1719
1720                 remove_entry(other, idx);
1721         }
1722
1723         return 0;
1724 }
1725
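/*
 * Moves the single entry for key from other into h. Returns -EEXIST if h
 * already contains the key and -ENOENT if other does not; the entry is
 * removed from other only after the insertion into h has succeeded.
 */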
1726 int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
1727         struct swap_entries swap;
1728         unsigned h_hash, other_hash, idx;
1729         struct hashmap_base_entry *e, *n;
1730         int r;
1731
1732         assert(h);
1733
1734         h_hash = bucket_hash(h, key);
1735         if (bucket_scan(h, h_hash, key) != IDX_NIL)
1736                 return -EEXIST;
1737
1738         if (!other)
1739                 return -ENOENT;
1740
1741         assert(other->type == h->type);
1742
1743         other_hash = bucket_hash(other, key);
1744         idx = bucket_scan(other, other_hash, key);
1745         if (idx == IDX_NIL)
1746                 return -ENOENT;
1747
1748         e = bucket_at(other, idx);
1749
1750         n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1751         n->key = e->key;
1752         if (h->type != HASHMAP_TYPE_SET)
1753                 ((struct plain_hashmap_entry*) n)->value =
1754                         ((struct plain_hashmap_entry*) e)->value;
1755         r = hashmap_put_boldly(h, h_hash, &swap, true);
1756         if (r < 0)
1757                 return r;
1758
1759         remove_entry(other, idx);
1760         return 0;
1761 }
1762
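/*
 * Allocates a new hashmap of the same type and hash_ops as h and copies
 * all entries into it. Returns NULL if allocation of the copy or of any
 * entry fails.
 */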
1763 HashmapBase *internal_hashmap_copy(HashmapBase *h) {
1764         HashmapBase *copy;
1765         int r;
1766
1767         assert(h);
1768
1769         copy = hashmap_base_new(h->hash_ops, h->type  HASHMAP_DEBUG_SRC_ARGS);
1770         if (!copy)
1771                 return NULL;
1772
1773         switch (h->type) {
1774         case HASHMAP_TYPE_PLAIN:
1775         case HASHMAP_TYPE_ORDERED:
1776                 r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
1777                 break;
1778         case HASHMAP_TYPE_SET:
1779                 r = set_merge((Set*)copy, (Set*)h);
1780                 break;
1781         default:
1782                 assert_not_reached("Unknown hashmap type");
1783         }
1784
1785         if (r < 0) {
1786                 internal_hashmap_free(copy);
1787                 return NULL;
1788         }
1789
1790         return copy;
1791 }
1792
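/*
 * Builds a NULL-terminated array of the stored values, in iteration order.
 * No copies are made: the array itself is owned by the caller, while the
 * strings remain owned by whoever owns the map's values. Expects a
 * non-NULL h.
 */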
1793 char **internal_hashmap_get_strv(HashmapBase *h) {
1794         char **sv;
1795         Iterator i;
1796         unsigned idx, n;
1797
1798         sv = new(char*, n_entries(h)+1);
1799         if (!sv)
1800                 return NULL;
1801
1802         n = 0;
1803         HASHMAP_FOREACH_IDX(idx, h, i)
1804                 sv[n++] = entry_value(h, bucket_at(h, idx));
1805         sv[n] = NULL;
1806
1807         return sv;
1808 }
1809
1810 #if 0 /// UNNEEDED by elogind
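/*
 * Returns the value stored directly after the entry for key in iteration
 * (i.e. insertion) order, or NULL if key is not found or its entry is the
 * last one.
 */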
1811 void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
1812         struct ordered_hashmap_entry *e;
1813         unsigned hash, idx;
1814
1815         if (!h)
1816                 return NULL;
1817
1818         hash = bucket_hash(h, key);
1819         idx = bucket_scan(h, hash, key);
1820         if (idx == IDX_NIL)
1821                 return NULL;
1822
1823         e = ordered_bucket_at(h, idx);
1824         if (e->iterate_next == IDX_NIL)
1825                 return NULL;
1826         return ordered_bucket_at(h, e->iterate_next)->p.value;
1827 }
1828 #endif // 0
1829
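/*
 * Like set_put(), but takes ownership of value: it is freed whenever it
 * does not actually end up in the set, i.e. on error and when an equal
 * entry is already present.
 */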
1830 int set_consume(Set *s, void *value) {
1831         int r;
1832
1833         r = set_put(s, value);
1834         if (r <= 0)
1835                 free(value);
1836
1837         return r;
1838 }
1839
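/*
 * Copies the string p into the set. Returns > 0 if the copy was added, 0 if
 * an equal string was already present (the fresh copy is then freed by
 * set_consume()), or a negative error code on failure.
 */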
1840 int set_put_strdup(Set *s, const char *p) {
1841         char *c;
1842         int r;
1843
1844         assert(s);
1845         assert(p);
1846
1847         c = strdup(p);
1848         if (!c)
1849                 return -ENOMEM;
1850
1851         r = set_consume(s, c);
1852         if (r == -EEXIST)
1853                 return 0;
1854
1855         return r;
1856 }
1857
1858 #if 0 /// UNNEEDED by elogind
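/*
 * Duplicates every string of the strv l into the set. Returns the number of
 * strings actually added, or a negative error code on the first failure
 * (strings added up to that point stay in the set).
 */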
1859 int set_put_strdupv(Set *s, char **l) {
1860         int n = 0, r;
1861         char **i;
1862
1863         STRV_FOREACH(i, l) {
1864                 r = set_put_strdup(s, *i);
1865                 if (r < 0)
1866                         return r;
1867
1868                 n += r;
1869         }
1870
1871         return n;
1872 }
1873 #endif // 0