[elogind.git] / src / basic / hashmap.c
/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "hashmap.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "strv.h"
#include "util.h"

#ifdef ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
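
/*
 * A small worked example of the Robin Hood insertion rule used below
 * (illustrative only; bucket count and hash assignments are made up):
 *
 *   index:     0    1    2    3
 *   contents:  A    B    C    -       A and B hash to 0; C hashes to 2
 *   DIB:       0    1    0    -       (distance from initial bucket)
 *
 * Inserting D, which also hashes to bucket 0: D probes index 0 at
 * distance 0 and index 1 at distance 1 without displacing anyone, since
 * the residents are no "wealthier" than D. At index 2 the resident C has
 * DIB 0 < 2, i.e. C sits in its optimal bucket while D is already two
 * buckets from home, so D takes index 2 (DIB 2) and the displaced C
 * continues probing, landing in the free index 3 with DIB 1. This is
 * what hashmap_put_robin_hood() implements.
 */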

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE            5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */
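
/* Example of the raw encoding: an entry sitting 3 buckets past its initial
 * bucket stores raw DIB 3; an entry displaced by 0xfd (253) buckets or more
 * stores DIB_RAW_OVERFLOW instead, and its true DIB is recomputed from the
 * hash on demand, see bucket_calculate_dib(). */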

#define DIB_FREE UINT_MAX

#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                     /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while(hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         *              or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
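
/* Worked example, assuming a typical 64-bit ABI with HASH_KEY_SIZE == 16:
 * sizeof(struct indirect_storage) = 8 + 16 + 4 + 4 + 4 + 3 = 39 bytes, so
 * DIRECT_BUCKETS(struct set_entry)             = 39 / (8 + 1)  = 4, and
 * DIRECT_BUCKETS(struct ordered_hashmap_entry) = 39 / (24 + 1) = 1,
 * consistent with the assert_cc() checks below. */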

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool,         Hashmap,        8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level limit. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
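
/* E.g. in a table of 8 buckets, an entry whose initial bucket is 6 but which
 * ended up at index 1 has wrapped around the end of the array:
 * bucket_distance(h, 1, 6) = 8 + 1 - 6 = 3. */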

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
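
/* A worked example of the backward shift (values illustrative):
 *
 *   index:     0    1    2    3    4
 *   contents:  A    B    C    D    -
 *   DIB:       0    1    1    0    -
 *
 * Removing B at index 1: the scan to the right stops at index 3, because D
 * has DIB == 0 (it already sits in its optimal bucket and must not move).
 * C shifts from index 2 to index 1, its DIB dropping from 1 to 0, and
 * index 2 is marked free. No tombstone is left behind, so later lookups
 * never probe past a free bucket in vain. */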

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
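
/* A minimal iteration sketch (guarded out of the build; callers would
 * normally use the HASHMAP_FOREACH convenience macros from hashmap.h
 * instead of calling internal_hashmap_iterate() directly): */
#if 0
static unsigned count_entries(Hashmap *h) {
        Iterator i = ITERATOR_FIRST;
        const void *key;
        void *value;
        unsigned n = 0;

        /* Removing the *current* entry while iterating is allowed; the
         * iterators above detect the resulting backward shift via next_key. */
        while (internal_hashmap_iterate(HASHMAP_BASE(h), &i, &value, &key))
                n++;

        return n;
}
#endif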

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);

        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*)        hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return (Set*)            hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops  HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
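        /* E.g. with INV_KEEP_FREE == 5 and 100 entries to store:
         * new_n_buckets = 100 + 100/4 = 125, keeping the load factor at
         * or below 100/125 = 0.8. The byte size of the allocation below
         * is then rounded up to a power of two via log2u_round_up(). */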
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
               (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}

/*
 * Finds an entry with a matching key
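 * The scan can stop early thanks to the Robin Hood invariant: along any
 * probe path, once a bucket holds an entry closer to its own optimal
 * bucket than we are to ours (dib < distance), our key cannot be stored
 * further along, or it would have displaced that entry at insertion time.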
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
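
/* A minimal usage sketch of the put/get API (guarded out of the build;
 * hashmap_new(), hashmap_get(), _cleanup_hashmap_free_ and string_hash_ops
 * are the convenience macros/declarations assumed from hashmap.h): */
#if 0
static int example(void) {
        _cleanup_hashmap_free_ Hashmap *m = NULL;
        int r;

        m = hashmap_new(&string_hash_ops);
        if (!m)
                return -ENOMEM;

        r = hashmap_put(m, "foo", (void*) "bar");
        if (r < 0) /* -EEXIST if "foo" is already mapped to another value */
                return r;

        assert_se(streq(hashmap_get(m, "foo"), "bar"));
        return 0;
}
#endif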
1214
1215 int set_put(Set *s, const void *key) {
1216         struct swap_entries swap;
1217         struct hashmap_base_entry *e;
1218         unsigned hash, idx;
1219
1220         assert(s);
1221
1222         hash = bucket_hash(s, key);
1223         idx = bucket_scan(s, hash, key);
1224         if (idx != IDX_NIL)
1225                 return 0;
1226
1227         e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1228         e->key = key;
1229         return hashmap_put_boldly(s, hash, &swap, true);
1230 }
1231
1232 int hashmap_replace(Hashmap *h, const void *key, void *value) {
1233         struct swap_entries swap;
1234         struct plain_hashmap_entry *e;
1235         unsigned hash, idx;
1236
1237         assert(h);
1238
1239         hash = bucket_hash(h, key);
1240         idx = bucket_scan(h, hash, key);
1241         if (idx != IDX_NIL) {
1242                 e = plain_bucket_at(h, idx);
1243 #ifdef ENABLE_DEBUG_HASHMAP
1244                 /* Although the key is equal, the key pointer may have changed,
1245                  * and this would break our assumption for iterating. So count
1246                  * this operation as incompatible with iteration. */
1247                 if (e->b.key != key) {
1248                         h->b.debug.put_count++;
1249                         h->b.debug.rem_count++;
1250                         h->b.debug.last_rem_idx = idx;
1251                 }
1252 #endif
1253                 e->b.key = key;
1254                 e->value = value;
1255                 return 0;
1256         }
1257
1258         e = &bucket_at_swap(&swap, IDX_PUT)->p;
1259         e->b.key = key;
1260         e->value = value;
1261         return hashmap_put_boldly(h, hash, &swap, true);
1262 }
1263
1264 int hashmap_update(Hashmap *h, const void *key, void *value) {
1265         struct plain_hashmap_entry *e;
1266         unsigned hash, idx;
1267
1268         assert(h);
1269
1270         hash = bucket_hash(h, key);
1271         idx = bucket_scan(h, hash, key);
1272         if (idx == IDX_NIL)
1273                 return -ENOENT;
1274
1275         e = plain_bucket_at(h, idx);
1276         e->value = value;
1277         return 0;
1278 }
1279
1280 void *internal_hashmap_get(HashmapBase *h, const void *key) {
1281         struct hashmap_base_entry *e;
1282         unsigned hash, idx;
1283
1284         if (!h)
1285                 return NULL;
1286
1287         hash = bucket_hash(h, key);
1288         idx = bucket_scan(h, hash, key);
1289         if (idx == IDX_NIL)
1290                 return NULL;
1291
1292         e = bucket_at(h, idx);
1293         return entry_value(h, e);
1294 }
1295
1296 void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
1297         struct plain_hashmap_entry *e;
1298         unsigned hash, idx;
1299
1300         if (!h)
1301                 return NULL;
1302
1303         hash = bucket_hash(h, key);
1304         idx = bucket_scan(h, hash, key);
1305         if (idx == IDX_NIL)
1306                 return NULL;
1307
1308         e = plain_bucket_at(h, idx);
1309         if (key2)
1310                 *key2 = (void*) e->b.key;
1311
1312         return e->value;
1313 }
1314
1315 bool internal_hashmap_contains(HashmapBase *h, const void *key) {
1316         unsigned hash;
1317
1318         if (!h)
1319                 return false;
1320
1321         hash = bucket_hash(h, key);
1322         return bucket_scan(h, hash, key) != IDX_NIL;
1323 }
1324
1325 void *internal_hashmap_remove(HashmapBase *h, const void *key) {
1326         struct hashmap_base_entry *e;
1327         unsigned hash, idx;
1328         void *data;
1329
1330         if (!h)
1331                 return NULL;
1332
1333         hash = bucket_hash(h, key);
1334         idx = bucket_scan(h, hash, key);
1335         if (idx == IDX_NIL)
1336                 return NULL;
1337
1338         e = bucket_at(h, idx);
1339         data = entry_value(h, e);
1340         remove_entry(h, idx);
1341
1342         return data;
1343 }
1344
1345 void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1346         struct plain_hashmap_entry *e;
1347         unsigned hash, idx;
1348         void *data;
1349
1350         if (!h) {
1351                 if (rkey)
1352                         *rkey = NULL;
1353                 return NULL;
1354         }
1355
1356         hash = bucket_hash(h, key);
1357         idx = bucket_scan(h, hash, key);
1358         if (idx == IDX_NIL) {
1359                 if (rkey)
1360                         *rkey = NULL;
1361                 return NULL;
1362         }
1363
1364         e = plain_bucket_at(h, idx);
1365         data = e->value;
1366         if (rkey)
1367                 *rkey = (void*) e->b.key;
1368
1369         remove_entry(h, idx);
1370
1371         return data;
1372 }
1373
1374 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1375         struct swap_entries swap;
1376         struct plain_hashmap_entry *e;
1377         unsigned old_hash, new_hash, idx;
1378
1379         if (!h)
1380                 return -ENOENT;
1381
1382         old_hash = bucket_hash(h, old_key);
1383         idx = bucket_scan(h, old_hash, old_key);
1384         if (idx == IDX_NIL)
1385                 return -ENOENT;
1386
1387         new_hash = bucket_hash(h, new_key);
1388         if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
1389                 return -EEXIST;
1390
1391         remove_entry(h, idx);
1392
1393         e = &bucket_at_swap(&swap, IDX_PUT)->p;
1394         e->b.key = new_key;
1395         e->value = value;
1396         assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1397
1398         return 0;
1399 }
1400
1401 #if 0 /// UNNEEDED by elogind
1402 int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
1403         struct swap_entries swap;
1404         struct hashmap_base_entry *e;
1405         unsigned old_hash, new_hash, idx;
1406
1407         if (!s)
1408                 return -ENOENT;
1409
1410         old_hash = bucket_hash(s, old_key);
1411         idx = bucket_scan(s, old_hash, old_key);
1412         if (idx == IDX_NIL)
1413                 return -ENOENT;
1414
1415         new_hash = bucket_hash(s, new_key);
1416         if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
1417                 return -EEXIST;
1418
1419         remove_entry(s, idx);
1420
1421         e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1422         e->key = new_key;
1423         assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
1424
1425         return 0;
1426 }
1427 #endif // 0
1428
1429 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1430         struct swap_entries swap;
1431         struct plain_hashmap_entry *e;
1432         unsigned old_hash, new_hash, idx_old, idx_new;
1433
1434         if (!h)
1435                 return -ENOENT;
1436
1437         old_hash = bucket_hash(h, old_key);
1438         idx_old = bucket_scan(h, old_hash, old_key);
1439         if (idx_old == IDX_NIL)
1440                 return -ENOENT;
1441
1442         old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
1443
1444         new_hash = bucket_hash(h, new_key);
1445         idx_new = bucket_scan(h, new_hash, new_key);
1446         if (idx_new != IDX_NIL)
1447                 if (idx_old != idx_new) {
1448                         remove_entry(h, idx_new);
1449                         /* Compensate for a possible backward shift. */
1450                         if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
1451                                 idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
1452                         assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
1453                 }
1454
1455         remove_entry(h, idx_old);
1456
1457         e = &bucket_at_swap(&swap, IDX_PUT)->p;
1458         e->b.key = new_key;
1459         e->value = value;
1460         assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1461
1462         return 0;
1463 }
1464
1465 void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
1466         struct plain_hashmap_entry *e;
1467         unsigned hash, idx;
1468
1469         if (!h)
1470                 return NULL;
1471
1472         hash = bucket_hash(h, key);
1473         idx = bucket_scan(h, hash, key);
1474         if (idx == IDX_NIL)
1475                 return NULL;
1476
1477         e = plain_bucket_at(h, idx);
1478         if (e->value != value)
1479                 return NULL;
1480
1481         remove_entry(h, idx);
1482
1483         return value;
1484 }
1485
1486 static unsigned find_first_entry(HashmapBase *h) {
1487         Iterator i = ITERATOR_FIRST;
1488
1489         if (!h || !n_entries(h))
1490                 return IDX_NIL;
1491
1492         return hashmap_iterate_entry(h, &i);
1493 }
1494
1495 void *internal_hashmap_first(HashmapBase *h) {
1496         unsigned idx;
1497
1498         idx = find_first_entry(h);
1499         if (idx == IDX_NIL)
1500                 return NULL;
1501
1502         return entry_value(h, bucket_at(h, idx));
1503 }
1504
void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

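/* A hedged drain-loop sketch for the two steal helpers below, assuming the
 * usual hashmap_steal_first() wrapper from hashmap.h and heap-allocated
 * values that the caller is meant to take over (identifiers are
 * illustrative only):
 *
 *         void *v;
 *
 *         while ((v = hashmap_steal_first(h)))
 *                 free(v);
 */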
void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}

unsigned internal_hashmap_size(HashmapBase *h) {

        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {

        if (!h)
                return 0;

        return n_buckets(h);
}

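/* Copies all entries from other into h (a shallow copy: key and value
 * pointers are shared, not duplicated). Keys already present in h are
 * skipped, which is why -EEXIST from hashmap_put() is not treated as an
 * error below. On any other failure, a partial merge may remain in h. */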
int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

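/* The Set counterpart of the merge above. No -EEXIST handling is needed
 * here: set_put() returns 0 rather than an error when the key is already
 * present. */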
int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

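/* Grows the bucket array up front so that at least entries_add further
 * entries fit without growing again. A minimal sketch of the intended
 * pattern, assuming the hashmap_reserve() wrapper from hashmap.h and
 * distinct keys (identifiers are illustrative only): after a successful
 * reserve, the puts below should not fail with -ENOMEM:
 *
 *         r = hashmap_reserve(h, n_items);
 *         if (r < 0)
 *                 return r;
 *         for (k = 0; k < n_items; k++)
 *                 assert_se(hashmap_put(h, items[k].key, items[k].value) >= 0);
 */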
int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

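/* Moves the single entry for key from other to h. Returns -EEXIST if h
 * already contains the key (the entry then stays in other), and -ENOENT if
 * other does not contain it (or is NULL). */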
int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

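/* Returns a shallow copy of h: the new map shares h's hash_ops and all key
 * and value pointers with the original, so nothing may be freed while both
 * maps are alive. Returns NULL on allocation failure. */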
HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        /* HASHMAP_DEBUG_SRC_ARGS expands to nothing, or to additional
         * arguments including their leading comma, hence no comma before
         * it here. */
        copy = hashmap_base_new(h->hash_ops, h->type  HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

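/* Returns a newly allocated, NULL-terminated array of all value pointers
 * in h. The caller owns (and must free) the array itself; the values are
 * not copied, so their ownership is unchanged. Note that h must not be
 * NULL here, unlike in most of the accessors above. */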
char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

#if 0 /// UNNEEDED by elogind
void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
#endif // 0

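/* Inserts value into the set and takes ownership of it: if the value was
 * already present (r == 0) or insertion failed (r < 0), it is freed right
 * away. A hedged sketch of the intended call pattern (identifiers are
 * illustrative only):
 *
 *         char *w = strdup(word);
 *         if (!w)
 *                 return -ENOMEM;
 *         r = set_consume(seen, w);   // w must not be used past this point
 *         if (r < 0)
 *                 return r;
 */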
int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

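/* Stores a copy of the string p in the set. Returns > 0 if the copy was
 * added, 0 if an equal string was already present, and -ENOMEM on
 * allocation failure. This assumes the set was created with
 * string_hash_ops, so that set_contains() compares by content and the
 * strdup() can be skipped for duplicates. */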
int set_put_strdup(Set *s, const char *p) {
        char *c;

        assert(s);
        assert(p);

        if (set_contains(s, (char*) p))
                return 0;

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        return set_consume(s, c);
}

#if 0 /// UNNEEDED by elogind
int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

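/* Splits v at the given separators with extract_first_word() and stores a
 * copy of each word in the set. Returns 0 once the input is exhausted
 * (extract_first_word() returns 0 at the end of input), or a negative
 * errno-style error from extraction or insertion. */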
int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = v;
        int r;

        assert(s);
        assert(v);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
#endif // 0