X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?a=blobdiff_plain;f=src%2Fshared%2Fhashmap.c;h=3762e3ab0deb045a98517b6d5fa7b48cb8d45e6e;hb=0371ca0dac1d70b2e5060a3c4e6fbbc2bdbd8671;hp=dcfbb67228e826b21260d4d995ca6ed3a5494bfb;hpb=a4bcff5ba36115495994e9f9ba66074471de76ab;p=elogind.git diff --git a/src/shared/hashmap.c b/src/shared/hashmap.c index dcfbb6722..3762e3ab0 100644 --- a/src/shared/hashmap.c +++ b/src/shared/hashmap.c @@ -24,11 +24,15 @@ #include #include +#ifdef HAVE_SYS_AUXV_H +#include +#endif + #include "util.h" #include "hashmap.h" #include "macro.h" -#define NBUCKETS 127 +#define INITIAL_N_BUCKETS 31 struct hashmap_entry { const void *key; @@ -42,13 +46,14 @@ struct Hashmap { compare_func_t compare_func; struct hashmap_entry *iterate_list_head, *iterate_list_tail; - unsigned n_entries; + struct hashmap_entry ** buckets; + unsigned n_buckets, n_entries; + + unsigned random_xor; bool from_pool; }; -#define BY_HASH(h) ((struct hashmap_entry**) ((uint8_t*) (h) + ALIGN(sizeof(Hashmap)))) - struct pool { struct pool *next; unsigned n_tiles; @@ -61,9 +66,15 @@ static void *first_hashmap_tile = NULL; static struct pool *first_entry_pool = NULL; static void *first_entry_tile = NULL; -static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size) { +static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size, unsigned at_least) { unsigned i; + /* When a tile is released we add it to the list and simply + * place the next pointer at its offset 0. */ + + assert(tile_size >= sizeof(void*)); + assert(at_least > 0); + if (*first_tile) { void *r; @@ -78,7 +89,7 @@ static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t t struct pool *p; n = *first_pool ? (*first_pool)->n_tiles : 0; - n = MAX(512U, n * 2); + n = MAX(at_least, n * 2); size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size); n = (size - ALIGN(sizeof(struct pool))) / tile_size; @@ -103,7 +114,7 @@ static void deallocate_tile(void **first_tile, void *p) { *first_tile = p; } -#ifndef __OPTIMIZE__ +#ifdef VALGRIND static void drop_pool(struct pool *p) { while (p) { @@ -166,6 +177,10 @@ int uint64_compare_func(const void *_a, const void *_b) { return a < b ? -1 : (a > b ? 1 : 0); } +static unsigned bucket_hash(Hashmap *h, const void *p) { + return (h->hash_func(p) ^ h->random_xor) % h->n_buckets; +} + Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) { bool b; Hashmap *h; @@ -173,10 +188,10 @@ Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) { b = is_main_thread(); - size = ALIGN(sizeof(Hashmap)) + NBUCKETS * sizeof(struct hashmap_entry*); + size = ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS * sizeof(struct hashmap_entry*); if (b) { - h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size); + h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size, 8); if (!h) return NULL; @@ -191,23 +206,46 @@ Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) { h->hash_func = hash_func ? hash_func : trivial_hash_func; h->compare_func = compare_func ? compare_func : trivial_compare_func; + h->n_buckets = INITIAL_N_BUCKETS; h->n_entries = 0; h->iterate_list_head = h->iterate_list_tail = NULL; + h->buckets = (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))); + h->from_pool = b; + /* Let's randomize our hash functions a bit so that they are + * harder to guess for clients. 
For this, start out by cheaply + * using some bits the kernel passed into the process using + * the auxiliary vector. If the hashmap grows later on we will + * rehash everything using a new random XOR mask from + * /dev/random. */ +#ifdef HAVE_SYS_AUXV_H + { + void *auxv; + auxv = (void*) getauxval(AT_RANDOM); + h->random_xor = auxv ? *(unsigned*) auxv : random_u(); + } +#else + h->random_xor = random_u(); +#endif + return h; } int hashmap_ensure_allocated(Hashmap **h, hash_func_t hash_func, compare_func_t compare_func) { + Hashmap *q; + assert(h); if (*h) return 0; - if (!(*h = hashmap_new(hash_func, compare_func))) + q = hashmap_new(hash_func, compare_func); + if (!q) return -ENOMEM; + *h = q; return 0; } @@ -216,11 +254,11 @@ static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) { assert(e); /* Insert into hash table */ - e->bucket_next = BY_HASH(h)[hash]; + e->bucket_next = h->buckets[hash]; e->bucket_previous = NULL; - if (BY_HASH(h)[hash]) - BY_HASH(h)[hash]->bucket_previous = e; - BY_HASH(h)[hash] = e; + if (h->buckets[hash]) + h->buckets[hash]->bucket_previous = e; + h->buckets[hash] = e; /* Insert into iteration list */ e->iterate_previous = h->iterate_list_tail; @@ -260,7 +298,7 @@ static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) { if (e->bucket_previous) e->bucket_previous->bucket_next = e->bucket_next; else - BY_HASH(h)[hash] = e->bucket_next; + h->buckets[hash] = e->bucket_next; assert(h->n_entries >= 1); h->n_entries--; @@ -272,8 +310,7 @@ static void remove_entry(Hashmap *h, struct hashmap_entry *e) { assert(h); assert(e); - hash = h->hash_func(e->key) % NBUCKETS; - + hash = bucket_hash(h, e->key); unlink_entry(h, e, hash); if (h->from_pool) @@ -291,6 +328,9 @@ void hashmap_free(Hashmap*h) { hashmap_clear(h); + if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)))) + free(h->buckets); + if (h->from_pool) deallocate_tile(&first_hashmap_tile, h); else @@ -309,6 +349,17 @@ void hashmap_free_free(Hashmap *h) { hashmap_free(h); } +void hashmap_free_free_free(Hashmap *h) { + + /* Free the hashmap and all data and key objects in it */ + + if (!h) + return; + + hashmap_clear_free_free(h); + hashmap_free(h); +} + void hashmap_clear(Hashmap *h) { if (!h) return; @@ -327,37 +378,108 @@ void hashmap_clear_free(Hashmap *h) { free(p); } +void hashmap_clear_free_free(Hashmap *h) { + if (!h) + return; + + while (h->iterate_list_head) { + void *a, *b; + + a = h->iterate_list_head->value; + b = (void*) h->iterate_list_head->key; + remove_entry(h, h->iterate_list_head); + free(a); + free(b); + } +} + static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) { struct hashmap_entry *e; assert(h); - assert(hash < NBUCKETS); + assert(hash < h->n_buckets); - for (e = BY_HASH(h)[hash]; e; e = e->bucket_next) + for (e = h->buckets[hash]; e; e = e->bucket_next) if (h->compare_func(e->key, key) == 0) return e; return NULL; } +static bool resize_buckets(Hashmap *h) { + struct hashmap_entry **n, *i; + unsigned m, nxor; + + assert(h); + + if (_likely_(h->n_entries*4 < h->n_buckets*3)) + return false; + + /* Increase by four */ + m = (h->n_entries+1)*4-1; + + /* If we hit OOM we simply risk packed hashmaps... 
*/ + n = new0(struct hashmap_entry*, m); + if (!n) + return false; + + /* Let's use a different randomized xor value for the + * extension, so that people cannot guess what we are using + * here forever */ + nxor = random_u(); + + for (i = h->iterate_list_head; i; i = i->iterate_next) { + unsigned hash, x; + + hash = h->hash_func(i->key); + + /* First, drop from old bucket table */ + if (i->bucket_next) + i->bucket_next->bucket_previous = i->bucket_previous; + + if (i->bucket_previous) + i->bucket_previous->bucket_next = i->bucket_next; + else + h->buckets[(hash ^ h->random_xor) % h->n_buckets] = i->bucket_next; + + /* Then, add to new backet table */ + x = (hash ^ nxor) % m; + + i->bucket_next = n[x]; + i->bucket_previous = NULL; + if (n[x]) + n[x]->bucket_previous = i; + n[x] = i; + } + + if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)))) + free(h->buckets); + + h->buckets = n; + h->n_buckets = m; + h->random_xor = nxor; + + return true; +} + int hashmap_put(Hashmap *h, const void *key, void *value) { struct hashmap_entry *e; unsigned hash; assert(h); - hash = h->hash_func(key) % NBUCKETS; - + hash = bucket_hash(h, key); e = hash_scan(h, hash, key); if (e) { - if (e->value == value) return 0; - return -EEXIST; } + if (resize_buckets(h)) + hash = bucket_hash(h, key); + if (h->from_pool) - e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry)); + e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry), 64U); else e = new(struct hashmap_entry, 1); @@ -378,7 +500,7 @@ int hashmap_replace(Hashmap *h, const void *key, void *value) { assert(h); - hash = h->hash_func(key) % NBUCKETS; + hash = bucket_hash(h, key); e = hash_scan(h, hash, key); if (e) { e->key = key; @@ -395,7 +517,7 @@ int hashmap_update(Hashmap *h, const void *key, void *value) { assert(h); - hash = h->hash_func(key) % NBUCKETS; + hash = bucket_hash(h, key); e = hash_scan(h, hash, key); if (!e) return -ENOENT; @@ -411,7 +533,7 @@ void* hashmap_get(Hashmap *h, const void *key) { if (!h) return NULL; - hash = h->hash_func(key) % NBUCKETS; + hash = bucket_hash(h, key); e = hash_scan(h, hash, key); if (!e) return NULL; @@ -426,7 +548,7 @@ void* hashmap_get2(Hashmap *h, const void *key, void **key2) { if (!h) return NULL; - hash = h->hash_func(key) % NBUCKETS; + hash = bucket_hash(h, key); e = hash_scan(h, hash, key); if (!e) return NULL; @@ -443,12 +565,8 @@ bool hashmap_contains(Hashmap *h, const void *key) { if (!h) return false; - hash = h->hash_func(key) % NBUCKETS; - - if (!hash_scan(h, hash, key)) - return false; - - return true; + hash = bucket_hash(h, key); + return !!hash_scan(h, hash, key); } void* hashmap_remove(Hashmap *h, const void *key) { @@ -459,9 +577,9 @@ void* hashmap_remove(Hashmap *h, const void *key) { if (!h) return NULL; - hash = h->hash_func(key) % NBUCKETS; - - if (!(e = hash_scan(h, hash, key))) + hash = bucket_hash(h, key); + e = hash_scan(h, hash, key); + if (!e) return NULL; data = e->value; @@ -477,11 +595,12 @@ int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, if (!h) return -ENOENT; - old_hash = h->hash_func(old_key) % NBUCKETS; - if (!(e = hash_scan(h, old_hash, old_key))) + old_hash = bucket_hash(h, old_key); + e = hash_scan(h, old_hash, old_key); + if (!e) return -ENOENT; - new_hash = h->hash_func(new_key) % NBUCKETS; + new_hash = bucket_hash(h, new_key); if (hash_scan(h, new_hash, new_key)) return -EEXIST; @@ -502,13 +621,14 @@ int hashmap_remove_and_replace(Hashmap *h, const 
void *old_key, const void *new_ if (!h) return -ENOENT; - old_hash = h->hash_func(old_key) % NBUCKETS; - if (!(e = hash_scan(h, old_hash, old_key))) + old_hash = bucket_hash(h, old_key); + e = hash_scan(h, old_hash, old_key); + if (!e) return -ENOENT; - new_hash = h->hash_func(new_key) % NBUCKETS; - - if ((k = hash_scan(h, new_hash, new_key))) + new_hash = bucket_hash(h, new_key); + k = hash_scan(h, new_hash, new_key); + if (k) if (e != k) remove_entry(h, k); @@ -529,9 +649,10 @@ void* hashmap_remove_value(Hashmap *h, const void *key, void *value) { if (!h) return NULL; - hash = h->hash_func(key) % NBUCKETS; + hash = bucket_hash(h, key); - if (!(e = hash_scan(h, hash, key))) + e = hash_scan(h, hash, key); + if (!e) return NULL; if (e->value != value) @@ -619,9 +740,10 @@ void *hashmap_iterate_skip(Hashmap *h, const void *key, Iterator *i) { if (!h) return NULL; - hash = h->hash_func(key) % NBUCKETS; + hash = bucket_hash(h, key); - if (!(e = hash_scan(h, hash, key))) + e = hash_scan(h, hash, key); + if (!e) return NULL; *i = (Iterator) e; @@ -700,6 +822,14 @@ unsigned hashmap_size(Hashmap *h) { return h->n_entries; } +unsigned hashmap_buckets(Hashmap *h) { + + if (!h) + return 0; + + return h->n_buckets; +} + bool hashmap_isempty(Hashmap *h) { if (!h) @@ -719,9 +849,9 @@ int hashmap_merge(Hashmap *h, Hashmap *other) { for (e = other->iterate_list_head; e; e = e->iterate_next) { int r; - if ((r = hashmap_put(h, e->key, e->value)) < 0) - if (r != -EEXIST) - return r; + r = hashmap_put(h, e->key, e->value); + if (r < 0 && r != -EEXIST) + return r; } return 0; @@ -743,13 +873,11 @@ void hashmap_move(Hashmap *h, Hashmap *other) { n = e->iterate_next; - h_hash = h->hash_func(e->key) % NBUCKETS; - + h_hash = bucket_hash(h, e->key); if (hash_scan(h, h_hash, e->key)) continue; - other_hash = other->hash_func(e->key) % NBUCKETS; - + other_hash = bucket_hash(other, e->key); unlink_entry(other, e, other_hash); link_entry(h, e, h_hash); } @@ -764,12 +892,13 @@ int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) { assert(h); - h_hash = h->hash_func(key) % NBUCKETS; + h_hash = bucket_hash(h, key); if (hash_scan(h, h_hash, key)) return -EEXIST; - other_hash = other->hash_func(key) % NBUCKETS; - if (!(e = hash_scan(other, other_hash, key))) + other_hash = bucket_hash(other, key); + e = hash_scan(other, other_hash, key); + if (!e) return -ENOENT; unlink_entry(other, e, other_hash); @@ -783,7 +912,8 @@ Hashmap *hashmap_copy(Hashmap *h) { assert(h); - if (!(copy = hashmap_new(h->hash_func, h->compare_func))) + copy = hashmap_new(h->hash_func, h->compare_func); + if (!copy) return NULL; if (hashmap_merge(copy, h) < 0) { @@ -822,7 +952,7 @@ void *hashmap_next(Hashmap *h, const void *key) { if (!h) return NULL; - hash = h->hash_func(key) % NBUCKETS; + hash = bucket_hash(h, key); e = hash_scan(h, hash, key); if (!e) return NULL;
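The sketches that follow are illustrative only: none of this code is part of the patch, and every helper name in them (push_tile, pop_tile, read_urandom_mask, bucket_index, should_grow, grown_n_buckets, store_copy) is made up for the example. First, the intrusive free list the tile allocator relies on. A released tile stores the list's next pointer in its own first bytes, which is what the new assert(tile_size >= sizeof(void*)) in allocate_tile() guards and what the added comment describes:

static void push_tile(void **first_tile, void *tile) {
        /* The next pointer is stored in the released tile's own first bytes. */
        *(void**) tile = *first_tile;
        *first_tile = tile;
}

static void* pop_tile(void **first_tile) {
        void *tile = *first_tile;

        if (tile)
                *first_tile = *(void**) tile;   /* unlink the head */

        return tile;
}

allocate_tile() pops from this list before carving fresh tiles out of a pool page; deallocate_tile() is exactly the push.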
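Every entry also sits on two doubly linked lists at once, which is what link_entry() and unlink_entry() maintain: a per-bucket chain that hash_scan() walks, and a global insertion-order list used by iteration, the hashmap_clear*() helpers and resize_buckets(). A commented sketch of the entry layout, mirroring the struct at the top of the file:

struct hashmap_entry {
        const void *key;
        void *value;

        /* Chain of all entries that hashed into the same bucket. */
        struct hashmap_entry *bucket_next, *bucket_previous;

        /* Global insertion-order list, walked by hashmap_iterate(),
         * hashmap_clear() and resize_buckets(). */
        struct hashmap_entry *iterate_next, *iterate_previous;
};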
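A minimal sketch of the seeding scheme described in the hashmap_new() comment, assuming glibc's getauxval(): take sizeof(unsigned) bytes from the kernel-provided AT_RANDOM vector when sys/auxv.h is available, and fall back to /dev/urandom otherwise. The patch itself falls back to its random_u() helper from util.h; read_urandom_mask() and bucket_index() below are stand-ins.

#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>           /* getauxval(), AT_RANDOM; glibc 2.16+ */

static unsigned read_urandom_mask(void) {
        unsigned u = 0;
        FILE *f;

        f = fopen("/dev/urandom", "re");
        if (f) {
                if (fread(&u, sizeof(u), 1, f) != 1)
                        u = 0;
                fclose(f);
        }

        return u;
}

static unsigned random_xor_mask(void) {
        void *auxv;

        /* AT_RANDOM points at 16 random bytes the kernel sets up at exec() time. */
        auxv = (void*) getauxval(AT_RANDOM);
        if (auxv) {
                unsigned u;
                memcpy(&u, auxv, sizeof(u));
                return u;
        }

        return read_urandom_mask();
}

/* Same shape as the new bucket_hash(): mix the caller's hash with the
 * per-hashmap mask, then reduce modulo the current bucket count. */
static unsigned bucket_index(unsigned (*hash_func)(const void*), unsigned xor_mask,
                             unsigned n_buckets, const void *key) {
        return (hash_func(key) ^ xor_mask) % n_buckets;
}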
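The growth policy in the new resize_buckets(), restated as a stand-alone sketch: grow once the table is three quarters full, and choose the new bucket count so the table is only about a quarter full afterwards.

#include <stdbool.h>

static bool should_grow(unsigned n_entries, unsigned n_buckets) {
        /* Same test as resize_buckets(), written without division:
         * grow once n_entries / n_buckets reaches 3/4. */
        return n_entries * 4 >= n_buckets * 3;
}

static unsigned grown_n_buckets(unsigned n_entries) {
        /* "Increase by four": about four buckets per entry currently stored. */
        return (n_entries + 1) * 4 - 1;
}

Starting from INITIAL_N_BUCKETS (31), the first resize is therefore triggered while the 25th entry is being inserted (24*4 >= 31*3) and grows the table to 99 buckets. Each resize also draws a fresh random_xor mask, so bucket placement cannot be predicted across rehashes.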
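Finally, a usage sketch for the new hashmap_free_free_free(), which releases the table together with every value and every key, for maps that own both (for example strdup()ed strings). It assumes the string_hash_func/string_compare_func helpers declared in hashmap.h; store_copy() is illustrative.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include "hashmap.h"

int store_copy(Hashmap **m, const char *key, const char *value) {
        char *k, *v;
        int r;

        r = hashmap_ensure_allocated(m, string_hash_func, string_compare_func);
        if (r < 0)
                return r;

        k = strdup(key);
        v = strdup(value);
        if (!k || !v) {
                free(k);
                free(v);
                return -ENOMEM;
        }

        r = hashmap_put(*m, k, v);
        if (r < 0) {
                /* Also covers -EEXIST: the map keeps its old entry, so drop the copies. */
                free(k);
                free(v);
                return r;
        }

        return 0;
}

/* ...and when the map is no longer needed, a single call frees the table,
 * every value and every key: hashmap_free_free_free(map). */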