#include "hashmap.h"
#include "macro.h"
#include "siphash24.h"
+#include "mempool.h"
#define INITIAL_N_BUCKETS 31
bool from_pool:1;
};
-struct pool {
- struct pool *next;
- unsigned n_tiles;
- unsigned n_used;
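+/* A tile bundles the Hashmap header with its initial bucket array, so
+ * a fresh hashmap still needs only one fixed-size allocation, just as
+ * the old code carved ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS *
+ * sizeof(struct hashmap_entry*) out of a single chunk. */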
+struct hashmap_tile {
+ Hashmap h;
+ struct hashmap_entry *initial_buckets[INITIAL_N_BUCKETS];
};
-static struct pool *first_hashmap_pool = NULL;
-static void *first_hashmap_tile = NULL;
-
-static struct pool *first_entry_pool = NULL;
-static void *first_entry_tile = NULL;
-
-static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size, unsigned at_least) {
- unsigned i;
-
- /* When a tile is released we add it to the list and simply
- * place the next pointer at its offset 0. */
-
- assert(tile_size >= sizeof(void*));
- assert(at_least > 0);
-
- if (*first_tile) {
- void *r;
-
- r = *first_tile;
- *first_tile = * (void**) (*first_tile);
- return r;
- }
-
- if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
- unsigned n;
- size_t size;
- struct pool *p;
-
- n = *first_pool ? (*first_pool)->n_tiles : 0;
- n = MAX(at_least, n * 2);
- size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
- n = (size - ALIGN(sizeof(struct pool))) / tile_size;
-
- p = malloc(size);
- if (!p)
- return NULL;
-
- p->next = *first_pool;
- p->n_tiles = n;
- p->n_used = 0;
-
- *first_pool = p;
- }
-
- i = (*first_pool)->n_used++;
-
- return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
-}
-
-static void deallocate_tile(void **first_tile, void *p) {
- * (void**) p = *first_tile;
- *first_tile = p;
-}
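+/* The open-coded pool allocator above moves into a generic mempool
+ * implementation. The last macro argument is the minimum number of
+ * tiles a new pool block grows by, matching the old at_least
+ * parameter. mempool.h is not part of this diff; presumably
+ * DEFINE_MEMPOOL expands to little more than an initialized pool
+ * object along these lines:
+ *
+ *     struct mempool hashmap_pool = {
+ *             .tile_size = sizeof(struct hashmap_tile),
+ *             .at_least = 8,
+ *     };
+ */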
+static DEFINE_MEMPOOL(hashmap_pool, struct hashmap_tile, 8);
+static DEFINE_MEMPOOL(hashmap_entry_pool, struct hashmap_entry, 64);
#ifdef VALGRIND
-static void drop_pool(struct pool *p) {
- while (p) {
- struct pool *n;
- n = p->next;
- free(p);
- p = n;
- }
-}
-
-__attribute__((destructor)) static void cleanup_pool(void) {
+__attribute__((destructor)) static void cleanup_pools(void) {
/* Be nice to valgrind */
- drop_pool(first_hashmap_pool);
- drop_pool(first_entry_pool);
+ mempool_drop(&hashmap_entry_pool);
+ mempool_drop(&hashmap_pool);
}
#endif
Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
bool b;
+ struct hashmap_tile *ht;
Hashmap *h;
- size_t size;
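+ /* The pools do no locking, hence they are used from the main
+ * thread only; other threads fall back to plain malloc. */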
b = is_main_thread();
- size = ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS * sizeof(struct hashmap_entry*);
-
if (b) {
- h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size, 8);
- if (!h)
+ ht = mempool_alloc_tile(&hashmap_pool);
+ if (!ht)
return NULL;
- memzero(h, size);
+ memzero(ht, sizeof(struct hashmap_tile));
} else {
- h = malloc0(size);
- if (!h)
+ ht = malloc0(sizeof(struct hashmap_tile));
+ if (!ht)
return NULL;
}
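+ /* The public Hashmap handle is the header embedded in the tile;
+ * from_pool, set below, records which allocator it must be
+ * returned to. */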
+ h = &ht->h;
h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
h->n_buckets = INITIAL_N_BUCKETS;
h->n_entries = 0;
h->iterate_list_head = h->iterate_list_tail = NULL;
- h->buckets = (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)));
+ h->buckets = ht->initial_buckets;
h->from_pool = b;
unlink_entry(h, e, hash);
if (h->from_pool)
- deallocate_tile(&first_entry_tile, e);
+ mempool_free_tile(&hashmap_entry_pool, e);
else
free(e);
}
free(h->buckets);
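+ /* The pool manages whole tiles, so map the embedded Hashmap
+ * pointer back to its enclosing tile before handing it back. */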
if (h->from_pool)
- deallocate_tile(&first_hashmap_tile, h);
+ mempool_free_tile(&hashmap_pool, container_of(h, struct hashmap_tile, h));
else
free(h);
}
return NULL;
}
-static bool resize_buckets(Hashmap *h) {
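+/* Returns 1 if the bucket array was grown (which also replaces the
+ * hash key), 0 if no resize was necessary, negative errno on
+ * failure. */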
+static int resize_buckets(Hashmap *h, unsigned entries_add) {
struct hashmap_entry **n, *i;
- unsigned m;
+ unsigned m, new_n_entries, new_n_buckets;
uint8_t nkey[HASH_KEY_SIZE];
assert(h);
- if (_likely_(h->n_entries*4 < h->n_buckets*3))
- return false;
+ new_n_entries = h->n_entries + entries_add;
+
+ /* Did the addition wrap around, or would new_n_entries * 4 overflow below? */
+ if (_unlikely_(new_n_entries < entries_add || new_n_entries > UINT_MAX / 4))
+ return -ENOMEM;
+
+ new_n_buckets = new_n_entries * 4 / 3;
+
+ if (_likely_(new_n_buckets <= h->n_buckets))
+ return 0;
- /* Increase by four */
- m = (h->n_entries+1)*4-1;
+ /* Quadruple at least, but make sure the reservation fits */
+ m = MAX((h->n_entries+1)*4-1, new_n_buckets);
/* If we hit OOM we simply risk packed hashmaps... */
n = new0(struct hashmap_entry*, m);
if (!n)
- return false;
+ return -ENOMEM;
/* Let's use a different randomized hash key for the
* extension, so that people cannot guess what we are using
memcpy(h->hash_key, nkey, HASH_KEY_SIZE);
- return true;
+ return 1;
}
static int __hashmap_put(Hashmap *h, const void *key, void *value, unsigned hash) {
struct hashmap_entry *e;
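+ /* Grow if necessary; failure (< 0) is ignored here, we merely
+ * risk a packed hashmap. A successful grow (> 0) picks a fresh
+ * randomized hash key, so the bucket hash must be recomputed. */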
- if (resize_buckets(h))
+ if (resize_buckets(h, 1) > 0)
hash = bucket_hash(h, key);
if (h->from_pool)
- e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry), 64U);
+ e = mempool_alloc_tile(&hashmap_entry_pool);
else
e = new(struct hashmap_entry, 1);
return 0;
}
-void hashmap_move(Hashmap *h, Hashmap *other) {
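+/* Pre-grow the bucket array so that at least entries_add further
+ * entries fit without triggering another resize. */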
+int hashmap_reserve(Hashmap *h, unsigned entries_add) {
+ int r;
+
+ assert(h);
+
+ r = resize_buckets(h, entries_add);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
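+/* Illustrative use (hypothetical caller): reserve up front so the
+ * bucket array grows once instead of repeatedly during a batch of
+ * insertions:
+ *
+ *     r = hashmap_reserve(h, n_items);
+ *     if (r < 0)
+ *             return r;
+ *     for (i = 0; i < n_items; i++) {
+ *             r = hashmap_put(h, keys[i], values[i]);
+ *             if (r < 0)
+ *                     return r;
+ *     }
+ */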
+
+int hashmap_move(Hashmap *h, Hashmap *other) {
struct hashmap_entry *e, *n;
assert(h);
/* The same as hashmap_merge(), but every new item from other
- * is moved to h. This function is guaranteed to succeed. */
+ * is moved to h. */
if (!other)
- return;
+ return 0;
for (e = other->iterate_list_head; e; e = n) {
unsigned h_hash, other_hash;
unlink_entry(other, e, other_hash);
link_entry(h, e, h_hash);
}
+
+ return 0;
}
int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
unsigned hash;
struct hashmap_entry *e;
- assert(h);
assert(key);
if (!h)