+/* Trivial tile allocator: fixed-size tiles are carved sequentially out of
+ * page-aligned pools; freed tiles are kept on a simple free list for reuse. */
+struct pool {
+ struct pool *next;
+ unsigned n_tiles;
+ unsigned n_used;
+};
+
+static struct pool *first_hashmap_pool = NULL;
+static void *first_hashmap_tile = NULL;
+
+static struct pool *first_entry_pool = NULL;
+static void *first_entry_tile = NULL;
+
+static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size) {
+ unsigned i;
+
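+ /* Reuse a tile from the free list if one is available; a freed tile
+ * stores the pointer to the next free tile in its first bytes. */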
+ if (*first_tile) {
+ void *r;
+
+ r = *first_tile;
+ *first_tile = * (void**) (*first_tile);
+ return r;
+ }
+
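+ /* No pool yet, or the current pool is exhausted: allocate a new
+ * page-aligned pool, doubling the tile count each time (minimum 512). */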
+ if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
+ unsigned n;
+ size_t size;
+ struct pool *p;
+
+ n = *first_pool ? (*first_pool)->n_tiles : 0;
+ n = MAX(512U, n * 2);
+ size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
+ n = (size - ALIGN(sizeof(struct pool))) / tile_size;
+
+ p = malloc(size);
+ if (!p)
+ return NULL;
+
+ p->next = *first_pool;
+ p->n_tiles = n;
+ p->n_used = 0;
+
+ *first_pool = p;
+ }
+
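+ /* Hand out the next unused tile from the front pool. */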
+ i = (*first_pool)->n_used++;
+
+ return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
+}
+
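+/* Return a tile to the free list, threading the list pointer through the tile itself. */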
+static void deallocate_tile(void **first_tile, void *p) {
+ * (void**) p = *first_tile;
+ *first_tile = p;
+}
+
+#ifndef __OPTIMIZE__
+
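+/* In non-optimized builds, free all pools at exit so that valgrind does not report them as leaked. */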
+static void drop_pool(struct pool *p) {
+ while (p) {
+ struct pool *n;
+ n = p->next;
+ free(p);
+ p = n;
+ }
+}
+
+__attribute__((destructor)) static void cleanup_pool(void) {
+ /* Be nice to valgrind */
+
+ drop_pool(first_hashmap_pool);
+ drop_pool(first_entry_pool);
+}
+
+#endif
+