+ * Returns: The size to actually allocate, including alignment and a
+ * possible following red zone. (A %%\emph{preceding}%%
+ * red zone doesn't concern us: it would either be part of the
+ * previous allocation or of the chunk header.)
+ */
+
+static size_t round_up(size_t sz, unsigned f)
+{
+ ALIGN(sz); VG( if (f&PF_VALGRIND) sz += REDZONE_SIZE; )
+ return (sz);
+}
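+
+/* A sketch of the same computation with the macros expanded, assuming
+ * (hypothetically) an eight-byte alignment granularity and a 16-byte red
+ * zone -- the real values come from @ALIGN@ and @REDZONE_SIZE@:
+ *
+ *    size_t sketch_round_up(size_t sz, unsigned f)
+ *    {
+ *      sz = (sz + 7)&~(size_t)7;           (round up to 8 bytes)
+ *      if (f&PF_VALGRIND) sz += 16;        (append the red zone)
+ *      return (sz);
+ *    }
+ *
+ * so, e.g., @round_up(13, PF_VALGRIND)@ would yield %$16 + 16 = 32$%.
+ */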
+
+/* --- @chunk_bounds@ --- *
+ *
+ * Arguments: @pool_chunk *chunk@ = the chunk
+ * @size_t chsz@ = precomputed chunk header size, rounded up
+ * @unsigned char **guard_out@ = where to put the guard address
+ * @unsigned char **start_out@ = where to put the start address
+ * @unsigned char **end_out@ = where to put the limit address
+ *
+ * Returns: ---
+ *
+ * Use: Determine and return the important boundary addresses
+ * describing the allocatable space within the chunk, i.e., the
+ * space following the header.
+ *
+ * The guard address is the address of the first byte following
+ * the chunk header, without any alignment. The start address
+ * is the base address of the first allocatable byte in the
+ * chunk, immediately following the alignment and redzone after
+ * the chunk header. The limit address is the address of the
+ * first byte after the chunk.
+ *
+ * The chunk's @u.sz@ member must be active -- if the chunk is
+ * dead, then @u.sz@ should be zero.
+ */
+
+static void chunk_bounds(pool_chunk *chunk, size_t chsz,
+ unsigned char **guard_out,
+ unsigned char **start_out, unsigned char **end_out)
+{
+ *guard_out = (unsigned char *)(chunk + 1);
+ *start_out = (unsigned char *)chunk + chsz;
+ *end_out = chunk->p + chunk->u.sz;
+}
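+
+/* The layout these three addresses describe, sketched not to scale (the
+ * alignment padding may be empty, and the red zone is present only when
+ * the pool runs under Valgrind):
+ *
+ *    | chunk header | padding | red zone |   allocatable space   |
+ *    ^              ^                    ^                       ^
+ *    chunk          guard                start                   end
+ */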
+
+/* --- @init_pool@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to storage for the control block
+ * @arena *a@ = pointer to underlying arena
+ * @pool_chunk *active@ = pointer to initial active chunk
+ * @unsigned f@ = flags (@PF_...@)
+ *
+ * Returns: ---
+ *
+ * Use: Initializes a pool control block. Valgrind is not informed.
+ * This is a common subroutine for @pool_create@ and @pool_sub@.
+ */
+
+static void init_pool(pool *p, arena *a, pool_chunk *active, unsigned f)
+{
+ p->a.ops = &pool_ops;
+ p->active = active; p->resources = 0; p->pa = a; p->f = f; p->lastsz = 0;
+ p->live = 0; p->nlive = p->livesz = 0; p->dead = 0;
+}
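+
+/* An illustrative sketch (not the real @pool_create@ or @pool_sub@,
+ * which must also inform Valgrind) of how this fits together with
+ * @alloc_chunk@ below to bootstrap a pool out of its own storage:
+ *
+ *    size_t psz = round_up(sizeof(pool), f);
+ *    pool_chunk *chunk = alloc_chunk(a, psz, f);
+ *    pool *p = (pool *)chunk->p;
+ *    chunk->p += psz; chunk->u.sz -= psz;    (carve out the block)
+ *    init_pool(p, a, chunk, f);
+ */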
+
+/* --- @alloc_chunk@ --- *
+ *
+ * Arguments: @arena *a@ = arena to allocate from
+ * @size_t minsz@ = size of the block intended to be allocated
+ * from this new chunk
+ * @unsigned f@ = flags (@PF_...@)
+ *
+ * Returns: A pointer to the new chunk.
+ *
+ * Use: Subroutine to allocate a chunk. Since a pool control block
+ * is allocated out of its own storage, it's necessary to be
+ * able to do this without reference to a pool. Valgrind is not
+ * informed.
+ *
+ * This code is shared between @pool_create@, @pool_sub@, and
+ * @internal_alloc@.
+ */
+
+static pool_chunk *alloc_chunk(arena *a, size_t minsz, unsigned f)
+{
+ pool_chunk *chunk;
+ size_t sz, chsz = round_up(sizeof(pool_chunk), f);
+
+ /* Allocate the chunk. */
+ sz = chsz + minsz + POOL_CHUNKSZ - 1; sz -= sz%POOL_CHUNKSZ;
+ chunk = x_alloc(a, sz);
+
+ /* Initialize the chunk header. */
+ chunk->p = (unsigned char *)chunk + chsz; chunk->u.sz = sz - chsz;
+
+ /* Done. */
+ D( fprintf(stderr, ";; POOL new chunk %p + %lu\n",
+ (void *)chunk, (unsigned long)sz); )
+ return (chunk);
+}
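+
+/* The size calculation above rounds @chsz + minsz@ up to the next
+ * multiple of @POOL_CHUNKSZ@.  A worked example, with the illustrative
+ * values @POOL_CHUNKSZ = 4096@ and @chsz = 64@, and a request for
+ * @minsz = 10000@:
+ *
+ *    sz = 64 + 10000 + 4096 - 1 = 14159
+ *    sz -= 14159%4096 = 1871;  leaving sz = 12288 = 3*4096
+ */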
+
+/* --- @report_new_chunk_to_valgrind@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @pool_chunk *chunk@ = the new chunk to report
+ *
+ * Returns: ---
+ *
+ * Use: Inform Valgrind about the new chunk, which is surprisingly
+ * involved. This can't be done as part of allocating it
+ * because we need to know the pool control-block address.
+ */
+
+#ifdef HAVE_VALGRIND_VALGRIND_H
+
+static void report_new_chunk_to_valgrind(pool *p, pool_chunk *chunk)
+{
+ unsigned char *guard, *start, *end;
+
+ D( fprintf(stderr, ";; POOL prepare chunk %p\n", (void *)chunk); )
+
+ chunk_bounds(chunk, round_up(sizeof(pool_chunk), PF_VALGRIND),
+ &guard, &start, &end);
+
+ D( fprintf(stderr, ";; POOL \tchunk body %p + %lu\n",
+ (void *)start, (unsigned long)(end - start)); )
+ VALGRIND_MEMPOOL_ALLOC(p, start, end - start);
+
+ D( fprintf(stderr, ";; POOL \telectrify chunk %p + %lu\n",
+ (void *)guard, (unsigned long)(end - guard)); )
+ VALGRIND_MAKE_MEM_NOACCESS(guard, end - guard);
+}
+
+#endif
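+
+/* A sketch of the expected call pattern -- an assumption, following the
+ * @VG@ and @PF_VALGRIND@ conventions used above -- allocating first and
+ * reporting afterwards, once the pool address is known:
+ *
+ *    chunk = alloc_chunk(p->pa, minsz, p->f);
+ *    VG( if (p->f&PF_VALGRIND) report_new_chunk_to_valgrind(p, chunk); )
+ */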
+
+/* --- @maybe_undo_last_alloc@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @void *q@ = pointer to an allocated block
+ *
+ * Returns: Zero on success, %$-1$% if @q@ doesn't refer to the most
+ * recent allocation.
+ *
+ * Use: If @q@ points to the most recently allocated block, then
+ * undo its allocation by adjusting the containing chunk's @p@
+ * and @sz@ members. The actual block contents are preserved,
+ * and Valgrind is not informed. If @q@ does not point to the
+ * most recently allocated block, then return %$-1$% without
+ * doing anything else.
+ */
+
+static int maybe_undo_last_alloc(pool *p, void *q)
+{
+ pool_chunk *chunk = p->active;
+ size_t sz = p->lastsz;
+
+ /* Check the base address. If it's wrong then we fail. */
+ if (q != chunk->p - sz) return (-1);
+
+ /* Adjust the chunk so that the last allocation didn't happen. */
+ chunk->p -= sz; chunk->u.sz += sz;
+ return (0);
+}
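+
+/* An illustrative use -- a sketch only, assuming a resize path shaped
+ * like this and an @internal_alloc(p, sz)@ allocator: if @q@ was the
+ * newest block, the chunk has been wound back, so allocating again at
+ * the new size reuses the same storage rather than leaking the old
+ * block.
+ *
+ *    if (!maybe_undo_last_alloc(p, q))
+ *      q = internal_alloc(p, newsz);
+ */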
+
+/* --- @bury_dead_chunk@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @pool_chunk *chunk@ = the chunk to check
+ *
+ * Returns: Zero if the chunk was dead and has now been buried, %$-1$% if
+ * it wasn't dead after all.