.
.\"--------------------------------------------------------------------------
.TH pool 3mLib "7 July 2000" "Straylight/Edgeware" "mLib utilities library"
+.\" @pool_create
+.\" @pool_sub
.\" @pool_alloc
+.\" @pool_allocv
+.\" @POOL_NEW
+.\" @POOL_NEWV
+.\" @pool_realloc
+.\" @pool_reallocv
+.\" @POOL_RENEWV
.\" @pool_strdup
-.\" @pool_create
+.\" @pool_free
+.\" @pool_recycle
.\" @pool_destroy
-.\" @pool_sub
+.
.\" @pool_add
.\" @POOL_ADD
+.
.\" @pool_fopen
.\" @pool_fclose
.\" @pool_subarena
.nf
.B "#include <mLib/pool.h>"
.PP
-.B "typedef struct { ...\& } pool;"
-.PP
.ta 2n
.B "typedef struct {"
+.B " arena a;"
+.B " ...\&"
+.B "} pool;"
+.PP
+.B "typedef struct {"
.B " pool_resource *next;"
.BI " void (*destroy)(pool_resource *" r );
.B "} pool_resource;"
.PP
.BI "pool *pool_create(arena *" a );
.BI "pool *pool_sub(pool *" p );
+.BI "void *pool_alloc(pool *" p ", size_t " sz );
+.BI "void *pool_allocv(pool *" p ", size_t " n ", size_t " sz );
+.ta \w'\fBvoid *pool_realloc('u
+.BI "void *pool_realloc(pool *" p ", void *" q ,
+.BI " size_t " sz ", size_t " osz );
+.ta \w'\fBvoid *pool_reallocv('u
+.BI "void *pool_reallocv(pool *" p ", void *" q ,
+.BI " size_t " n ", size_t " on ", size_t " sz );
+.BI "char *pool_strdup(pool *" p ", const char *" s );
+.BI "void pool_free(pool *" p ", void *" q );
+.BI "void pool_recycle(pool *" p );
.BI "void pool_destroy(pool *" p );
+.PP
+.BI "POOL_NEW(" type " *" q ", pool *" p );
+.BI "POOL_NEWV(" type " *" q ", pool *" p ", size_t " n );
+.BI "POOL_RENEWV(" type " *" q ", pool *" p ", size_t " n ", size_t " on );
+.PP
.ta \w'\fBvoid pool_add('u
.BI "void pool_add(pool *" p ", pool_resource *" r ,
.BI " void (*" dfn ")(pool_resource *" r ));
-.BI "void *pool_alloc(pool *" p ", size_t " sz );
-.BI "char *pool_strdup(pool *" p ", const char *" s );
+.ta \w'\fBvoid POOL_ADD('u
+.BI "void POOL_ADD(pool *" p ", pool_resource *" r ,
+.BI " void (*" dfn ")(pool_resource *" r ));
+.PP
.BI "pool_file *pool_fopen(pool *" p ", const char *" file ", const char *" how );
.BI "int pool_fclose(pool_file *" pf );
.BI "subarena *pool_subarena(pool *" p );
.PP
-.ta \w'\fBvoid POOL_ADD('u
-.BI "void POOL_ADD(pool *" p ", pool_resource *" r ,
-.BI " void (*" dfn ")(pool_resource *" r ));
.fi
.
.\"--------------------------------------------------------------------------
is a collection of resources (e.g., memory, files) which may be disposed
of simultaneously.
.PP
-A pool may be a
-.IR "root pool" ,
-in which case it stands on its own, or it may be a
-.IR "subpool"
-of another pool (which may in turn either be a root pool or a subpool of
-another).
-.PP
-Pools manage memory efficiently. Memory is allocated in large chunks
-from an
+Pools manage memory efficiently.
+Memory is allocated in large chunks
+from an underlying
.BR arena (3),
-and given out as necessary to callers. There is no way of freeing
-memory dynamically; instead, the memory allocated by a pool is freed
-when the pool is destroyed. While allocation is rapid, there is waste
+and given out as necessary to callers.
+The most recently allocated block can be resized or freed;
+otherwise, allocation is permanent,
+and the only way to recover the memory
+is to recycle or destroy the entire pool.
+.PP
+While allocation is rapid, there is waste
because the allocator has to ensure that blocks are properly aligned.
-Since pools offer an arena interface, it is possible to build a
+Since pools offer an arena interface,
+it is possible to build a
.BR subarena (3)
-over them. This also enables memory in the subarena to be reclaimed
+over them.
+This also enables memory in the subarena to be reclaimed
when the pool is destroyed.
.PP
-Other resources (e.g., file handles) may be added to the pool. The pool
-will automatically release any resources it has when it's destroyed.
-Attaching resources to an appropriate pool can therefore be a useful way
-of avoiding memory leaks.
+Other resources (e.g., file handles) may be added to the pool.
+The pool will automatically release any resources it has when it's destroyed.
+Attaching resources to an appropriate pool
+can therefore be a useful way of avoiding leaks.
.
-.SS "Creating and destroying pools"
-A new root pool is created using
+.SS "Creating, recycling, and destroying pools"
+A pool can be created by calling
.BR pool_create ,
-passing it an arena from which it can allocate large memory blocks.
+passing it an arena from which it can allocate its large memory chunks.
+A pool created in this way is called a
+.IR "root pool" .
.PP
-A subpool is created by calling
-.BR pool_sub ,
-naming the parent pool.
+A pool can be
+.I recycled
+by calling the
+.B pool_recycle
+function.
+Recycling a pool releases all of the resources added to the pool,
+and invalidates the memory allocated from the pool.
+However, the chunks from which allocations were made are retained,
+and the pool remains valid:
+further allocations can be made using the pool,
+which will be satisfied using the retained chunks of memory,
+and further resources can be added to the pool.
.PP
-Pools are destroyed by passing them to
-.BR pool_destroy .
-Pools created by
-.B pool_create
-are completely destroyed, since the memory containing the pool structure
-is allocated from the pool itself. Subpools, on the other hand, are
-allocated from a parent pool, and may be reused after being `destroyed'.
+Alternatively, a pool can be
+.I destroyed
+by calling
+.BR pool_destroy ,
+which also releases all of the pool's resources,
+but additionally frees its memory chunks back to the underlying arena.
+When a root pool is destroyed,
+the pool becomes invalid and must not be used for any purpose.
+.PP
+A
+.I subpool
+is created by calling
+.BR pool_sub .
+Subpools are completely destroyed when their parents are destroyed.
+However, a subpool can also be explicitly destroyed,
+by calling
+.BR pool_destroy :
+in this case, a subpool remains valid,
+and can be used for further allocations and resources,
+so the effect is similar to recycling;
+however, it's not as fast,
+because the pool will have to claim chunks of memory
+from its underlying arena again.
+It's usually better to recycle subpools;
+this feature primarily exists for compatibility with programs
+written before explicit recycling was introduced.
.
.SS "Memory allocation"
-Memory is allocated from a pool by calling
+The primary means for allocating memory from a pool is by calling
.BR pool_alloc ,
-passing it the pool and the size of memory requested. There is an
-interface for copying strings,
-.BR pool_strdup ,
-since this is a common operation. Note that there is no
-.BR pool_free :
-if this is important, either use the pool's arena
-.B p->pa
-directly or create a subpool.
+passing it the pool and the size of memory requested.
+There is also
+.BR pool_allocv ,
+which will safely allocate enough space for a vector of
+.I n
+elements, each of size
+.IR sz ,
+in the manner of
+.BR calloc (3);
+though
+.B pool_alloc
+and
+.B pool_allocv
+do not clear the memory they return.
+.PP
+The
+.B POOL_NEW
+macro sets its argument
+.I q
+to point to a fresh block large enough for the type that
+.I q
+points to,
+allocated from the pool
+.IR p ;
+the
+.B POOL_NEWV
+macro is similar,
+but sets
+.I q
+to point to a fresh block large enough for a vector of
+.I n
+elements,
+each of the type that
+.I q
+points to.
+.PP
+The most recently allocated block can be resized
+using the
+.B pool_realloc
+function,
+passing it the pool address
+.IR p ,
+the original block address
+.IR q ,
+the new block size
+.IR sz ,
+and the original block size
+.IR osz .
+If there is still enough space in the chunk
+(which, in particular, will happen if the block is made smaller)
+then the block address does not change,
+and further allocations are made following the altered block size;
+otherwise, a fresh block is reallocated from a different chunk
+and the contents are copied,
+but the space it occupied in the original chunk is made available
+for future allocations.
+Similar to
+.B pool_allocv
+above,
+the
+.B pool_reallocv
+function will resize a vector
+given the pool
+.IR p ,
+the original vector base address
+.IR q ,
+the new number of elements required
+.IR n ,
+the existing number of elements
+.IR on ,
+and the size of the elements in bytes
+.IR sz .
+And,
+similar to
+.B POOL_NEWV
+above,
+the
+.B POOL_RENEWV
+macro updates its argument
+.I q
+to point to a block large enough for a vector of
+.I n
+elements each of the type that
+.I q
+points to,
+given the existing capacity
+.I on
+of the vector.
+.PP
+The most recently allocated block can also be freed completely,
+by calling
+.BR pool_free .
+.PP
+These functions and macros can also be called on blocks
+other than the most recently allocated block,
+but they are less useful.
+If
+.B pool_realloc
+is called to shrink a block,
+or
+.B pool_free
+is called,
+then there is essentially no effect
+(but see the section on
+.B Valgrind
+below);
+if
+.B pool_realloc
+is called to increase the block size,
+then a fresh block is allocated and the contents copied;
+but the space no longer required is retained.
+.PP
+Finally, the function
+.B pool_strdup
+copies a null-terminated string into memory allocated from the pool.
.PP
A pool provides an
.BR arena (3)
which can be passed to other components to cause them to use the pool
for memory allocation.
.
+.SS "Valgrind"
+The resource pool allocator detects
+when it is running under the control of
+.BR valgrind (1):
+it will describe the block allocations to
+.B valgrind
+so that it can report leaks and errors, and
+it will leave `red zones' between allocations to catch overruns.
+In particular,
+it reports the action of
+.B pool_realloc
+and
+.B pool_free
+(and related functions and macros)
+precisely,
+so it may be worth using these functions
+on blocks other than the one most recently allocated,
+just for the improved diagnostics.
+.
.SS "Other resources"
Pool resources have a header of type
.B pool_resource
#include "config.h"
+#include <assert.h>
#include <string.h>
#ifdef HAVE_VALGRIND_VALGRIND_H
#include "align.h"
#include "alloc.h"
#include "arena.h"
+#include "exc.h"
+#include "macros.h"
#include "pool.h"
+#ifdef DEBUG_POOL
+# include <stdio.h>
+# define D(x) x
+#else
+# define D(x)
+#endif
+
+/*----- Pool memory layout ------------------------------------------------*
+ *
+ * The pool consists of a number of chunks. Each chunk begins with a header,
+ * followed by allocated blocks.
+ *
+ * Within a chunk, blocks are allocated on alignment boundaries, as
+ * determined by the @ALIGN@ macro. Furthermore, if Valgrind is active, then
+ * each allocation is preceded and followed by a guard area called a `red
+ * zone' which is @REDZONE_SIZE@ bytes in size. For a block in the middle of
+ * the chunk, the leading red zone overlaps the previous block's trailing red
+ * zone, and the block's trailing red zone overlaps the following block's
+ * leading red zone. If the block sizes are fully aligned, then the red
+ * zones completely coincide.
+ *
+ * An alternative view of the same situation, which produces the same
+ * allocation addresses, is that, following the chunk header and each
+ * allocated block, there is alignment padding followed by a red zone. This
+ * view is helpful because it makes allocation uniform: to allocate a block,
+ * round up the requested size by aligning it and adding space for the
+ * following red zone; the current allocation pointer is the new block base
+ * address, and the allocation pointer is advanced by the rounded-up size.
+ *
+ * Three categories of chunks are maintained.
+ *
+ * * There is always exactly one %%\emph{active}%% chunk. This is the
+ * chunk from which the most recent allocation, if any, was taken, and
+ * the first chunk to be examined for the next allocation.
+ *
+ * * There are zero or more %%\emph{live}%% chunks, which are not active
+ * but still have space remaining for further allocations. These are
+ * maintained in a max-heap.
+ *
+ * The live chunk with the greatest amount of free space is called the
+ * `live head', and is the first element of the live heap vector. The
+ * idea here is that, in order to decide whether we can satisfy an
+ * allocation from the pool's existing chunks, we need only check the
+ * active chunk and the live head.
+ *
+ * * There are zero or more %%\emph{dead}%% chunks, whose space has been
+ * entirely allocated. The dead chunks are held on a singly-linked list
+ * which is only maintained so that the chunks can be recycled or
+ * released.
+ *
+ * Note that the active chunk is not considered dead even if an
+ * allocation exhausts it: the invariant that the active chunk was the
+ * source of the most recent allocation is more important, since this
+ * allows us to free or reallocate the most recent allocation.
+ *
+ * A live or active chunk maintains an allocation pointer and remaining
+ * size. In a dead chunk, the remaining size -- which is, by definition,
+ * zero -- is replaced by the link to the next dead chunk in the list.
+ */
+
/*----- Constants ---------------------------------------------------------*/
#define REDZONE_SIZE (2*ALIGNOF(union align))
+#define MIN_LIVESZ 16
+
+/*----- Arena operations --------------------------------------------------*/
+
+/* Here so that @pool_ops@ is available to @init_pool@ below. */
+
+static void *palloc(arena *a, size_t sz)
+ { pool *p = (pool *)a; return (pool_alloc(p, sz)); }
+
+static void *prealloc(arena *a, void *q, size_t newsz, size_t oldsz)
+ { pool *p = (pool *)a; return (pool_realloc(p, q, newsz, oldsz)); }
+
+static void pfree(arena *a, void *q)
+ { pool *p = (pool *)a; pool_free(p, q); }
+
+static arena_ops pool_ops = { palloc, prealloc, pfree, 0 };
-/*----- Main code ---------------------------------------------------------*/
+/*----- Internal machinery ------------------------------------------------*/
-/* --- @doalloc@ --- *
+/* --- @round_up@ --- *
*
- * Arguments: @arena *a@ = pointer to arena to allocate memory from
- * @pool_chunk **cc@ = pointer to chunk list
- * @size_t sz@ = size of memory wanted
+ * Arguments: @size_t sz@ = an object size
* @unsigned f@ = flags (@PF_...@)
*
- * Returns: Pointer to the allocated block.
+ * Returns: The size to actually allocate, including alignment and a
+ * possible following red zone. (A %%\emph{preceding}%%
+ * red zone doesn't concern us: it would either be part of the
+ * previous allocation or chunk header.)
+ */
+
+static size_t round_up(size_t sz, unsigned f)
+{
+ ALIGN(sz); VG( if (f&PF_VALGRIND) sz += REDZONE_SIZE; )
+ return (sz);
+}
+
+/* --- @chunk_bounds@ --- *
+ *
+ * Arguments: @pool_chunk@ = the chunk
+ * @size_t chsz@ = precomputed chunk header size, rounded up
+ * @unsigned char **guard_out@ = where to put the guard address
+ * @unsigned char **start_out@ = where to put the start address
+ * @unsigned char **end_out@ = where to put the limit address
+ *
+ * Returns: ---
+ *
+ * Use: Determine and return the important boundary addresses
+ * describing the allocatable space within the chunk, i.e., the
+ * space following the header.
+ *
+ * The guard address is the address of the first byte following
+ * the chunk header, without any alignment. The start address
+ * is the base address of the first allocatable byte in the
+ * chunk, immediately following the alignment and redzone after
+ * the chunk header. The end address is the address of the
+ * first byte after the chunk.
+ *
+ * The chunk's @u.sz@ member must be active -- if the chunk is
+ * dead, then @u.sz@ should be zero.
+ */
+
+static void chunk_bounds(pool_chunk *chunk, size_t chsz,
+ unsigned char **guard_out,
+ unsigned char **start_out, unsigned char **end_out)
+{
+ *guard_out = (unsigned char *)(chunk + 1);
+ *start_out = (unsigned char *)chunk + chsz;
+ *end_out = chunk->p + chunk->u.sz;
+}
+
+/* --- @init_pool@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to storage for the control block
+ * @arena *a@ = pointer to underlying arena
+ * @pool_chunk *active@ = pointer to initial active chunk
+ * @unsigned f@ = flags (@PF_...@)
+ *
+ * Returns: ---
+ *
+ * Use: Initializes a pool control block. Valgrind is not informed.
+ * This is a common subroutine for @pool_create@ and @pool_sub@.
+ */
+
+static void init_pool(pool *p, arena *a, pool_chunk *active, unsigned f)
+{
+ p->a.ops = &pool_ops;
+ p->active = active; p->resources = 0; p->pa = a; p->f = f; p->lastsz = 0;
+ p->live = 0; p->nlive = p->livesz = 0; p->dead = 0;
+}
+
+/* --- @alloc_chunk@ --- *
+ *
+ * Arguments: @arena *a@ = arena to allocate from
+ * @size_t minsz@ = size of the block intended to be allocated
+ * from this new chunk
+ * @unsigned f@ = flags (@PF_...@)
+ *
+ * Returns: A pointer to the new chunk.
+ *
+ * Use: Subroutine to allocate a chunk. Since a pool control block
+ * is allocated out of its own storage, it's necessary to be
+ * able to do this without reference to a pool. Valgrind is not
+ * informed.
+ *
+ * This code is shared between @pool_create@, @pool_sub@, and
+ * @internal_alloc@.
+ */
+
+static pool_chunk *alloc_chunk(arena *a, size_t minsz, unsigned f)
+{
+ pool_chunk *chunk;
+ size_t sz, chsz = round_up(sizeof(pool_chunk), f);
+
+ /* Allocate the chunk. */
+ sz = chsz + minsz + POOL_CHUNKSZ - 1; sz -= sz%POOL_CHUNKSZ;
+ chunk = x_alloc(a, sz);
+
+ /* Initialize and mark the body as unavailable. */
+ chunk->p = (unsigned char *)chunk + chsz; chunk->u.sz = sz - chsz;
+
+ /* Done. */
+ D( fprintf(stderr, ";; POOL new chunk %p + %lu\n",
+ (void *)chunk, (unsigned long)sz); )
+ return (chunk);
+}
+
+/* --- @report_new_chunk_to_valgrind@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @pool_chunk *chunk@ = the new chunk to report
+ *
+ * Returns: ---
+ *
+ * Use: Inform Valgrind about the new chunk, which is surprisingly
+ * involved. This can't be done as part of allocating it
+ * because we need to know the pool control-block address.
+ */
+
+#ifdef HAVE_VALGRIND_VALGRIND_H
+
+static void report_new_chunk_to_valgrind(pool *p, pool_chunk *chunk)
+{
+ unsigned char *guard, *start, *end;
+
+ D( fprintf(stderr, ";; POOL prepare chunk %p\n", (void *)chunk); )
+
+ chunk_bounds(chunk, round_up(sizeof(pool_chunk), PF_VALGRIND),
+ &guard, &start, &end);
+
+ D( fprintf(stderr, ";; POOL \tchunk body %p + %lu\n",
+ (void *)start, (unsigned long)(end - start)); )
+ VALGRIND_MEMPOOL_ALLOC(p, start, end - start);
+
+ D( fprintf(stderr, ";; POOL \telectrify chunk %p + %lu\n",
+ (void *)guard, (unsigned long)(end - guard)); )
+ VALGRIND_MAKE_MEM_NOACCESS(guard, end - guard);
+}
+
+#endif
+
+/* --- @maybe_undo_last_alloc@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @void *q@ = pointer to an allocated block
+ *
+ * Returns: Zero on success, %$-1$% if @q@ doesn't refer to the most
+ * recent allocation.
+ *
+ * Use: If @q@ points to the most recently allocated block, then
+ * undo its allocation by adjusting the containing chunk's @p@
+ * and @sz@ members. The actual block contents are preserved,
+ * and Valgrind is not informed. If @q@ does not point to the
+ * most recently allocated block, then return %$-1$% without
+ * doing anything else.
+ */
+
+static int maybe_undo_last_alloc(pool *p, void *q)
+{
+ pool_chunk *chunk = p->active;
+ size_t sz = p->lastsz;
+
+ /* Check the base address. If it's wrong then we fail. */
+ if (q != chunk->p - sz) return (-1);
+
+ /* Adjust the chunk so that the last allocation didn't happen. */
+ chunk->p -= sz; chunk->u.sz += sz;
+ return (0);
+}
+
+/* --- @bury_dead_chunk@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @pool_chunk *chunk@ = the chunk to check
+ *
+ * Returns: Zero if the chunk was dead and has now been buried, %$-1$% if
+ * it wasn't dead after all.
*
- * Use: The basic allocator for resource pools. This is also used
- * during pool creation, hence the slightly bizarre interface.
+ * Use: If the chunk is dead -- it has insufficient space to satisfy
+ * any reasonable request -- then link it onto the dead list and
+ * return zero.
*/
-static void *doalloc(arena *a, pool_chunk **cc, size_t sz, unsigned f)
+static int bury_dead_chunk(pool *p, pool_chunk *chunk)
{
- pool_chunk *c;
- void *p;
- size_t csz, ssz;
- size_t redsz = VG( f&PF_VALGRIND ? REDZONE_SIZE : ) 0;
- VG( size_t sz0 = sz; )
-
- /* --- See if there's enough space --- *
- *
- * The chunks are sorted by available space, so if there's not enough space
- * in the first chunk there isn't enough space anywhere.
+ size_t min = round_up(1, p->f);
+ size_t sz = chunk->u.sz;
+
+ /* If there's enough for the tiniest nonzero allocation then refuse to bury
+ * the chunk.
*/
+ if (sz >= min) return (-1);
+
+ D( fprintf(stderr, ";; POOL move active chunk %p (%lu) to dead list\n",
+ (void *)chunk, (unsigned long)sz); )
+
+ /* Advance the allocation pointer to the end so that we can recycle the
+ * chunk later.
+ */
+ chunk->p += sz;
+
+ /* Link the chunk onto the dead list. */
+ chunk->u.next = p->dead; p->dead = chunk;
+
+ /* Done. */
+ return (0);
+}
+
+/* --- @upheap@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @pool_chunk *new_chunk@ = a live chunk to be added
+ *
+ * Returns: ---
+ *
+ * Use: Append @new_chunk@ to the heap of live chunks.
+ *
+ * This is the primitive function for fixing up the heap
+ * underlying @remember_chunk@, but is also used by
+ * @pool_recycle@.
+ */
+
+static void upheap(pool *p, pool_chunk *new_chunk)
+{
+ pool_chunk *chunk;
+ size_t i = p->nlive++, j,
+ chunksz, newsz = new_chunk->u.sz;
- ALIGN(sz); sz += redsz;
- c = *cc;
- if (c && c->left >= sz) {
- p = c->p; VG( if (f&PF_VALGRIND) VALGRIND_MEMPOOL_ALLOC(c, p, sz0); )
- c->p += sz; c->left -= sz;
- *cc = c->next;
+ /* If there's not enough space in the vector for the new chunk then we must
+ * resize it.
+ */
+ if (i >= p->livesz) {
+ if (p->live) { X_RENEWV(p->live, p->pa, 2*i, i); p->livesz = 2*i; }
+ else { X_NEWV(p->live, p->pa, MIN_LIVESZ); p->livesz = MIN_LIVESZ; }
}
- /* --- Failed to find anything --- *
- *
- * I must allocate a new block from the arena, then.
+ /* Work upwards through the live heap to find the right level for the new
+ * chunk.
*/
+ while (i) {
- else {
- ssz = sizeof(pool_chunk);
- ALIGN(ssz);
- csz = (ssz + redsz + sz + POOL_CHUNKSZ - 1); csz -= csz % POOL_CHUNKSZ;
- c = x_alloc(a, csz);
- p = (unsigned char *)c + ssz + redsz;
- VG( if (f&PF_VALGRIND) {
- VALGRIND_CREATE_MEMPOOL(c, REDZONE_SIZE, 0);
- VALGRIND_MEMPOOL_ALLOC(c, p, sz0);
- } )
- c->p = (unsigned char *)p + sz;
- c->left = csz - ssz - redsz - sz;
+ /* Find the parent. */
+ j = (i - 1)/2; chunk = p->live[j]; chunksz = chunk->u.sz;
+
+ /* If the parent doesn't have less space than the new chunk then it's
+ * found its place.
+ */
+ if (chunksz >= newsz) break;
+
+ /* Demote the parent and ascend. */
+ p->live[i] = chunk; i = j;
}
- /* --- Move this chunk in the list so that it's sorted --- */
+ /* We've found the right level. */
+ p->live[i] = new_chunk;
+}
- while (*cc && (*cc)->left > c->left) cc = &(*cc)->next;
- c->next = *cc; *cc = c;
+/* --- @retire_active_chunk@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @pool_chunk *old_active@ = the old active chunk
+ *
+ * Returns: ---
+ *
+ * Use: The currently active chunk is about to be replaced. Add the
+ * old active chunk to the live heap or the dead list as
+ * appropriate.
+ */
- /* --- Done --- */
+static void retire_active_chunk(pool *p, pool_chunk *old_active)
+{
+ if (bury_dead_chunk(p, old_active)) {
+ D( fprintf(stderr, ";; POOL move active chunk %p (%lu) to live heap\n",
+ (void *)old_active, (unsigned long)old_active->u.sz); )
+ upheap(p, old_active);
+ }
+}
- return (p);
+/* --- @downheap@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @pool_chunk *new_chunk@ = a new live chunk to be added to the
+ * heap to replace the previous head chunk
+ *
+ * Returns: ---
+ *
+ * Use: Replace the current live head with @new_chunk@ in the live
+ * heap.
+ *
+ * This is the primitive function for fixing up the heap
+ * underlying @replace_live_head@.
+ */
+
+static void downheap(pool *p, pool_chunk *new_chunk)
+{
+ pool_chunk *chunk, *left, *right;
+ size_t i = 0, j, l, r, n = p->nlive,
+ leftsz, rightsz, chunksz, newsz = new_chunk->u.sz;
+
+  /* Start at the top and work downwards through the heap to find the right
+ * level for the new chunk.
+ */
+ for (;;) {
+
+ /* Find the two children. */
+ l = 2*i + 1; r = l + 1;
+
+ /* If the left child slot is out of bounds then we can descend no
+ * further.
+ */
+ if (l >= n) break;
+
+ if (r >= n) {
+ /* If the right child slot is out of bounds, then we must compare with
+ * the left only.
+ */
+
+ j = l; chunk = p->live[j]; chunksz = chunk->u.sz;
+ } else {
+ /* Otherwise we compare with the greater child. If they're equal then
+ * choose the right in order to end the search slightly faster.
+ */
+
+ left = p->live[l]; leftsz = left->u.sz;
+ right = p->live[r]; rightsz = right->u.sz;
+ if (leftsz > rightsz) { j = l; chunk = left; chunksz = leftsz; }
+ else { j = r; chunk = right; chunksz = rightsz; }
+ }
+
+ /* If the new chunk is not less than the greater child then it's found
+ * its place.
+ */
+ if (newsz >= chunksz) break;
+
+ /* Promote the larger child and descend. */
+ p->live[i] = chunk; i = j;
+ }
+
+ /* We've found the right level. */
+ p->live[i] = new_chunk;
}
-/* --- @pool_alloc@ --- *
+/* --- @replace_live_head@ --- *
*
- * Arguments: @pool *p@ = pool to allocate from
- * @size_t sz@ = size of block wanted
+ * Arguments: @pool *p@ = pointer to the pool
+ * @pool_chunk *new_chunk@ = a new chunk to be added to the
+ * heap to replace the previous head chunk, or null
*
- * Returns: Pointer to the requested block.
+ * Returns: ---
*
- * Use: Allocates memory from a resource pool. Memory is never freed
- * from pools: it is released when the pool is destroyed.
+ * Use: The old heap head -- the chunk with the largest free space --
+ * has been removed from the heap to be reactivated, and is to
+ * be replaced by @new_chunk@ -- or by nothing, if @new_chunk@
+ * is null. If the @new_chunk@ is completely exhausted, then
+ * instead link it onto the @p->dead@ list and shrink the heap.
*/
-void *pool_alloc(pool *p, size_t sz)
- { return (doalloc(p->pa, &p->c, sz, p->f)); }
+static void replace_live_head(pool *p, pool_chunk *new_chunk)
+{
+ size_t n;
-/* --- @pool_strdup@ --- *
+ /* If there's a new chunk, and it still has some space left, then we can
+ * proceed. If it's exhausted, then put it on the dead list.
+ */
+ if (new_chunk) {
+ if (bury_dead_chunk(p, new_chunk)) {
+ D( fprintf(stderr, ";; POOL move active chunk %p (%lu) to live heap\n",
+ (void *)new_chunk, (unsigned long)new_chunk->u.sz); )
+ downheap(p, new_chunk); return;
+ }
+ }
+
+ /* If there are no other chunks in the heap, then it will be left empty;
+ * otherwise, we replace the head with the chunk with the highest index.
+ */
+ n = --p->nlive;
+ if (n) downheap(p, p->live[n]);
+
+}
+
+/* --- @internal_alloc@ --- *
*
- * Arguments: @pool *p@ = pool to allocate from
- * @const char *s@ = pointer to string
+ * Arguments: @pool *p@ = pointer to the pool
+ * @size_t sz@ = size required
*
- * Returns: A pointer to a copy of the string.
+ * Returns: A pointer to the newly allocated block.
*
- * Use: Allocates a copy of a string.
+ * Use: Allocate and return a block from the pool. No data in any
+ * chunk is modified; Valgrind is not informed. This is a
+ * common subroutine of @pool_alloc@ and @pool_realloc@.
*/
-char *pool_strdup(pool *p, const char *s)
+static void *internal_alloc(pool *p, size_t sz)
{
- size_t sz = strlen(s) + 1;
- char *pp = doalloc(p->pa, &p->c, sz, p->f);
+ pool_chunk *active, *head;
+ void *q;
+ size_t n;
- memcpy(pp, s, sz);
- return (pp);
+ /* Align the size and add on the trailing red zone. The leading red zone
+ * follows the previous allocation or the chunk header.
+ */
+ sz = round_up(sz, p->f);
+
+ /* Try to serve the request from the active chunk. */
+ active = p->active;
+ if (active->u.sz >= sz) goto alloc;
+
+ /* Explore the heap of live chunks. */
+ n = p->nlive;
+ if (n) {
+ /* There's at least one chunk in the heap. Maybe that will help. */
+
+ head = p->live[0];
+
+ /* If the topmost chunk in the heap is large enough, then we'll activate
+ * that one.
+ */
+ if (head->u.sz >= sz) {
+
+ /* We'll take the old heap head chunk, and replace it with the
+ * previously active chunk (or put it on the dead list and shrink).
+ */
+ replace_live_head(p, active);
+
+ /* Allocate from the previous head chunk. */
+ D( fprintf(stderr, ";; POOL activate old live head %p\n",
+ (void *)head); )
+ p->active = active = head;
+ goto alloc;
+ }
+ }
+
+ /* There are no other suitable chunks. Just put the old active chunk on
+ * the heap (or the dead list).
+ */
+ retire_active_chunk(p, active);
+
+  /* No existing chunk has enough space, so allocate a fresh chunk and make
+   * it the new active chunk.
+   */
+ p->active = active = alloc_chunk(p->pa, sz, p->f);
+ VG( if (p->f&PF_VALGRIND) report_new_chunk_to_valgrind(p, active); )
+
+alloc:
+ /* Allocate the new block. Save the (aligned, padded) size for use by
+ * @maybe_undo_last_alloc@.
+ */
+ q = active->p; active->p += sz; active->u.sz -= sz;
+ p->lastsz = sz;
+
+ /* All done. */
+ return (q);
}
-/* --- Arena operations --- */
+/* --- @release_resources@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ *
+ * Returns: ---
+ *
+ * Use: Release all of the resources tracked by the pool. The
+ * resource list is %%\emph{not}%% cleared -- there'd be no
+ * point if the pool as a whole is about to be destroyed.
+ */
-static void *palloc(arena *a, size_t sz)
+static void release_resources(pool *p)
{
- pool *p = (pool *)a;
+ pool_resource *r, *rr;
- return (doalloc(p->pa, &p->c, sz, p->f));
+ for (r = p->resources; r; r = rr)
+ { rr = r->next; if (r->destroy) r->destroy(r); }
}
-static void pfree(arena *a, void *p) { return; } /* Trivial */
+/* --- @recycle_chunk@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @pool_chunk *chunk@ = the chunk to be cleared
+ * @size_t chsz@ = precomputed chunk header size, rounded up
+ *
+ * Returns: ---
+ *
+ * Use: Clear a single chunk: reset the allocation pointer and size
+ * to refer to the entire chunk, and inform Valgrind that the
+ * old allocations from the chunk are now invalid. As a special
+ * case, if this chunk is the one that contains the pool control
+ * block, then reallocate space for the control block.
+ *
+ * The chunk must be in its `live' form, with the @sz@ member
+ * active.
+ */
+
+static void recycle_chunk(pool *p, pool_chunk *chunk, size_t chsz)
+{
+ unsigned f = p->f;
+ unsigned char *guard, *start, *end;
+ size_t psz;
+
+ /* Reset the chunk bounds. */
+ chunk_bounds(chunk, chsz, &guard, &start, &end);
+ chunk->p = start; chunk->u.sz = end - start;
+
+ /* Clear Valgrind's ideas about the chunk. */
+ VG( if (f&PF_VALGRIND) {
-static arena_ops pool_ops = { palloc, arena_fakerealloc, pfree, 0 };
+ D( fprintf(stderr, ";; POOL recycle chunk %p\n", (void *)chunk); )
+ D( fprintf(stderr, ";; POOL \trecycle body %p + %zu\n",
+ (void *)start, (unsigned long)(end - start)); )
+ VALGRIND_MEMPOOL_FREE(p, start);
+ VALGRIND_MEMPOOL_ALLOC(p, start, end - start);
+
+ D( fprintf(stderr, ";; POOL \telectrify body %p + %zu\n",
+ (void *)guard, (unsigned long)(guard - start)); )
+ VALGRIND_MAKE_MEM_NOACCESS(guard, end - guard);
+ } )
+
+ /* Check to see if this is the chunk which contains the pool control
+ * block. If so, we need to advance the allocation pointer beyond the
+ * control block. Be sure to tell Valgrind that the control block has
+ * known contents.
+ */
+ if (start == (unsigned char *)p) {
+ psz = round_up(sizeof(pool), f);
+ chunk->p += psz; chunk->u.sz -= psz;
+ VG( if (f&PF_VALGRIND)
+ VALGRIND_MALLOCLIKE_BLOCK(p, sizeof(p), REDZONE_SIZE, 1); )
+ }
+}
+
+/* --- @free_chunk@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool (only used for Valgrind)
+ * @arena *a@ = pointer to underlying arena
+ * @pool_chunk *chunk@ = the chunk to be freed
+ * @unsigned f@ = flags (@PF_...@)
+ * @size_t chsz@ = precomputed chunk header size, rounded up
+ *
+ * Returns: ---
+ *
+ * Use: Free the chunk, handling the Valgrind necessities. The pool control
+ * block isn't necessarily valid so all of the pieces need to be
+ * passed separately.
+ */
+
+static void free_chunk(pool *p, arena *a,
+ pool_chunk *chunk, unsigned f, size_t chsz)
+{
+ VG( unsigned char *guard COMMA *start COMMA *end; )
+
+ VG( if (f&PF_VALGRIND) {
+ chunk_bounds(chunk, chsz, &guard, &start, &end);
+ D( fprintf(stderr, ";; POOL free chunk %p\n", (void *)chunk);
+ fprintf(stderr, ";; POOL \tclear body %p\n", (void *)start); )
+ VALGRIND_MEMPOOL_FREE(p, start);
+ } )
+
+ A_FREE(a, chunk);
+}
+
+/* --- @free_chunks@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @unsigned f@ = flags (@FF_...@)
+ *
+ * Returns: ---
+ *
+ * Use: Free all of the pool's live and dead chunks, returning them
+ * to the underlying arena.
+ *
+ * If @FF_ACTIVE@ is set in @f@, then also free the active
+ * chunk. If @FF_LIVE@ is set, then also free the live heap
+ * vector. (The live chunks are always freed; the flag
+ * additionally frees the underlying vector.)
+ *
+ *		If the pool is a root pool, then it may no longer be valid;
+ * is a certainty if @FF_ACTIVE@ is set.
+ */
+
+#define FF_ACTIVE 1u
+#define FF_LIVE 2u
+
+static void free_chunks(pool *p, unsigned f)
+{
+ pool_chunk *chunk, *next_chunk,
+ *active = p->active, **live = p->live, *dead = p->dead;
+ unsigned pf = p->f;
+ size_t i, nlive = p->nlive, chsz = round_up(sizeof(pool_chunk), pf);
+ arena *a = p->pa;
+
+ /* Maybe free the active chunk. */
+ if (f&FF_ACTIVE)
+ free_chunk(p, a, active, pf, chsz);
+
+ /* Free the live chunks. */
+ for (i = 0; i < nlive; i++)
+ free_chunk(p, a, live[i], pf, chsz);
+ if (live && (f&FF_LIVE)) A_FREE(a, live);
+
+ /* Free the dead chunks. */
+ for (chunk = dead; chunk; chunk = next_chunk) {
+ next_chunk = chunk->u.next;
+ free_chunk(p, a, chunk, pf, chsz);
+ }
+}
+
+/* --- @internal_destroy@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to pool to destroy
+ *
+ * Returns: ---
+ *
+ * Use: Destroy a pool, freeing all of the resources and memory held
+ * by it, with the certainty that it will never be used again --
+ * either this is a root pool, or it's a subpool being torn down
+ * by its parent.
+ */
+
+static void internal_destroy(pool *p)
+ { release_resources(p); free_chunks(p, FF_ACTIVE | FF_LIVE); }
+
+/*----- Main operations ---------------------------------------------------*/
/* --- @pool_create@ --- *
*
*
* Returns: A newly created resource pool.
*
- * Use: Creates a resource pool which is not a child of any other
- * resource pool.
+ * Use: Creates a fresh root resource pool, i.e., one which is not a
+ * child of any other pool.
*/
pool *pool_create(arena *a)
{
- pool_chunk *c = 0;
+ pool_chunk *active;
pool *p;
+ size_t psz;
unsigned f = 0;
+ /* Setup. */
VG( if (RUNNING_ON_VALGRIND) f |= PF_VALGRIND; )
- p = doalloc(a, &c, sizeof(pool), f);
- p->c = c; p->r = 0; p->pa = a; p->f = f;
- p->a.ops = &pool_ops;
+
+ /* Allocate the initial chunk. */
+ psz = round_up(sizeof(pool), f);
+ active = alloc_chunk(a, psz, f);
+
+ /* Allocate the pool control block from the chunk. */
+ p = (pool *)active->p; active->p += psz; active->u.sz -= psz;
+
+ /* Tell Valgrind about the pool. */
+ VG( if (f&PF_VALGRIND) {
+ VALGRIND_CREATE_MEMPOOL_EXT(p, REDZONE_SIZE, 0,
+ VALGRIND_MEMPOOL_METAPOOL |
+ VALGRIND_MEMPOOL_AUTO_FREE);
+ report_new_chunk_to_valgrind(p, active);
+ VALGRIND_MALLOCLIKE_BLOCK(p, sizeof(pool), REDZONE_SIZE, 0);
+ } )
+
+ /* Initialize the control block. */
+ init_pool(p, a, active, f);
+
+ /* Done. */
return (p);
}
-/* --- @pool_destroy@ --- *
+/* --- @pool_alloc@, @pool_allocv@ --- *
*
- * Arguments: @pool *p@ = pointer to pool to destroy
+ * Arguments: @pool *p@ = pool to allocate from
+ * @size_t n@ = number of elements to allocate (for
+ * @pool_allocv@)
+ * @size_t sz@ = size of block wanted
*
- * Returns: ---
+ * Returns: Pointer to the requested block.
*
- * Use: Destroys a pool, freeing all of the resources within it. If
- * this is a pool created by @pool_create@, its memory will be
- * deallocated; if it's a subpool or it was initialized by
- * @pool_init@, it is emptied and can be used again.
+ * Use: The @pool_allocv@ function allocates memory for @n@ elements
+ * of @sz@ bytes each from a resource pool. Only the most
+ * recent allocation can usefully be adjusted and/or freed;
+ * other allocations persist until the pool is destroyed. The
+ * @pool_alloc@ function is the same, but with @n@ fixed equal
+ * to 1. If there's not enough memory, the exception
+ * @EXC_NOMEM@ is thrown.
*/
-void pool_destroy(pool *p)
+void *pool_alloc(pool *p, size_t sz)
{
- pool_resource *r, *rr;
- arena *a;
- pool_chunk *c, *cc;
+ void *q = internal_alloc(p, sz);
- /* --- Dispose of all of the resources --- */
+ VG( if (p->f&PF_VALGRIND)
+ VALGRIND_MALLOCLIKE_BLOCK(q, sz, REDZONE_SIZE, 0); )
+ return (q);
+}
- r = p->r;
- while (r) {
- rr = r->next;
- if (r->destroy)
- r->destroy(r);
- r = rr;
+void *pool_allocv(pool *p, size_t n, size_t sz)
+{
+ if (!ALLOCV_SAFE_P(n, sz)) THROW(EXC_NOMEM);
+ else return (pool_alloc(p, n*sz));
+}
+
+/* --- @pool_realloc@, @pool_reallocv@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @void *q@ = pointer to block
+ * @size_t n@ = new number of elements (for @pool_reallocv@)
+ * @size_t on@ = old number of elements (for @pool_reallocv@)
+ * @size_t sz@ = size of elements (for @pool_reallocv@)
+ * @size_t newsz@ = the new size required (for @pool_realloc@)
+ * @size_t oldsz@ = existing size allocated (for @pool_realloc@)
+ *
+ * Returns: The new address of the block.
+ *
+ * Use: The @pool_reallocv@ function returns a pointer to a block of
+ * memory, large enough for @n@ elements each of size @sz@,
+ * containing a copy of the first @min(n, on)@ elements
+ * currently in the block at @q@. If the new pointer is not
+ * equal to @q@, then @q@ is invalidated and it is an error to
+ * make further use of it.
+ *
+ * If @q@ points to the most recently allocated block, then the
+ * old storage is reclaimed. Otherwise, if @n@ is smaller than
+ * @on@, the block is shrunk in place, but the excess space is
+ * lost until the pool is destroyed. Otherwise, a fresh block
+ * is allocated and the data copied, and the old block's space
+ * is lost until the pool is destroyed.
+ *
+ * The @pool_realloc@ function is the same, except that @sz@ is
+ * fixed at 1, and @n@ and @on@ are renamed to @newsz@ and
+ * @oldsz@.
+ */
+
+void *pool_realloc(pool *p, void *q, size_t newsz, size_t oldsz)
+{
+ void *qq;
+
+ /* The usual C special cases. */
+ if (!q) return (pool_alloc(p, newsz));
+ else if (!newsz) { pool_free(p, q); return (0); }
+
+ if (!maybe_undo_last_alloc(p, q)) {
+ /* The block being reallocated is the most recently allocated one, and
+ * its allocation has just been undone. We'll retry the allocation.
+ * This will either return the same address again, if there is enough
+ * space in the chunk to satisfy the allocation, or a different address.
+ * In the former case, the existing data is still there and we have
+ * nothing to do (except update Valgrind's ideas about the block);
+ * otherwise, we must copy the data -- but note that, in this case, we
+ * know that the block must be growing.
+ */
+
+ qq = internal_alloc(p, newsz);
+ if (qq != q) {
+ VG( if (p->f&PF_VALGRIND)
+ VALGRIND_MALLOCLIKE_BLOCK(qq, newsz, REDZONE_SIZE, 0); )
+ memcpy(qq, q, oldsz);
+ VG( if (p->f&PF_VALGRIND) VALGRIND_FREELIKE_BLOCK(q, REDZONE_SIZE); )
+ }
+ VG( else if (p->f&PF_VALGRIND)
+ VALGRIND_RESIZEINPLACE_BLOCK(q, oldsz, newsz, REDZONE_SIZE); )
+
+ } else if (newsz < oldsz || (newsz ^ oldsz) < sizeof(union align)) {
+ /* The caller asked us to shrink the block. There's nothing to do here
+ * except update Valgrind's idea about the allocation.
+ */
+
+ qq = q;
+ VG( if (p->f&PF_VALGRIND)
+ VALGRIND_RESIZEINPLACE_BLOCK(q, oldsz, newsz, REDZONE_SIZE); )
+
+ } else {
+ /* We've been asked to grow a block. Just allocate a fresh one and copy
+ * the data.
+ */
+
+ qq = internal_alloc(p, newsz);
+ VG( if (p->f&PF_VALGRIND)
+ VALGRIND_MALLOCLIKE_BLOCK(qq, newsz, REDZONE_SIZE, 0); )
+ memcpy(qq, q, oldsz);
+ VG( if (p->f&PF_VALGRIND) VALGRIND_FREELIKE_BLOCK(q, REDZONE_SIZE); )
}
- p->r = 0;
- /* --- Free all of the memory --- *
- *
- * Since root pools are allocated in their own memory, this will free the
- * root pool block. Subpools are allocated in their parent's memory, so
- * the pool block itself will be left around.
+ /* Return the new pointer. */
+ return (qq);
+}
+
+void *pool_reallocv(pool *p, void *q, size_t n, size_t on, size_t sz)
+{
+ if (!ALLOCV_SAFE_P(n, sz)) THROW(EXC_NOMEM);
+ else return (pool_realloc(p, q, n*sz, on*sz));
+}
+
+/* --- @pool_strdup@ --- *
+ *
+ * Arguments: @pool *p@ = pool to allocate from
+ * @const char *s@ = pointer to string
+ *
+ * Returns: A pointer to a copy of the string.
+ *
+ * Use: Allocates a copy of a string.
+ */
+
+char *pool_strdup(pool *p, const char *s)
+{
+ size_t sz = strlen(s) + 1;
+ char *q;
+
+ q = internal_alloc(p, sz);
+ VG( if (p->f&PF_VALGRIND)
+ VALGRIND_MALLOCLIKE_BLOCK(q, sz, REDZONE_SIZE, 0); )
+ memcpy(q, s, sz);
+ return (q);
+}
+
+/* --- @pool_free@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @void *q@ = pointer to block
+ *
+ * Returns: ---
+ *
+ * Use: Try to recycle the memory used by the block allocated at @q@.
+ * If @q@ points to the most recently allocated block, then it
+ * can be reclaimed at no cost. Otherwise, the allocation
+ * persists anyway until the pool as a whole is destroyed --
+ * though the pointer is logically invalidated, and it is an
+ * error to make further use of it.
+ */
+
+void pool_free(pool *p, void *q)
+{
+ /* Check if this was the most recent block. If so, we can actually
+ * reclaim its space.
*/
+ if (!maybe_undo_last_alloc(p, q)) {
- a = p->pa; c = p->c; p->c = 0;
- while (c) {
- cc = c->next;
- VG( if (p->f&PF_VALGRIND) VALGRIND_DESTROY_MEMPOOL(c); )
- x_free(a, c);
- c = cc;
+ /* If we're /not/ running under Valgrind then zero is a properly aligned
+ * length. If we /are/, then this is the precise length of a free block.
+ */
+ p->lastsz = 0;
+
+ /* If we're running under Valgrind, then we should still leave a
+ * trailing red zone for two reasons: firstly, this will help detect
+ * accesses to the free block after a new real block is allocated; and
+ * secondly, this arrangement correctly preserves the last-block
+ * base-address invariant.
+ */
+ VG( if (p->f&PF_VALGRIND) {
+ p->active->p += REDZONE_SIZE;
+ p->active->u.sz -= REDZONE_SIZE;
+ } )
}
+
+ /* Tell Valgrind that the block is free. */
+ VG( if (p->f&PF_VALGRIND) VALGRIND_FREELIKE_BLOCK(q, REDZONE_SIZE); )
}
/* --- @pool_add@ --- *
*/
void pool_add(pool *p, pool_resource *r, void (*dfn)(pool_resource *r))
+ { POOL_ADD(p, r, dfn); }
+
+/* --- @pool_recycle@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ *
+ * Returns: ---
+ *
+ * Use: Release all of the memory and resources held by the pool,
+ * with the expectation that it will be used again. The chunks
+ * from which allocations were satisfied are retained for future
+ * use.
+ */
+
+void pool_recycle(pool *p)
+{
+ pool_chunk *chunk, *next_chunk;
+ size_t i, n, chsz = round_up(sizeof(pool_chunk), p->f);
+
+ /* Release the pool's resources. */
+ release_resources(p); p->resources = 0;
+
+ /* Deal with the heap first; otherwise, we'd have to make a copy of the
+ * live chunks.
+ */
+ n = p->nlive;
+ if (n) {
+ p->nlive = 0;
+ for (i = 0; i < n; i++) {
+ chunk = p->live[i];
+ recycle_chunk(p, chunk, chsz); upheap(p, chunk);
+ }
+ }
+
+ /* Throw in the current active chunk. */
+ chunk = p->active;
+ recycle_chunk(p, chunk, chsz); upheap(p, chunk);
+
+ /* And finally walk the dead list. */
+ for (chunk = p->dead; chunk; chunk = next_chunk) {
+ next_chunk = chunk->u.next; chunk->u.sz = 0;
+ recycle_chunk(p, chunk, chsz); upheap(p, chunk);
+ }
+ p->dead = 0;
+
+ /* Activate the largest chunk. */
+ p->active = p->live[0]; replace_live_head(p, 0); p->lastsz = 0;
+}
+
+/* --- @pool_destroy@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to pool to destroy
+ *
+ * Returns: ---
+ *
+ * Use: Destroys a pool, freeing all of the resources within it. If
+ * this is a root pool, its memory will be deallocated; if it's
+ * a subpool, it is emptied and can be used again.
+ */
+
+void pool_destroy(pool *p)
{
- POOL_ADD(p, r, dfn);
+ if (!(p->f&PF_SUBPOOL)) {
+ /* This is a root pool, so just free all of the chunks and the live heap
+ * vector. The pool control block was allocated from one of these.
+ */
+
+ internal_destroy(p);
+ } else {
+ /* This is a subpool, and the storage for its control block came from its
+ * parent. In principle, the pool could be used again; certainly, it
+ * must be left safe for the parent to destroy it later.
+ */
+
+ release_resources(p); p->resources = 0;
+ recycle_chunk(p, p->active, round_up(sizeof(pool_chunk), p->f));
+ free_chunks(p, 0); p->nlive = 0; p->dead = 0; p->lastsz = 0;
+ }
}
+/*----- Subpools ----------------------------------------------------------*/
+
/* --- @pool_sub@ --- *
*
* Arguments: @pool *p@ = pointer to parent pool
} subpool;
static void subpool_destroy(pool_resource *r)
-{
- subpool *p = (subpool *)r;
- pool_destroy(&p->p);
-}
+ { subpool *p = (subpool *)r; internal_destroy(&p->p); }
pool *pool_sub(pool *p)
{
subpool *pp = pool_alloc(p, sizeof(subpool));
+ pool_chunk *active;
+ arena *a = p->pa;
+ unsigned f = p->f | PF_SUBPOOL;
+
+ /* Allocate the initial active chunk. */
+ active = alloc_chunk(a, 1, f);
+
+ /* Set up the pool control block. */
+ init_pool(&pp->p, a, active, f);
+
+ /* Tell Valgrind about the pool. */
+ VG( if (p->f&PF_VALGRIND) {
+ VALGRIND_CREATE_MEMPOOL_EXT(&pp->p, REDZONE_SIZE, 0,
+ VALGRIND_MEMPOOL_METAPOOL |
+ VALGRIND_MEMPOOL_AUTO_FREE);
+ report_new_chunk_to_valgrind(&pp->p, active);
+ } )
+
+ /* Register the subpool as a resource of its parent. */
POOL_ADD(p, &pp->r, subpool_destroy);
- pp->p.a.ops = &pool_ops;
- pp->p.c = 0;
- pp->p.r = 0;
- pp->p.pa = p->pa;
+
+ /* And we're done. */
return (&pp->p);
}
#define POOL_CHUNKSZ 65536
typedef struct pool_chunk {
- struct pool_chunk *next; /* Next memory chunk in the chain */
unsigned char *p; /* Free area in this chunk */
- size_t left; /* Amount of memory left */
+ union {
+ size_t sz; /* Amount of memory left */
+ struct pool_chunk *next; /* Link to next dead chunk */
+ } u;
} pool_chunk;
typedef struct pool_resource {
typedef struct pool {
arena a; /* The arena for allocating memory */
- pool_chunk *c; /* Pointer to memory chunk list */
- pool_resource *r; /* Pointer to resource list */
+ pool_chunk *active; /* Most recent memory chunk */
+ pool_resource *resources; /* Head of resource list */
arena *pa; /* Arena for real allocation */
unsigned f; /* Flags */
-#define PF_VALGRIND 1u /* Special valgrind hacks active */
+#define PF_VALGRIND 1u /* Special Valgrind hacks active */
+#define PF_SUBPOOL 2u /* Pool is a subpool */
+ size_t lastsz; /* Size of most recent allocation */
+ pool_chunk **live; /* Chunks, max-heap by space left */
+ size_t nlive; /* Number of chunks in the heap */
+ size_t livesz; /* Allocated size of heap vector */
+ pool_chunk *dead; /* List of dead chunks */
} pool;
typedef struct pool_file {
/*----- Basic pool management ---------------------------------------------*/
-/* --- @pool_alloc@ --- *
+/* --- @pool_create@ --- *
+ *
+ * Arguments: @arena *a@ = pointer to an arena to allocate memory from
+ *
+ * Returns: A newly created resource pool.
+ *
+ * Use: Creates a fresh root resource pool, i.e., one which is not a
+ * child of any other pool.
+ */
+
+extern pool *pool_create(arena */*a*/);
+
+/* --- @pool_sub@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to parent pool
+ *
+ * Returns: A new child pool of the parent.
+ *
+ * Use: Creates a subpool. The subpool can either be destroyed on
+ * its own, or will be automatically destroyed at the same time
+ * as the parent.
+ */
+
+extern pool *pool_sub(pool */*p*/);
+
+/* --- @pool_alloc@, @pool_allocv@ --- *
*
* Arguments: @pool *p@ = pool to allocate from
+ * @size_t n@ = number of elements to allocate (for
+ * @pool_allocv@)
* @size_t sz@ = size of block wanted
*
* Returns: Pointer to the requested block.
*
- * Use: Allocates memory from a resource pool. Memory is never freed
- * from pools: it is released when the pool is destroyed.
+ * Use: The @pool_allocv@ function allocates memory for @n@ elements
+ * of @sz@ bytes each from a resource pool. Only the most
+ * recent allocation can usefully be adjusted and/or freed;
+ * other allocations persist until the pool is destroyed. The
+ * @pool_alloc@ function is the same, but with @n@ fixed equal
+ * to 1. If there's not enough memory, the exception
+ * @EXC_NOMEM@ is thrown.
*/
extern void *pool_alloc(pool */*p*/, size_t /*sz*/);
+extern void *pool_allocv(pool */*p*/, size_t /*n*/, size_t /*sz*/);
+
+/* --- @POOL_NEW@, @POOL_NEWV@ --- *
+ *
+ * Arguments: @type *q@ = pointer to allocate
+ * @pool *p@ = pool to allocate from
+ * @size_t n@ = number of elements to allocate (for @POOL_NEWV@)
+ *
+ * Returns: ---
+ *
+ * Use: The @POOL_NEW@ macro sets @q@ to point to a freshly allocated
+ * block of memory large enough for an object of the type
+ * pointed to by @q@. The @POOL_NEWV@ macro allocates enough
+ * space for a vector of @n@ elements, each of the type pointed
+ * to by @q@. If there is not enough memory, the exception
+ * @EXC_NOMEM@ is thrown.
+ */
+
+#define POOL_NEW(q, p) do { (q) = pool_alloc((p), sizeof(*(q))); } while (0)
+#define POOL_NEWV(q, p, n) \
+ do { (q) = pool_allocv((p), (n), sizeof(*(q))); } while (0)
+
+/* --- @pool_realloc@, @pool_reallocv@ --- *
+ *
+ * Arguments: @pool *p@ = pointer to the pool
+ * @void *q@ = pointer to block
+ * @size_t n@ = new number of elements (for @pool_reallocv@)
+ * @size_t on@ = old number of elements (for @pool_reallocv@)
+ * @size_t sz@ = size of elements (for @pool_reallocv@)
+ * @size_t newsz@ = the new size required (for @pool_realloc@)
+ * @size_t oldsz@ = existing size allocated (for @pool_realloc@)
+ *
+ * Returns: The new address of the block.
+ *
+ * Use: The @pool_reallocv@ function returns a pointer to a block of
+ * memory, large enough for @n@ elements each of size @sz@,
+ * containing a copy of the first @min(n, on)@ elements
+ * currently in the block at @q@. If the new pointer is not
+ * equal to @q@, then @q@ is invalidated and it is an error to
+ * make further use of it.
+ *
+ * If @q@ points to the most recently allocated block, then the
+ * old storage is reclaimed. Otherwise, if @n@ is smaller than
+ * @on@, the block is shrunk in place, but the excess space is
+ * lost until the pool is destroyed. Otherwise, a fresh block
+ * is allocated and the data copied, and the old block's space
+ * is lost until the pool is destroyed.
+ *
+ * The @pool_realloc@ function is the same, except that @sz@ is
+ * fixed at 1, and @n@ and @on@ are renamed to @newsz@ and
+ * @oldsz@.
+ */
+
+extern void *pool_realloc(pool */*p*/, void */*q*/,
+ size_t /*newsz*/, size_t /*oldsz*/);
+extern void *pool_reallocv(pool */*p*/, void */*q*/,
+ size_t /*n*/, size_t /*on*/, size_t /*sz*/);
+
+/* --- @POOL_RENEWV@ --- *
+ *
+ * Arguments: @type *q@ = a pointer to allocate
+ * @pool *p@ = pointer to pool
+ * @size_t n, on@ = new and existing numbers of elements
+ *
+ * Returns: ---
+ *
+ * Use: Adjust @q@ to point to a new block of memory with space for
+ * @n@ elements of the type pointed to by @q@, on the assumption
+ * that @q@ is either null or currently points to a block with
+ * space for @on@ elements.
+ */
+
+#define POOL_RENEWV(q, p, n, on) \
+ do { (q) = pool_reallocv((p), (q), (n), (on), sizeof(*(q))); } while (0)
/* --- @pool_strdup@ --- *
*
extern char *pool_strdup(pool */*p*/, const char */*s*/);
-/* --- @pool_create@ --- *
+/* --- @pool_free@ --- *
*
- * Arguments: @arena *a@ = pointer to an arena to allocate memory from
+ * Arguments: @pool *p@ = pointer to the pool
+ * @void *q@ = pointer to block
*
- * Returns: A newly created resource pool.
+ * Returns: ---
*
- * Use: Creates a resource pool which is not a child of any other
- * resource pool.
+ * Use: Try to recycle the memory used by the block allocated at @q@.
+ * If @q@ points to the most recently allocated block, then it
+ * can be reclaimed at no cost. Otherwise, the allocation
+ * persists anyway until the pool as a whole is destroyed --
+ * though the pointer is logically invalidated, and it is an
+ * error to make further use of it.
*/
-extern pool *pool_create(arena */*a*/);
+extern void pool_free(pool */*p*/, void */*q*/);
-/* --- @pool_destroy@ --- *
+/* --- @pool_recycle@ --- *
*
- * Arguments: @pool *p@ = pointer to pool to destroy
+ * Arguments: @pool *p@ = pointer to the pool
*
* Returns: ---
*
- * Use: Destroys a pool, freeing all of the resources within it. If
- * this is a root pool, its memory will be deallocated; if it's
- * a subpool, it is emptied and can be used again.
+ * Use: Release all of the memory and resources held by the pool,
+ * with the expectation that it will be used again. The chunks
+ * from which allocations were satisfied are retained for future
+ * use.
*/
-extern void pool_destroy(pool */*p*/);
+extern void pool_recycle(pool */*p*/);
-/* --- @pool_sub@ --- *
+/* --- @pool_destroy@ --- *
*
- * Arguments: @pool *p@ = pointer to parent pool
+ * Arguments: @pool *p@ = pointer to pool to destroy
*
- * Returns: A new child pool of the parent.
+ * Returns: ---
*
- * Use: Creates a subpool. The subpool can either be destroyed on
- * its own, or will be automatically destroyed at the same time
- * as the parent.
+ * Use: Destroys a pool, freeing all of the resources within it. If
+ * this is a root pool, its memory will be deallocated; if it's
+ * a subpool, it is emptied and can be used again.
*/
-extern pool *pool_sub(pool */*p*/);
+extern void pool_destroy(pool */*p*/);
-/* --- @pool_add@ --- *
+/* --- @pool_add@, @POOL_ADD@ --- *
*
* Arguments: @pool *p@ = pointer to pool to add the resource to
* @pool_resource *r@ = pointer to resource block
#define POOL_ADD(p, rr, dfn) do { \
pool *_p = (p); \
pool_resource *_r = (rr); \
- _r->next = _p->r; \
+ \
+ _r->next = _p->resources; \
_r->destroy = dfn; \
- _p->r = _r; \
+ _p->resources = _r; \
} while (0)
extern void pool_add(pool */*p*/, pool_resource */*r*/,