3 * Resource pool handling
5 * (c) 2000 Straylight/Edgeware
8 /*----- Licensing notice --------------------------------------------------*
10 * This file is part of the mLib utilities library.
12 * mLib is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU Library General Public License as
14 * published by the Free Software Foundation; either version 2 of the
15 * License, or (at your option) any later version.
17 * mLib is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU Library General Public License for more details.
22 * You should have received a copy of the GNU Library General Public
23 * License along with mLib; if not, write to the Free
24 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
28 /*----- Header files ------------------------------------------------------*/
35 #ifdef HAVE_VALGRIND_VALGRIND_H
36 # include <valgrind/valgrind.h>
37 # include <valgrind/memcheck.h>
57 /*----- Pool memory layout ------------------------------------------------*
59 * The pool consists of a number of chunks. Each chunk begins with a header,
60 * followed by allocated blocks.
62 * Within a chunk, blocks are allocated on alignment boundaries, as
63 * determined by the @ALIGN@ macro. Furthermore, if Valgrind is active, then
64 * each allocation is preceded and followed by a guard area called a `red
65 * zone' which is @REDZONE_SIZE@ bytes in size. For a block in the middle of
66 * the chunk, the leading red zone overlaps the previous block's trailing red
67 * zone, and the block's trailing red zone overlaps the following block's
68 * leading red zone. If the block sizes are fully aligned, then the red
69 * zones completely coincide.
71 * An alternative view of the same situation, which produces the same
72 * allocation addresses, is that, following the chunk header and each
73 * allocated block, there is alignment padding followed by a red zone. This
74 * view is helpful because it makes allocation uniform: to allocate a block,
75 * round up the requested size by aligning it and adding space for the
76 * following red zone; the current allocation pointer is the new block base
77 * address, and the allocation pointer is advanced by the rounded-up size.
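 *
 * To make the uniform view concrete, the allocation step amounts to the
 * following sketch (same names as @internal_alloc@ below; this is an
 * illustration, not a separate code path):
 *
 *      sz = round_up(requested, p->f);          align, add trailing red zone
 *      q = chunk->p;                            new block base address
 *      chunk->p += sz; chunk->u.sz -= sz;       advance the allocation pointer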
79 * Three categories of chunks are maintained.
81 * * There is always exactly one %%\emph{active}%% chunk. This is the
82 * chunk from which the most recent allocation, if any, was taken, and
83 * the first chunk to be examined for the next allocation.
85 * * There are zero or more %%\emph{live}%% chunks, which are not active
86 * but still have space remaining for further allocations. These are
87 * maintained in a max-heap.
89 * The live chunk with the greatest amount of free space is called the
90 * `live head', and is the first element of the live heap vector. The
91 * idea here is that, in order to decide whether we can satisfy an
92 * allocation from the pool's existing chunks, we need only check the
93 * active chunk and the live head.
95 * * There are zero or more %%\emph{dead}%% chunks, whose space has been
96 * entirely allocated. The dead chunks are held on a singly-linked list
97 * which is only maintained so that the chunks can be recycled or
100 * Note that the active chunk is not considered dead even if an
101 * allocation exhausts it: the invariant that the active chunk was the
102 * source of the most recent allocation is more important, since this
103 * allows us to free or reallocate the most recent allocation.
105 * A live or active chunk maintains an allocation pointer and remaining
106 * size. In a dead chunk, the remaining size -- which is, by definition,
107 * zero -- is replaced by the link to the next dead chunk in the list.
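 *
 * For example, the fast path of @internal_alloc@ below needs only the
 * following checks (a sketch, assuming the heap vector has been set up):
 *
 *      if (p->active->u.sz >= sz) ...                    use the active chunk
 *      else if (p->nlive && p->live[0]->u.sz >= sz) ...  activate the live head
 *      else ...                                          allocate a fresh chunk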
110 /*----- Constants ---------------------------------------------------------*/
112 #define REDZONE_SIZE (2*ALIGNOF(union align))
113 #define MIN_LIVESZ 16
115 /*----- Arena operations --------------------------------------------------*/
117 /* Here so that @pool_ops@ is available to @init_pool@ below. */
119 static void *palloc(arena *a, size_t sz)
120 { pool *p = (pool *)a; return (pool_alloc(p, sz)); }
122 static void *prealloc(arena *a, void *q, size_t newsz, size_t oldsz)
123 { pool *p = (pool *)a; return (pool_realloc(p, q, newsz, oldsz)); }
125 static void pfree(arena *a, void *q)
126 { pool *p = (pool *)a; pool_free(p, q); }
128 static arena_ops pool_ops = { palloc, prealloc, pfree, 0 };
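/* Because @palloc@ and friends simply cast the @arena@ pointer back to the
 * containing @pool@, a pool can be passed anywhere an arena is expected.  A
 * usage sketch, using the generic exception-throwing allocator that this
 * file itself uses for chunks:
 *
 *      pool *p = ...;
 *      void *q = x_alloc(&p->a, 64);     dispatches to @pool_alloc(p, 64)@
 */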
130 /*----- Internal machinery ------------------------------------------------*/
132 /* --- @round_up@ --- *
134 * Arguments: @size_t sz@ = an object size
135 * @unsigned f@ = flags (@PF_...@)
137 * Returns: The size to actually allocate, including alignment and a
138 * possible following red zone. (A %%\emph{preceding}%%
139 * red zone doesn't concern us: it would either be part of the
140 * previous allocation or chunk header.)
143 static size_t round_up(size_t sz, unsigned f)
145 ALIGN(sz); VG( if (f&PF_VALGRIND) sz += REDZONE_SIZE; )
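/* Worked example (illustrative only -- the real values depend on the
 * platform's @union align@): if ALIGNOF(union align) is 8 then REDZONE_SIZE
 * is 16, so under Valgrind a 5-byte request is rounded to ALIGN(5) + 16 =
 * 8 + 16 = 24 bytes; without Valgrind it is just 8.
 */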
149 /* --- @chunk_bounds@ --- *
151 * Arguments: @pool_chunk *chunk@ = the chunk
152 * @size_t chsz@ = precomputed chunk header size, rounded up
153 * @unsigned char **guard_out@ = where to put the guard address
154 * @unsigned char **start_out@ = where to put the start address
155 * @unsigned char **end_out@ = where to put the limit address
159 * Use: Determine and return the important boundary addresses
160 * describing the allocatable space within the chunk, i.e., the
161 * space following the header.
163 * The guard address is the address of the first byte following
164 * the chunk header, without any alignment. The start address
165 * is the base address of the first allocatable byte in the
166 * chunk, immediately following the alignment and redzone after
167 * the chunk header. The end address is the address of the
168 * first byte after the chunk.
170 * The chunk's @u.sz@ member must be active -- if the chunk is
171 * dead, then @u.sz@ should be zero.
174 static void chunk_bounds(pool_chunk *chunk, size_t chsz,
175 unsigned char **guard_out,
176 unsigned char **start_out, unsigned char **end_out)
178 *guard_out = (unsigned char *)(chunk + 1);
179 *start_out = (unsigned char *)chunk + chsz;
180 *end_out = chunk->p + chunk->u.sz;
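/* Illustrative invariants for a live chunk (stated here as a sketch rather
 * than asserted in the code):
 *
 *      guard == (unsigned char *)(chunk + 1)
 *      start == (unsigned char *)chunk + chsz,  with  guard <= start
 *      start <= chunk->p  and  chunk->p + chunk->u.sz == end
 */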
183 /* --- @init_pool@ --- *
185 * Arguments: @pool *p@ = pointer to storage for the control block
186 * @arena *a@ = pointer to underlying arena
187 * @pool_chunk *active@ = pointer to initial active chunk
188 * @unsigned f@ = flags (@PF_...@)
192 * Use: Initializes a pool control block. Valgrind is not informed.
193 * This is a common subroutine for @pool_create@ and @pool_sub@.
196 static void init_pool(pool *p, arena *a, pool_chunk *active, unsigned f)
198 p->a.ops = &pool_ops;
199 p->active = active; p->resources = 0; p->pa = a; p->f = f; p->lastsz = 0;
200 p->live = 0; p->nlive = p->livesz = 0; p->dead = 0;
203 /* --- @alloc_chunk@ --- *
205 * Arguments: @arena *a@ = arena to allocate from
206 * @size_t minsz@ = size of the block intended to be allocated
207 * from this new chunk
208 * @unsigned f@ = flags (@PF_...@)
210 * Returns: A pointer to the new chunk.
212 * Use: Subroutine to allocate a chunk. Since a pool control block
213 * is allocated out of its own storage, it's necessary to be
214 * able to do this without reference to a pool. Valgrind is not informed.
217 * This code is shared between @pool_create@, @pool_sub@, and
221 static pool_chunk *alloc_chunk(arena *a, size_t minsz, unsigned f)
224 size_t sz, chsz = round_up(sizeof(pool_chunk), f);
226 /* Allocate the chunk. */
227 sz = chsz + minsz + POOL_CHUNKSZ - 1; sz -= sz%POOL_CHUNKSZ;
228 chunk = x_alloc(a, sz);
230 /* Initialize and mark the body as unavailable. */
231 chunk->p = (unsigned char *)chunk + chsz; chunk->u.sz = sz - chsz;
234 D( fprintf(stderr, ";; POOL new chunk %p + %lu\n",
235 (void *)chunk, (unsigned long)sz); )
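/* Sizing example (illustrative; the actual value of @POOL_CHUNKSZ@ is
 * defined elsewhere and may differ): if POOL_CHUNKSZ were 4096 and the
 * rounded header size @chsz@ were 32, then @minsz@ = 100 gives
 * sz = 32 + 100 + 4095 = 4227, which is then cut back to a multiple of
 * POOL_CHUNKSZ, i.e., sz = 4096.
 */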
239 /* --- @report_new_chunk_to_valgrind@ --- *
241 * Arguments: @pool *p@ = pointer to the pool
242 * @pool_chunk *chunk@ = the new chunk to report
246 * Use: Inform Valgrind about the new chunk, which is surprisingly
247 * involved. This can't be done as part of allocating it
248 * because we need to know the pool control-block address.
251 #ifdef HAVE_VALGRIND_VALGRIND_H
253 static void report_new_chunk_to_valgrind(pool *p, pool_chunk *chunk)
255 unsigned char *guard, *start, *end;
257 D( fprintf(stderr, ";; POOL prepare chunk %p\n", (void *)chunk); )
259 chunk_bounds(chunk, round_up(sizeof(pool_chunk), PF_VALGRIND),
260 &guard, &start, &end);
262 D( fprintf(stderr, ";; POOL \tchunk body %p + %lu\n",
263 (void *)start, (unsigned long)(end - start)); )
264 VALGRIND_MEMPOOL_ALLOC(p, start, end - start);
266 D( fprintf(stderr, ";; POOL \telectrify chunk %p + %lu\n",
267 (void *)guard, (unsigned long)(end - guard)); )
268 VALGRIND_MAKE_MEM_NOACCESS(guard, end - guard);
273 /* --- @maybe_undo_last_alloc@ --- *
275 * Arguments: @pool *p@ = pointer to the pool
276 * @void *q@ = pointer to an allocated block
278 * Returns: Zero on success, %$-1$% if @q@ doesn't refer to the most
281 * Use: If @q@ points to the most recently allocated block, then
282 * undo its allocation by adjusting the containing chunk's @p@
283 * and @sz@ members. The actual block contents are preserved,
284 * and Valgrind is not informed. If @q@ does not point to the
285 * most recently allocated block, then return %$-1$% without
286 * doing anything else.
289 static int maybe_undo_last_alloc(pool *p, void *q)
291 pool_chunk *chunk = p->active;
292 size_t sz = p->lastsz;
294 /* Check the base address. If it's wrong then we fail. */
295 if (q != chunk->p - sz) return (-1);
297 /* Adjust the chunk so that the last allocation didn't happen. */
298 chunk->p -= sz; chunk->u.sz += sz;
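/* For illustration, this is what makes the following sequence cheap (a
 * sketch of the intended behaviour, not a guarantee about exact addresses):
 *
 *      void *q = pool_alloc(p, 100);
 *      pool_free(p, q);                  most recent block: space reclaimed
 *      void *r = pool_alloc(p, 100);     typically reuses the same space
 */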
302 /* --- @bury_dead_chunk@ --- *
304 * Arguments: @pool *p@ = pointer to the pool
305 * @pool_chunk *chunk@ = the chunk to check
307 * Returns: Zero if the chunk was dead and has now been buried, %$-1$% if
308 * it wasn't dead after all.
310 * Use: If the chunk is dead -- it has insufficient space to satisfy
311 * any reasonable request -- then link it onto the dead list and
315 static int bury_dead_chunk(pool *p, pool_chunk *chunk)
317 size_t min = round_up(1, p->f);
318 size_t sz = chunk->u.sz;
320 /* If there's enough for the tiniest nonzero allocation then refuse to bury
323 if (sz >= min) return (-1);
325 D( fprintf(stderr, ";; POOL move active chunk %p (%lu) to dead list\n",
326 (void *)chunk, (unsigned long)sz); )
328 /* Advance the allocation pointer to the end so that we can recycle the
333 /* Link the chunk onto the dead list. */
334 chunk->u.next = p->dead; p->dead = chunk;
/* --- @upheap@ --- *
342 * Arguments: @pool *p@ = pointer to the pool
343 * @pool_chunk *new_chunk@ = a live chunk to be added
347 * Use: Append @new_chunk@ to the heap of live chunks.
349 * This is the primitive function for fixing up the heap
350 * underlying @retire_active_chunk@, but is also used by @pool_recycle@.
354 static void upheap(pool *p, pool_chunk *new_chunk)
357 size_t i = p->nlive++, j,
358 chunksz, newsz = new_chunk->u.sz;
360 /* If there's not enough space in the vector for the new chunk then we must
363 if (i >= p->livesz) {
364 if (p->live) { X_RENEWV(p->live, p->pa, 2*i, i); p->livesz = 2*i; }
365 else { X_NEWV(p->live, p->pa, MIN_LIVESZ); p->livesz = MIN_LIVESZ; }
368 /* Work upwards through the live heap to find the right level for the new
373 /* Find the parent. */
374 j = (i - 1)/2; chunk = p->live[j]; chunksz = chunk->u.sz;
376 /* If the parent doesn't have less space than the new chunk then it's
379 if (chunksz >= newsz) break;
381 /* Demote the parent and ascend. */
382 p->live[i] = chunk; i = j;
385 /* We've found the right level. */
386 p->live[i] = new_chunk;
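/* Heap layout reminder (standard array-backed binary max-heap): the parent
 * of slot i is (i - 1)/2 and its children are slots 2*i + 1 and 2*i + 2, so
 * the chunk with the most free space is always @p->live[0]@.  For example,
 * with four live chunks, slot 0's children are slots 1 and 2, and slot 3's
 * parent is slot 1.
 */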
389 /* --- @retire_active_chunk@ --- *
391 * Arguments: @pool *p@ = pointer to the pool
392 * @pool_chunk *old_active@ = the old active chunk
396 * Use: The currently active chunk is about to be replaced. Add the
397 * old active chunk to the live heap or the dead list as
401 static void retire_active_chunk(pool *p, pool_chunk *old_active)
403 if (bury_dead_chunk(p, old_active)) {
404 D( fprintf(stderr, ";; POOL move active chunk %p (%lu) to live heap\n",
405 (void *)old_active, (unsigned long)old_active->u.sz); )
406 upheap(p, old_active);
410 /* --- @downheap@ --- *
412 * Arguments: @pool *p@ = pointer to the pool
413 * @pool_chunk *new_chunk@ = a new live chunk to be added to the
414 * heap to replace the previous head chunk
418 * Use: Replace the current live head with @new_chunk@ in the live
421 * This is the primitive function for fixing up the heap
422 * underlying @replace_live_head@.
425 static void downheap(pool *p, pool_chunk *new_chunk)
427 pool_chunk *chunk, *left, *right;
428 size_t i = 0, j, l, r, n = p->nlive,
429 leftsz, rightsz, chunksz, newsz = new_chunk->u.sz;
431 /* Start at the top and work downwards through the heap to find the right
432 * level for the new chunk.
436 /* Find the two children. */
437 l = 2*i + 1; r = l + 1;
439 /* If the left child slot is out of bounds then we can descend no
445 /* If the right child slot is out of bounds, then we must compare with
449 j = l; chunk = p->live[j]; chunksz = chunk->u.sz;
451 /* Otherwise we compare with the greater child. If they're equal then
452 * choose the right in order to end the search slightly faster.
455 left = p->live[l]; leftsz = left->u.sz;
456 right = p->live[r]; rightsz = right->u.sz;
457 if (leftsz > rightsz) { j = l; chunk = left; chunksz = leftsz; }
458 else { j = r; chunk = right; chunksz = rightsz; }
461 /* If the new chunk is not less than the greater child then it's found
464 if (newsz >= chunksz) break;
466 /* Promote the larger child and descend. */
467 p->live[i] = chunk; i = j;
470 /* We've found the right level. */
471 p->live[i] = new_chunk;
474 /* --- @replace_live_head@ --- *
476 * Arguments: @pool *p@ = pointer to the pool
477 * @pool_chunk *new_chunk@ = a new chunk to be added to the
478 * heap to replace the previous head chunk, or null
482 * Use: The old heap head -- the chunk with the largest free space --
483 * has been removed from the heap to be reactivated, and is to
484 * be replaced by @new_chunk@ -- or by nothing, if @new_chunk@
485 * is null. If the @new_chunk@ is completely exhausted, then
486 * instead link it onto the @p->dead@ list and shrink the heap.
489 static void replace_live_head(pool *p, pool_chunk *new_chunk)
493 /* If there's a new chunk, and it still has some space left, then we can
494 * proceed. If it's exhausted, then put it on the dead list.
497 if (bury_dead_chunk(p, new_chunk)) {
498 D( fprintf(stderr, ";; POOL move active chunk %p (%lu) to live heap\n",
499 (void *)new_chunk, (unsigned long)new_chunk->u.sz); )
500 downheap(p, new_chunk); return;
504 /* If there are no other chunks in the heap, then it will be left empty;
505 * otherwise, we replace the head with the chunk with the highest index.
508 if (n) downheap(p, p->live[n]);
512 /* --- @internal_alloc@ --- *
514 * Arguments: @pool *p@ = pointer to the pool
515 * @size_t sz@ = size required
517 * Returns: A pointer to the newly allocated block.
519 * Use: Allocate and return a block from the pool. No data in any
520 * chunk is modified; Valgrind is not informed. This is a
521 * common subroutine of @pool_alloc@ and @pool_realloc@.
524 static void *internal_alloc(pool *p, size_t sz)
526 pool_chunk *active, *head;
530 /* Align the size and add on the trailing red zone. The leading red zone
531 * follows the previous allocation or the chunk header.
533 sz = round_up(sz, p->f);
535 /* Try to serve the request from the active chunk. */
537 if (active->u.sz >= sz) goto alloc;
539 /* Explore the heap of live chunks. */
542 /* There's at least one chunk in the heap. Maybe that will help. */
546 /* If the topmost chunk in the heap is large enough, then we'll activate
549 if (head->u.sz >= sz) {
551 /* We'll take the old heap head chunk, and replace it with the
552 * previously active chunk (or put it on the dead list and shrink).
554 replace_live_head(p, active);
556 /* Allocate from the previous head chunk. */
557 D( fprintf(stderr, ";; POOL activate old live head %p\n",
559 p->active = active = head;
564 /* There are no other suitable chunks. Just put the old active chunk on
565 * the heap (or the dead list).
567 retire_active_chunk(p, active);
569 /* There's not enough space in any chunk. Add the old active chunk to
570 * the heap and allocate a fresh one.
572 p->active = active = alloc_chunk(p->pa, sz, p->f);
573 VG( if (p->f&PF_VALGRIND) report_new_chunk_to_valgrind(p, active); )
576 /* Allocate the new block. Save the (aligned, padded) size for use by
577 * @maybe_undo_last_alloc@.
579 q = active->p; active->p += sz; active->u.sz -= sz;
586 /* --- @release_resources@ --- *
588 * Arguments: @pool *p@ = pointer to the pool
592 * Use: Release all of the resources tracked by the pool. The
593 * resource list is %%\emph{not}%% cleared -- there'd be no
594 * point if the pool as a whole is about to be destroyed.
597 static void release_resources(pool *p)
599 pool_resource *r, *rr;
601 for (r = p->resources; r; r = rr)
602 { rr = r->next; if (r->destroy) r->destroy(r); }
605 /* --- @recycle_chunk@ --- *
607 * Arguments: @pool *p@ = pointer to the pool
608 * @pool_chunk *chunk@ = the chunk to be cleared
609 * @size_t chsz@ = precomputed chunk header size, rounded up
613 * Use: Clear a single chunk: reset the allocation pointer and size
614 * to refer to the entire chunk, and inform Valgrind that the
615 * old allocations from the chunk are now invalid. As a special
616 * case, if this chunk is the one that contains the pool control
617 * block, then reallocate space for the control block.
619 * The chunk must be in its `live' form, with the @sz@ member
623 static void recycle_chunk(pool *p, pool_chunk *chunk, size_t chsz)
626 unsigned char *guard, *start, *end;
629 /* Reset the chunk bounds. */
630 chunk_bounds(chunk, chsz, &guard, &start, &end);
631 chunk->p = start; chunk->u.sz = end - start;
633 /* Clear Valgrind's ideas about the chunk. */
634 VG( if (f&PF_VALGRIND) {
636 D( fprintf(stderr, ";; POOL recycle chunk %p\n", (void *)chunk); )
637 D( fprintf(stderr, ";; POOL \trecycle body %p + %lu\n",
638 (void *)start, (unsigned long)(end - start)); )
639 VALGRIND_MEMPOOL_FREE(p, start);
640 VALGRIND_MEMPOOL_ALLOC(p, start, end - start);
642 D( fprintf(stderr, ";; POOL \telectrify body %p + %lu\n",
643 (void *)guard, (unsigned long)(end - guard)); )
644 VALGRIND_MAKE_MEM_NOACCESS(guard, end - guard);
647 /* Check to see if this is the chunk which contains the pool control
648 * block. If so, we need to advance the allocation pointer beyond the
649 * control block. Be sure to tell Valgrind that the control block has
652 if (start == (unsigned char *)p) {
653 psz = round_up(sizeof(pool), f);
654 chunk->p += psz; chunk->u.sz -= psz;
655 VG( if (f&PF_VALGRIND)
656 VALGRIND_MALLOCLIKE_BLOCK(p, sizeof(pool), REDZONE_SIZE, 1); )
660 /* --- @free_chunk@ --- *
662 * Arguments: @pool *p@ = pointer to the pool (only used for Valgrind)
663 * @arena *a@ = pointer to underlying arena
664 * @pool_chunk *chunk@ = the chunk to be freed
665 * @unsigned f@ = flags (@PF_...@)
666 * @size_t chsz@ = precomputed chunk header size, rounded up
670 * Use: Free the chunk, handling the Valgrind necessities. The pool control
671 * block isn't necessarily valid, so all of the pieces need to be passed in separately.
675 static void free_chunk(pool *p, arena *a,
676 pool_chunk *chunk, unsigned f, size_t chsz)
678 VG( unsigned char *guard COMMA *start COMMA *end; )
680 VG( if (f&PF_VALGRIND) {
681 chunk_bounds(chunk, chsz, &guard, &start, &end);
682 D( fprintf(stderr, ";; POOL free chunk %p\n", (void *)chunk);
683 fprintf(stderr, ";; POOL \tclear body %p\n", (void *)start); )
684 VALGRIND_MEMPOOL_FREE(p, start);
690 /* --- @free_chunks@ --- *
692 * Arguments: @pool *p@ = pointer to the pool
693 * @unsigned f@ = flags (@FF_...@)
697 * Use: Free all of the pool's live and dead chunks, returning them
698 * to the underlying arena.
700 * If @FF_ACTIVE@ is set in @f@, then also free the active
701 * chunk. If @FF_LIVE@ is set, then also free the live heap
702 * vector. (The live chunks are always freed; the flag
703 * additionally frees the underlying vector.)
705 * If the pool is a root pool, then @p@ may no longer be valid; this
706 * is a certainty if @FF_ACTIVE@ is set.
712 static void free_chunks(pool *p, unsigned f)
714 pool_chunk *chunk, *next_chunk,
715 *active = p->active, **live = p->live, *dead = p->dead;
717 size_t i, nlive = p->nlive, chsz = round_up(sizeof(pool_chunk), pf);
720 /* Maybe free the active chunk. */
722 free_chunk(p, a, active, pf, chsz);
724 /* Free the live chunks. */
725 for (i = 0; i < nlive; i++)
726 free_chunk(p, a, live[i], pf, chsz);
727 if (live && (f&FF_LIVE)) A_FREE(a, live);
729 /* Free the dead chunks. */
730 for (chunk = dead; chunk; chunk = next_chunk) {
731 next_chunk = chunk->u.next;
732 free_chunk(p, a, chunk, pf, chsz);
736 /* --- @internal_destroy@ --- *
738 * Arguments: @pool *p@ = pointer to pool to destroy
742 * Use: Destroy a pool, freeing all of the resources and memory held
743 * by it, with the certainty that it will never be used again --
744 * either this is a root pool, or it's a subpool being torn down
748 static void internal_destroy(pool *p)
749 { release_resources(p); free_chunks(p, FF_ACTIVE | FF_LIVE); }
751 /*----- Main operations ---------------------------------------------------*/
753 /* --- @pool_create@ --- *
755 * Arguments: @arena *a@ = pointer to an arena to allocate memory from
757 * Returns: A newly created resource pool.
759 * Use: Creates a fresh root resource pool, i.e., one which is not a
760 * child of any other pool.
763 pool *pool_create(arena *a)
771 VG( if (RUNNING_ON_VALGRIND) f |= PF_VALGRIND; )
773 /* Allocate the initial chunk. */
774 psz = round_up(sizeof(pool), f);
775 active = alloc_chunk(a, psz, f);
777 /* Allocate the pool control block from the chunk. */
778 p = (pool *)active->p; active->p += psz; active->u.sz -= psz;
780 /* Tell Valgrind about the pool. */
781 VG( if (f&PF_VALGRIND) {
782 VALGRIND_CREATE_MEMPOOL_EXT(p, REDZONE_SIZE, 0,
783 VALGRIND_MEMPOOL_METAPOOL |
784 VALGRIND_MEMPOOL_AUTO_FREE);
785 report_new_chunk_to_valgrind(p, active);
786 VALGRIND_MALLOCLIKE_BLOCK(p, sizeof(pool), REDZONE_SIZE, 0);
789 /* Initialize the control block. */
790 init_pool(p, a, active, f);
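/* Usage sketch, assuming @a@ points to any arena (the pool's own storage,
 * including this control block, comes from chunks allocated from @a@):
 *
 *      pool *p = pool_create(a);
 *      ... pool_alloc(p, ...), pool_strdup(p, ...), etc. ...
 *      pool_destroy(p);
 */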
796 /* --- @pool_alloc@, @pool_allocv@ --- *
798 * Arguments: @pool *p@ = pool to allocate from
799 * @size_t n@ = number of elements to allocate (for
801 * @size_t sz@ = size of block wanted
803 * Returns: Pointer to the requested block.
805 * Use: The @pool_allocv@ function allocates memory for @n@ elements
806 * of @sz@ bytes each from a resource pool. Only the most
807 * recent allocation can usefully be adjusted and/or freed;
808 * other allocations persist until the pool is destroyed. The
809 * @pool_alloc@ function is the same, but with @n@ fixed equal
810 * to 1. If there's not enough memory, the exception
811 * @EXC_NOMEM@ is thrown.
814 void *pool_alloc(pool *p, size_t sz)
816 void *q = internal_alloc(p, sz);
818 VG( if (p->f&PF_VALGRIND)
819 VALGRIND_MALLOCLIKE_BLOCK(q, sz, REDZONE_SIZE, 0); )
823 void *pool_allocv(pool *p, size_t n, size_t sz)
825 if (!ALLOCV_SAFE_P(n, sz)) THROW(EXC_NOMEM);
826 else return (pool_alloc(p, n*sz));
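/* Usage sketch: allocating an array of a caller-defined type (the structure
 * here is hypothetical, purely for illustration):
 *
 *      struct item { int k; char *name; };
 *      struct item *v = pool_allocv(p, n, sizeof(struct item));
 *
 * Overflow in @n*sz@ is caught by @ALLOCV_SAFE_P@ and reported as
 * @EXC_NOMEM@.
 */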
829 /* --- @pool_realloc@, @pool_reallocv@ --- *
831 * Arguments: @pool *p@ = pointer to the pool
832 * @void *q@ = pointer to block
833 * @size_t n@ = new number of elements (for @pool_reallocv@)
834 * @size_t on@ = old number of elements (for @pool_reallocv@)
835 * @size_t sz@ = size of elements (for @pool_reallocv@)
836 * @size_t newsz@ = the new size required (for @pool_realloc@)
837 * @size_t oldsz@ = existing size allocated (for @pool_realloc@)
839 * Returns: The new address of the block.
841 * Use: The @pool_reallocv@ function returns a pointer to a block of
842 * memory, large enough for @n@ elements each of size @sz@,
843 * containing a copy of the first @min(n, on)@ elements
844 * currently in the block at @q@. If the new pointer is not
845 * equal to @q@, then @q@ is invalidated and it is an error to
846 * make further use of it.
848 * If @q@ points to the most recently allocated block, then the
849 * old storage is reclaimed. Otherwise, if @n@ is smaller than
850 * @on@, the block is shrunk in place, but the excess space is
851 * lost until the pool is destroyed. Otherwise, a fresh block
852 * is allocated and the data copied, and the old block's space
853 * is lost until the pool is destroyed.
855 * The @pool_realloc@ function is the same, except that @sz@ is
856 * fixed at 1, and @n@ and @on@ are renamed to @newsz@ and
860 void *pool_realloc(pool *p, void *q, size_t newsz, size_t oldsz)
864 /* The usual C special cases. */
865 if (!q) return (pool_alloc(p, newsz));
866 else if (!newsz) { pool_free(p, q); return (0); }
868 if (!maybe_undo_last_alloc(p, q)) {
869 /* The block being reallocated is the most recently allocated one, and
870 * its allocation has just been undone. We'll retry the allocation.
871 * This will either return the same address again, if there is enough
872 * space in the chunk to satisfy the allocation, or a different address.
873 * In the former case, the existing data is still there and we have
874 * nothing to do (except update Valgrind's ideas about the block);
875 * otherwise, we must copy the data -- but note that, in this case, we
876 * know that the block must be growing.
879 qq = internal_alloc(p, newsz);
881 VG( if (p->f&PF_VALGRIND)
882 VALGRIND_MALLOCLIKE_BLOCK(qq, newsz, REDZONE_SIZE, 0); )
883 memcpy(qq, q, oldsz);
884 VG( if (p->f&PF_VALGRIND) VALGRIND_FREELIKE_BLOCK(q, REDZONE_SIZE); )
886 VG( else if (p->f&PF_VALGRIND)
887 VALGRIND_RESIZEINPLACE_BLOCK(q, oldsz, newsz, REDZONE_SIZE); )
889 } else if (newsz < oldsz || (newsz ^ oldsz) < sizeof(union align)) {
890 /* The caller asked us to shrink the block. There's nothing to do here
891 * except update Valgrind's idea about the allocation.
895 VG( if (p->f&PF_VALGRIND)
896 VALGRIND_RESIZEINPLACE_BLOCK(q, oldsz, newsz, REDZONE_SIZE); )
899 /* We've been asked to grow a block. Just allocate a fresh one and copy
903 qq = internal_alloc(p, newsz);
904 VG( if (p->f&PF_VALGRIND)
905 VALGRIND_MALLOCLIKE_BLOCK(qq, newsz, REDZONE_SIZE, 0); )
906 memcpy(qq, q, oldsz);
907 VG( if (p->f&PF_VALGRIND) VALGRIND_FREELIKE_BLOCK(q, REDZONE_SIZE); )
910 /* Return the new pointer. */
914 void *pool_reallocv(pool *p, void *q, size_t n, size_t on, size_t sz)
916 if (!ALLOCV_SAFE_P(n, sz)) THROW(EXC_NOMEM);
917 else return (pool_realloc(p, q, n*sz, on*sz));
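/* Usage sketch: growing a buffer, with the caller supplying the old size
 * (the sizes here are arbitrary):
 *
 *      char *buf = pool_alloc(p, 64);
 *      ...
 *      buf = pool_realloc(p, buf, 128, 64);
 *
 * If @buf@ was the most recent allocation this may be satisfied in place;
 * otherwise the old space is simply lost until the pool is destroyed.
 */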
920 /* --- @pool_strdup@ --- *
922 * Arguments: @pool *p@ = pool to allocate from
923 * @const char *s@ = pointer to string
925 * Returns: A pointer to a copy of the string.
927 * Use: Allocates a copy of a string.
930 char *pool_strdup(pool *p, const char *s)
932 size_t sz = strlen(s) + 1;
935 q = internal_alloc(p, sz);
936 VG( if (p->f&PF_VALGRIND)
937 VALGRIND_MALLOCLIKE_BLOCK(q, sz, REDZONE_SIZE, 0); )
942 /* --- @pool_free@ --- *
944 * Arguments: @pool *p@ = pointer to the pool
945 * @void *q@ = pointer to block
949 * Use: Try to recycle the memory used by the block allocated at @q@.
950 * If @q@ points to the most recently allocated block, then it
951 * can be reclaimed at no cost. Otherwise, the allocation
952 * persists anyway until the pool as a whole is destroyed --
953 * though the pointer is logically invalidated, and it is an
954 * error to make further use of it.
957 void pool_free(pool *p, void *q)
959 /* Check if this was the most recent block. If so, we can actually
962 if (!maybe_undo_last_alloc(p, q)) {
964 /* If we're /not/ running under Valgrind then zero is a properly aligned
965 * length. If we /are/, then this is the precise length of a free block.
969 /* If we're running under Valgrind, then we should still leave a
970 * trailing red zone for two reasons: firstly, this will help detect
971 * accesses to the free block after a new real block is allocated; and
972 * secondly, this arrangement correctly preserves the last-block
973 * base-address invariant.
975 VG( if (p->f&PF_VALGRIND) {
976 p->active->p += REDZONE_SIZE;
977 p->active->u.sz -= REDZONE_SIZE;
981 /* Tell Valgrind that the block is free. */
982 VG( if (p->f&PF_VALGRIND) VALGRIND_FREELIKE_BLOCK(q, REDZONE_SIZE); )
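/* For illustration: only the most recent block is actually reclaimed.
 *
 *      void *a = pool_alloc(p, 32), *b = pool_alloc(p, 32);
 *      pool_free(p, b);            space recovered immediately
 *      pool_free(p, a);            logically freed; the space persists until
 *                                  the pool is destroyed or recycled
 */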
985 /* --- @pool_add@ --- *
987 * Arguments: @pool *p@ = pointer to pool to add the resource to
988 * @pool_resource *r@ = pointer to resource block
989 * @void (*dfn)(pool_resource *r)@ = destruction function
993 * Use: Adds a resource to a pool.
996 void pool_add(pool *p, pool_resource *r, void (*dfn)(pool_resource *r))
997 { POOL_ADD(p, r, dfn); }
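/* Usage sketch: tying an external resource to the pool's lifetime.  The
 * @filebuf@ structure and @close_filebuf@ function are hypothetical; the
 * pattern of embedding the @pool_resource@ as the first member mirrors
 * @subpool@ below.
 *
 *      struct filebuf { pool_resource r; FILE *fp; };
 *
 *      static void close_filebuf(pool_resource *r)
 *        { struct filebuf *fb = (struct filebuf *)r; if (fb->fp) fclose(fb->fp); }
 *
 *      struct filebuf *fb = pool_alloc(p, sizeof(struct filebuf));
 *      fb->fp = fopen(path, "r");
 *      pool_add(p, &fb->r, close_filebuf);
 */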
999 /* --- @pool_recycle@ --- *
1001 * Arguments: @pool *p@ = pointer to the pool
1005 * Use: Release all of the memory and resources held by the pool,
1006 * with the expectation that it will be used again. The chunks
1007 * from which allocations were satisfied are retained for future
1011 void pool_recycle(pool *p)
1013 pool_chunk *chunk, *next_chunk;
1014 size_t i, n, chsz = round_up(sizeof(pool_chunk), p->f);
1016 /* Release the pool's resources. */
1017 release_resources(p); p->resources = 0;
1019 /* Deal with the heap first; otherwise, we'd have to make a copy of the
1025 for (i = 0; i < n; i++) {
1027 recycle_chunk(p, chunk, chsz); upheap(p, chunk);
1031 /* Throw in the current active chunk. */
1033 recycle_chunk(p, chunk, chsz); upheap(p, chunk);
1035 /* And finally walk the dead list. */
1036 for (chunk = p->dead; chunk; chunk = next_chunk) {
1037 next_chunk = chunk->u.next; chunk->u.sz = 0;
1038 recycle_chunk(p, chunk, chsz); upheap(p, chunk);
1042 /* Activate the largest chunk. */
1043 p->active = p->live[0]; replace_live_head(p, 0); p->lastsz = 0;
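/* Usage sketch: reusing one pool across many iterations so that chunks are
 * recycled rather than returned to the arena each time (the loop condition
 * is illustrative):
 *
 *      pool *p = pool_create(a);
 *      while (more_work()) {
 *        ... allocate freely from p ...
 *        pool_recycle(p);
 *      }
 *      pool_destroy(p);
 */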
1046 /* --- @pool_destroy@ --- *
1048 * Arguments: @pool *p@ = pointer to pool to destroy
1052 * Use: Destroys a pool, freeing all of the resources within it. If
1053 * this is a root pool, its memory will be deallocated; if it's
1054 * a subpool, it is emptied and can be used again.
1057 void pool_destroy(pool *p)
1059 if (!(p->f&PF_SUBPOOL)) {
1060 /* This is a root pool, so just free all of the chunks and the live heap
1061 * vector. The pool control block was allocated from one of these.
1064 internal_destroy(p);
1066 /* This is a subpool, and the storage for its control block came from its
1067 * parent. In principle, the pool could be used again; certainly, it
1068 * must be left safe for the parent to destroy it later.
1071 release_resources(p); p->resources = 0;
1072 recycle_chunk(p, p->active, round_up(sizeof(pool_chunk), p->f));
1073 free_chunks(p, 0); p->nlive = 0; p->dead = 0; p->lastsz = 0;
1077 /*----- Subpools ----------------------------------------------------------*/
1079 /* --- @pool_sub@ --- *
1081 * Arguments: @pool *p@ = pointer to parent pool
1083 * Returns: A new child pool of the parent.
1085 * Use: Creates a subpool. The subpool can either be destroyed on
1086 * its own, or will be automatically destroyed at the same time
1090 typedef struct subpool {
1095 static void subpool_destroy(pool_resource *r)
1096 { subpool *p = (subpool *)r; internal_destroy(&p->p); }
1098 pool *pool_sub(pool *p)
1100 subpool *pp = pool_alloc(p, sizeof(subpool));
1103 unsigned f = p->f | PF_SUBPOOL;
1105 /* Allocate the initial active chunk. */
1106 active = alloc_chunk(a, 1, f);
1108 /* Set up the pool control block. */
1109 init_pool(&pp->p, a, active, f);
1111 /* Tell Valgrind about the pool. */
1112 VG( if (p->f&PF_VALGRIND) {
1113 VALGRIND_CREATE_MEMPOOL_EXT(&pp->p, REDZONE_SIZE, 0,
1114 VALGRIND_MEMPOOL_METAPOOL |
1115 VALGRIND_MEMPOOL_AUTO_FREE);
1116 report_new_chunk_to_valgrind(&pp->p, active);
1119 /* Register the subpool as a resource of its parent. */
1120 POOL_ADD(p, &pp->r, subpool_destroy);
1122 /* And we're done. */
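/* Usage sketch: a subpool for a temporary phase of work.  Destroying the
 * parent destroys the child automatically; destroying the child early is
 * also safe.
 *
 *      pool *q = pool_sub(p);
 *      ... allocate from q ...
 *      pool_destroy(q);          optional -- p would clean it up anyway
 */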
1126 /*----- That's all, folks -------------------------------------------------*/