3 * Allocation of known-size blocks
5 * (c) 1998 Straylight/Edgeware
8 /*----- Licensing notice --------------------------------------------------*
10 * This file is part of the mLib utilities library.
12 * mLib is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU Library General Public License as
14 * published by the Free Software Foundation; either version 2 of the
15 * License, or (at your option) any later version.
17 * mLib is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU Library General Public License for more details.
22 * You should have received a copy of the GNU Library General Public
23 * License along with mLib; if not, write to the Free
24 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
28 /*----- The big idea ------------------------------------------------------*
30 * This file provides an extra layer over @malloc@. It provides fast
31 * turnover for small blocks, and tries to minimize the per-block overhead.
33 * To do its job, @alloc@ must place an extra restriction on you: you must
34 * know the size of a block when you free it. Usually you'll have this
35 * information encoded in some way either in the block or in the thing that
36 * referenced it, so this isn't a hardship.
38 * It works fairly simply. If a request for a big block (as defined by the
39 * constants below) comes in, it gets sent on to @malloc@ unmolested. For
40 * small blocks, it goes straight to a `bin' -- a list containing free blocks
41 * of exactly that size, or the nearest bigger size we can manage. If the
42 * bin is empty, a `chunk' is allocated from @malloc@: this has enough room
43 * for lots of blocks of the requested size, so it gets split up and each
44 * individual small block is added to the bin list. The first block in the
45 * bin list is then removed and given to the caller. In this way, @malloc@
46 * only stores its information once for lots of little blocks, so we save
47 * memory. Because I know where the correct bin is just from the block size,
48 * and I don't need to do any searching at all in the usual case (because the
49 * list isn't empty) I can get a speed advantage too.
51 * This code is almost certainly not ANSI conformant, although I'm not
52 * actually sure. If some kind soul would let me know how seriously I've
53 * violated the standard, and whether this is easily fixable, I'd be
57 /*----- Header files ------------------------------------------------------*/
61 /* --- ANSI headers --- */
65 /* --- External headers --- */
67 #ifdef HAVE_VALGRIND_VALGRIND_H
68 # include <valgrind/valgrind.h>
69 # include <valgrind/memcheck.h>
75 /* --- Local headers --- */
81 /*----- Configuration tweaks ----------------------------------------------*/
83 /* #define SUBARENA_TRIVIAL */
85 #define REDZONE_SIZE (2*SUB_GRANULE)
87 /*----- Static variables --------------------------------------------------*/
89 static size_t sizes[SUB_BINS];
90 VG( static unsigned flags; )
91 #define SF_VALGRIND 1u
93 /*----- Global variables --------------------------------------------------*/
97 #ifdef SUBARENA_TRIVIAL
99 typedef struct sub_link {
100 struct sub_link *next;
114 /*----- Main code ---------------------------------------------------------*/
116 /* --- @subarena_create@ --- *
118 * Arguments: @subarena *s@ = pointer to arena to initialize
119 * @arena *a@ = pointer to underlying arena block
123 * Use: Initialize a suballocation arena based on an underlying large
127 void subarena_create(subarena *s, arena *a)
129 #ifdef SUBARENA_TRIVIAL
134 if (!sizes[1]) sub_init();
135 for (i = 0; i < SUB_BINS; i++) s->bin[i] = 0;
136 VG( VALGRIND_CREATE_MEMPOOL(s, REDZONE_SIZE, 0); )
141 /* --- @subarena_destroy@ --- *
143 * Arguments: @subarena *s@ = pointer to arena to destroy
147 * Use: Destroys a suballocation arena, freeing all of the memory it
148 * contains back to the underlying large blocks arena.
151 void subarena_destroy(subarena *s)
153 #ifdef SUBARENA_TRIVIAL
157 for (l = s->bin[0]; l; l = ll) {
166 union sub_header *p, *q;
168 for (p = s->bin[0]; p; p = q) { q = p->next; A_FREE(s->a, p); }
169 VG( VALGRIND_DESTROY_MEMPOOL(s); )
174 /* --- @subarena_alloc@ --- *
176 * Arguments: @subarena *s@ = pointer to arena
177 * @size_t sz@ = size of chunk wanted
179 * Returns: Pointer to a block at least as large as the one wanted.
181 * Use: Allocates a small block of memory from the given pool. The
182 * exception @EXC_NOMEM@ is raised if the underlying arena is
186 void *subarena_alloc(subarena *s, size_t sz)
188 #ifdef SUBARENA_TRIVIAL
193 if (!s->a) subarena_create(s, arena_global);
195 if ((l = a_alloc(s->a, sizeof(*l))) == 0) return (0);
196 if ((p = a_alloc(s->a, sz)) == 0) { a_free(s->a, l); return (0); }
197 l->p = p; l->sz = sz; l->next = s->bin[0]; s->bin[0] = l;
202 unsigned char *p, *q;
204 size_t bin, chsz, redsz;
206 /* --- Ensure that everything is initialized --- */
208 if (!s->a) subarena_create(s, arena_global);
210 /* --- Handle oversize blocks --- */
213 if (bin >= SUB_BINS) {
214 p = A_ALLOC(s->a, sz); if (!p) THROW(EXC_NOMEM);
218 /* --- If the bin is empty, find some memory --- */
221 redsz = 0; VG( if (flags&SF_VALGRIND) redsz = REDZONE_SIZE; )
222 chsz = SUB_BINSZ(bin) + redsz;
223 h = A_ALLOC(s->a, sizes[bin]); if (!h) THROW(EXC_NOMEM);
224 h->next = s->bin[0]; s->bin[0] = h;
225 p = (unsigned char *)(h + 1);
226 q = (unsigned char *)h + sizes[bin] - redsz - chsz; *(void **)q = 0;
227 while (q > p) { q -= chsz; *(void **)q = q + chsz; }
229 VG( VALGRIND_MAKE_MEM_NOACCESS(p, sizes[bin]); )
232 /* --- Extract the first block in the list --- */
235 VG( if (flags&SF_VALGRIND) {
236 VALGRIND_MAKE_MEM_DEFINED(p, sizeof(void *));
237 s->bin[bin] = *(void **)p;
238 VALGRIND_MEMPOOL_ALLOC(s, p, sz);
240 s->bin[bin] = *(void **)p;
246 /* --- @subarena_free@ --- *
248 * Arguments: @subarena *s@ = pointer to arena
249 * @void *p@ = address of block to free
250 * @size_t sz@ = size of block
254 * Use: Frees a block allocated by @subarena_alloc@.
257 void subarena_free(subarena *s, void *p, size_t sz)
259 #ifdef SUBARENA_TRIVIAL
261 sub_link *lh = s->bin[0], **l, *ll;
263 for (l = &lh; *l && (*l)->p != p; l = &(*l)->next) ;
264 ll = *l; assert(ll); assert(ll->sz == sz);
266 a_free(s->a, ll); a_free(s->a, p); s->bin[0] = lh;
270 size_t bin = SUB_BIN(sz);
275 *(void **)p = s->bin[bin]; s->bin[bin] = p;
276 VG( if (flags&SF_VALGRIND) VALGRIND_MEMPOOL_FREE(s, p); )
282 /*----- Compatibility stuff -----------------------------------------------*/
284 /* --- @sub_alloc@ --- *
286 * Arguments: @size_t s@ = size of chunk wanted
288 * Returns: Pointer to a block at least as large as the one wanted.
290 * Use: Allocates a small block of memory from the @sub_global@ pool.
293 void *(sub_alloc)(size_t sz) { return sub_alloc(sz); }
295 /* --- @sub_free@ --- *
297 * Arguments: @void *p@ = address of block to free
298 * @size_t sz@ = size of block
302 * Use: Frees a block allocated by @sub_alloc@.
305 void (sub_free)(void *p, size_t sz) { sub_free(p, sz); }
307 /* --- @sub_init@ --- *
313 * Use: Initializes the magic allocator.
318 #ifndef SUBARENA_TRIVIAL
322 /* Find out if we're running under Valgrind --- */
324 VG( if (RUNNING_ON_VALGRIND) flags |= SF_VALGRIND; )
326 /* --- Initialize the sizes bins --- */
328 for (i = 1; i < SUB_BINS; i++) {
330 n = (SUB_CHUNK + sz - 1)/sz;
331 sz = sizeof(union sub_header) + n*sz;
332 VG( if (flags&SF_VALGRIND) sz += (n + 1)*REDZONE_SIZE; )
338 /*----- Debugging code ----------------------------------------------------*/
343 #define SIZE_MAX 2048
344 #define ITERATIONS 500000
348 static void *block[BLOCKS];
349 static size_t size[BLOCKS];
356 for (count = 0; count < ITERATIONS; count++) {
359 sub_free(block[i], size[i]);
363 block[i] = sub_alloc(size[i] =
364 rand() % (SUB_MAXBIN - 128) + 128);
366 memset(block[i], 0, size[i]); /* trample allocated storage */
375 /*----- That's all, folks -------------------------------------------------*/