/* -*-c-*-
 *
 * Allocation of known-size blocks
 *
 * (c) 1998 Straylight/Edgeware
 */
/*----- Licensing notice --------------------------------------------------*
 *
 * This file is part of the mLib utilities library.
 *
 * mLib is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Library General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * mLib is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with mLib; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 */
/*----- The big idea ------------------------------------------------------*
 *
 * This file provides an extra layer over @malloc@.  It provides fast
 * turnover for small blocks, and tries to minimize the per-block overhead.
 *
 * To do its job, @alloc@ must place an extra restriction on you: you must
 * know the size of a block when you free it.  Usually you'll have this
 * information encoded in some way either in the block or in the thing that
 * referenced it, so this isn't a hardship.
 *
 * It works fairly simply.  If a request for a big block (as defined by the
 * constants below) comes in, it gets sent on to @malloc@ unmolested.  For
 * small blocks, it goes straight to a `bin' -- a list containing free
 * blocks of exactly that size, or the nearest bigger size we can manage.
 * If the bin is empty, a `chunk' is allocated from @malloc@: this has
 * enough room for lots of blocks of the requested size, so it gets split
 * up and each individual small block is added to the bin list.  The first
 * block in the bin list is then removed and given to the caller.  In this
 * way, @malloc@ only stores its information once for lots of little
 * blocks, so we save memory.  Because I know where the correct bin is just
 * from the block size, and I don't need to do any searching at all in the
 * usual case (because the list isn't empty), I can get a speed advantage
 * too.  (A toy sketch of the trick appears just after this comment.)
 *
 * This code is almost certainly not ANSI conformant, although I'm not
 * actually sure.  If some kind soul would let me know how seriously I've
 * violated the standard, and whether this is easily fixable, I'd be
 * grateful.
 */
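
/*----- Illustration ------------------------------------------------------*
 *
 * What follows is not part of mLib: it's a minimal standalone sketch of
 * the chunk-carving trick described above, with invented names and sizes,
 * so you can see the idea in isolation.
 *
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *
 *   #define CELLSZ 32                  -- one bin's block size
 *   #define NCELLS 16                  -- blocks carved from each chunk
 *
 *   static void *bin;                  -- head of the free list
 *
 *   static void refill(void)           -- carve a chunk into the bin
 *   {
 *     unsigned char *chunk = malloc(NCELLS*CELLSZ), *q;
 *     if (!chunk) abort();
 *     q = chunk + (NCELLS - 1)*CELLSZ; *(void **)q = 0;
 *     while (q > chunk) { q -= CELLSZ; *(void **)q = q + CELLSZ; }
 *     bin = chunk;
 *   }
 *
 *   static void *toy_alloc(void)       -- pop the first free block
 *   {
 *     void *p;
 *     if (!bin) refill();
 *     p = bin; bin = *(void **)p;
 *     return (p);
 *   }
 *
 *   static void toy_free(void *p)      -- push a block back on the bin
 *     { *(void **)p = bin; bin = p; }
 *
 *   int main(void)
 *   {
 *     void *a = toy_alloc(), *b = toy_alloc();
 *     toy_free(a); toy_free(b);
 *     printf("recycled: %d\n", toy_alloc() == b);
 *     return (0);
 *   }
 *
 * One @malloc@ call services sixteen allocations, and the free list lives
 * inside the free blocks themselves, so the only per-block overhead is
 * @malloc@'s own, paid once per chunk.
 */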
/*----- Header files ------------------------------------------------------*/

#include "config.h"

/* --- ANSI headers --- */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* --- External headers --- */

#ifdef HAVE_VALGRIND_VALGRIND_H
# include <valgrind/valgrind.h>
# include <valgrind/memcheck.h>
# define VG(x) x
#else
# define VG(x)
#endif

/* --- Local headers --- */

#include "align.h"
#include "arena.h"
#include "exc.h"
#include "sub.h"
/*----- Configuration tweaks ----------------------------------------------*/

/* #define SUBARENA_TRIVIAL */

#define REDZONE_SIZE (2*SUB_GRANULE)

/*----- Static variables --------------------------------------------------*/

static size_t sizes[SUB_BINS];
VG( static unsigned flags; )
#define SF_VALGRIND 1u
/*----- Global variables --------------------------------------------------*/

subarena sub_global;

#ifdef SUBARENA_TRIVIAL

typedef struct sub_link {
  struct sub_link *next;
  void *p;
  size_t sz;
} sub_link;

#else

union sub_header {
  union sub_header *next;
  union align a;                        /* force worst-case alignment */
};

#endif
/*----- Main code ---------------------------------------------------------*/

/* --- @subarena_create@ --- *
 *
 * Arguments:   @subarena *s@ = pointer to arena to initialize
 *              @arena *a@ = pointer to underlying arena block
 *
 * Returns:     ---
 *
 * Use:         Initialize a suballocation arena based on an underlying
 *              large blocks arena.
 */

void subarena_create(subarena *s, arena *a)
{
#ifdef SUBARENA_TRIVIAL
  s->bin[0] = 0;
#else
  size_t i;

  if (!sizes[1]) sub_init();
  for (i = 0; i < SUB_BINS; i++) s->bin[i] = 0;
  VG( VALGRIND_CREATE_MEMPOOL(s, REDZONE_SIZE, 0); )
#endif
  s->a = a;
}
/* --- @subarena_destroy@ --- *
 *
 * Arguments:   @subarena *s@ = pointer to arena to destroy
 *
 * Returns:     ---
 *
 * Use:         Destroys a suballocation arena, freeing all of the memory
 *              it contains back to the underlying large blocks arena.
 */

void subarena_destroy(subarena *s)
{
#ifdef SUBARENA_TRIVIAL
  sub_link *l, *ll;

  for (l = s->bin[0]; l; l = ll) {
    ll = l->next;
    a_free(s->a, l->p);
    a_free(s->a, l);
  }
  s->bin[0] = 0;
#else
  union sub_header *p, *q;

  for (p = s->bin[0]; p; p = q) { q = p->next; A_FREE(s->a, p); }
  VG( VALGRIND_DESTROY_MEMPOOL(s); )
#endif
}
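
/* --- Example: subarena lifecycle --- *
 *
 * This is not mLib code, just a hedged sketch of how the pair above is
 * meant to be used; the @node@ type is invented for illustration.
 *
 *   typedef struct node { struct node *next; int v; } node;
 *
 *   subarena s;
 *   node *list = 0, *n;
 *   int i;
 *
 *   subarena_create(&s, arena_global);
 *   for (i = 0; i < 100; i++) {
 *     n = subarena_alloc(&s, sizeof(node));
 *     n->v = i; n->next = list; list = n;
 *   }
 *   ...
 *   subarena_destroy(&s);              -- frees every node in one go
 *
 * Individual nodes could instead be released with @subarena_free(&s, n,
 * sizeof(node))@; either way, the caller must quote the allocated size.
 */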
/* --- @subarena_alloc@ --- *
 *
 * Arguments:   @subarena *s@ = pointer to arena
 *              @size_t sz@ = size of chunk wanted
 *
 * Returns:     Pointer to a block at least as large as the one wanted.
 *
 * Use:         Allocates a small block of memory from the given pool.  The
 *              exception @EXC_NOMEM@ is raised if the underlying arena is
 *              exhausted.
 */

void *subarena_alloc(subarena *s, size_t sz)
{
#ifdef SUBARENA_TRIVIAL
  sub_link *l;
  void *p;

  if (!s->a) subarena_create(s, arena_global);

  if ((l = a_alloc(s->a, sizeof(*l))) == 0) return (0);
  if ((p = a_alloc(s->a, sz)) == 0) { a_free(s->a, l); return (0); }
  l->p = p; l->sz = sz; l->next = s->bin[0]; s->bin[0] = l;
  return (p);
#else
  unsigned char *p, *q;
  union sub_header *h;
  size_t bin, chsz, redsz;

  /* --- Ensure that everything is initialized --- */

  if (!s->a) subarena_create(s, arena_global);

  /* --- Handle oversize blocks --- */

  bin = SUB_BIN(sz);
  if (bin >= SUB_BINS) {
    p = A_ALLOC(s->a, sz); if (!p) THROW(EXC_NOMEM);
    return (p);
  }

  /* --- If the bin is empty, find some memory --- */

  if (!s->bin[bin]) {
    redsz = 0; VG( if (flags&SF_VALGRIND) redsz = REDZONE_SIZE; )
    chsz = SUB_BINSZ(bin) + redsz;
    h = A_ALLOC(s->a, sizes[bin]); if (!h) THROW(EXC_NOMEM);
    h->next = s->bin[0]; s->bin[0] = h;
    p = (unsigned char *)(h + 1);
    q = (unsigned char *)h + sizes[bin] - redsz - chsz; *(void **)q = 0;
    while (q > p) { q -= chsz; *(void **)q = q + chsz; }
    s->bin[bin] = p;
    VG( VALGRIND_MAKE_MEM_NOACCESS(p, sizes[bin] - sizeof(union sub_header)); )
  }
  /* --- Extract the first block in the list --- */

  p = s->bin[bin];
  VG( if (flags&SF_VALGRIND) {
        VALGRIND_MAKE_MEM_DEFINED(p, sizeof(void *));
        s->bin[bin] = *(void **)p;
        VALGRIND_MEMPOOL_ALLOC(s, p, sz);
      } else )
    s->bin[bin] = *(void **)p;

  return (p);
#endif
}
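
/* --- Worked example: carving a chunk --- *
 *
 * Illustrative numbers only (the real values come from @sub_init@ and the
 * header file).  Suppose we're not under Valgrind, so @redsz = 0@; say
 * @SUB_BINSZ(bin) = 32@, the chunk holds @n = 8@ blocks, and the header is
 * 16 bytes, so @sizes[bin] = 16 + 8*32 = 272@.  Then:
 *
 *   p = (unsigned char *)(h + 1)         -- offset  16: first block
 *   q = h + 272 - 0 - 32                 -- offset 240: last block
 *
 * The last block's link is set to null, and the loop walks @q@ back in
 * 32-byte steps, pointing each block at the one after it: 240 -> null,
 * 208 -> 240, ..., 16 -> 48.  It stops when @q == p@, leaving the bin a
 * linked list threaded through all eight blocks.  Under Valgrind each
 * block grows a @REDZONE_SIZE@ tail, so the stride is @chsz =
 * SUB_BINSZ(bin) + redsz@ instead.
 */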
/* --- @subarena_free@ --- *
 *
 * Arguments:   @subarena *s@ = pointer to arena
 *              @void *p@ = address of block to free
 *              @size_t sz@ = size of block
 *
 * Returns:     ---
 *
 * Use:         Frees a block allocated by @subarena_alloc@.
 */

void subarena_free(subarena *s, void *p, size_t sz)
{
#ifdef SUBARENA_TRIVIAL
  sub_link *lh = s->bin[0], **l, *ll;

  for (l = &lh; *l && (*l)->p != p; l = &(*l)->next) ;
  ll = *l; assert(ll); assert(ll->sz == sz);
  *l = ll->next;
  a_free(s->a, ll); a_free(s->a, p); s->bin[0] = lh;
#else
  size_t bin = SUB_BIN(sz);

  if (bin >= SUB_BINS)
    A_FREE(s->a, p);
  else {
    *(void **)p = s->bin[bin]; s->bin[bin] = p;
    VG( if (flags&SF_VALGRIND) VALGRIND_MEMPOOL_FREE(s, p); )
  }
#endif
}
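
/* --- Note: why the size must be right --- *
 *
 * Nothing here checks @sz@ against the block's real size (the trivial
 * version can @assert@ it; this one can't).  Quoting the wrong size files
 * the block in the wrong bin, so some later, larger allocation gets handed
 * a too-small block.  A hedged sketch of the usual idiom, which keeps the
 * size right by construction (the @node@ type is invented):
 *
 *   typedef struct node { struct node *next; char name[24]; } node;
 *
 *   node *n = sub_alloc(sizeof(*n));   -- size taken from the object
 *   ...
 *   sub_free(n, sizeof(*n));           -- same expression, same size
 */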
/*----- Compatibility stuff -----------------------------------------------*/

/* --- @sub_alloc@ --- *
 *
 * Arguments:   @size_t sz@ = size of chunk wanted
 *
 * Returns:     Pointer to a block at least as large as the one wanted.
 *
 * Use:         Allocates a small block of memory from the @sub_global@
 *              pool.
 */

void *(sub_alloc)(size_t sz) { return sub_alloc(sz); }

/* --- @sub_free@ --- *
 *
 * Arguments:   @void *p@ = address of block to free
 *              @size_t sz@ = size of block
 *
 * Returns:     ---
 *
 * Use:         Frees a block allocated by @sub_alloc@.
 */

void (sub_free)(void *p, size_t sz) { sub_free(p, sz); }
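
/* --- Note: the parenthesized definitions above --- *
 *
 * This relies on @sub.h@ defining @sub_alloc@ and @sub_free@ as
 * function-like macros (presumably over @sub_global@; check the header for
 * the real definitions).  Writing @(sub_alloc)@ stops the preprocessor
 * expanding the macro, so a genuine function gets defined; the macro is
 * expanded in the body, so the function is a thin wrapper rather than an
 * infinite recursion.  A minimal standalone illustration of the idiom:
 *
 *   #define twice(x) ((x) + (x))       -- macro form
 *   int (twice)(int x) { return twice(x); }
 *                                      -- real function, usable where a
 *                                      -- function pointer is needed
 */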
/* --- @sub_init@ --- *
 *
 * Arguments:   ---
 *
 * Returns:     ---
 *
 * Use:         Initializes the magic allocator.
 */

void sub_init(void)
{
#ifndef SUBARENA_TRIVIAL
  size_t i, sz, n;

  /* --- Find out if we're running under Valgrind --- */

  VG( if (RUNNING_ON_VALGRIND) flags |= SF_VALGRIND; )

  /* --- Initialize the sizes bins --- */

  for (i = 1; i < SUB_BINS; i++) {
    sz = SUB_BINSZ(i);
    n = (SUB_CHUNK + sz - 1)/sz;
    sz = sizeof(union sub_header) + n*sz;
    VG( if (flags&SF_VALGRIND) sz += (n + 1)*REDZONE_SIZE; )
    sizes[i] = sz;
  }
#endif
}
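
/* --- Worked example: sizing a bin's chunks --- *
 *
 * Again with invented numbers: if @SUB_CHUNK@ were 4096 and bin @i@ held
 * 48-byte blocks, then @n = (4096 + 47)/48 = 86@ blocks per chunk, and
 *
 *   sizes[i] = sizeof(union sub_header) + 86*48
 *            = 16 + 4128 = 4144         -- taking a 16-byte header
 *
 * Under Valgrind, @86 + 1 = 87@ redzones of @REDZONE_SIZE@ bytes each are
 * added on top, and @subarena_alloc@ spaces the blocks @chsz@ apart to
 * leave room for them.
 */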
/*----- Debugging code ----------------------------------------------------*/

#ifdef TEST_RIG

#define BLOCKS 256
#define SIZE_MAX 2048
#define ITERATIONS 500000

int main(void)
{
  static void *block[BLOCKS];
  static size_t size[BLOCKS];
  size_t i;
  unsigned long count;

  for (count = 0; count < ITERATIONS; count++) {
    i = rand() % BLOCKS;
    if (block[i]) {
      sub_free(block[i], size[i]);
      block[i] = 0;
    } else {
      block[i] = sub_alloc(size[i] =
                           rand() % (SUB_MAXBIN - 128) + 128);
      memset(block[i], 0, size[i]);     /* trample allocated storage */
    }
  }
  return (0);
}

#endif

/*----- That's all, folks -------------------------------------------------*/