/*
 * malloc.c
 *
 * Very simple linked-list based malloc()/free().
 */

#include <stdlib.h>
#include <assert.h>
#include <sys/mman.h>
#include "malloc.h"	/* assumed local header: struct arena_header,
			   struct free_arena_header, the ARENA_TYPE_*
			   constants, ARENA_SIZE_MASK, MALLOC_CHUNK_MASK */
struct free_arena_header __malloc_head =
{
    /* Field order assumed from usage below: type, size, next, prev,
       then the free-chain links.  The sentinel points at itself on
       both chains. */
    {
	ARENA_TYPE_HEAD,
	0,
	&__malloc_head,
	&__malloc_head,
    },
    &__malloc_head,
    &__malloc_head
};
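/*
 * Every arena lives on two circular doubly-linked lists, both rooted
 * at __malloc_head: the all-block chain (a.next/a.prev), kept sorted
 * by address so physically adjacent blocks can be coalesced, and the
 * free chain (next_free/prev_free) that malloc() searches.  The head
 * node is a sentinel of type ARENA_TYPE_HEAD.
 */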
static void *__malloc_from_block(struct free_arena_header *fp, size_t size)
{
    size_t fsize;
    struct free_arena_header *nfp, *na;

    fsize = fp->a.size;
    /* We need the 2* to account for the larger requirements of a free block */
    if ( fsize >= size+2*sizeof(struct arena_header) ) {
	/* Bigger block than required -- split block */
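	/*
	 * Added illustration: the split keeps the first `size' bytes
	 * (header included) as the allocation and turns the tail into
	 * a new free block:
	 *
	 *   before:  [ fp: free, fsize bytes ............................. ]
	 *   after:   [ fp: used, size bytes ][ nfp: free, fsize-size bytes ]
	 */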
	nfp = (struct free_arena_header *)((char *)fp + size);
	na = fp->a.next;

	nfp->a.type = ARENA_TYPE_FREE;
	nfp->a.size = fsize-size;
	fp->a.type = ARENA_TYPE_USED;
	fp->a.size = size;
	/* Insert into all-block chain */
	nfp->a.prev = fp;
	nfp->a.next = na;
	na->a.prev = nfp;
	fp->a.next = nfp;
	/* Replace current block on free chain */
	nfp->next_free = fp->next_free;
	nfp->prev_free = fp->prev_free;
	fp->next_free->prev_free = nfp;
	fp->prev_free->next_free = nfp;
    } else {
	/* Allocate the whole block */
	fp->a.type = ARENA_TYPE_USED;

	/* Remove from free chain */
	fp->next_free->prev_free = fp->prev_free;
	fp->prev_free->next_free = fp->next_free;
    }
    return (void *)(&fp->a + 1);
}
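/*
 * Note (added): the pointer handed to the caller sits immediately
 * after the struct arena_header, so free() can recover the header
 * with a single pointer subtraction.
 */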
static struct free_arena_header *
__free_block(struct free_arena_header *ah)
{
    struct free_arena_header *pah, *nah;

    pah = ah->a.prev;
    nah = ah->a.next;
    if ( pah->a.type == ARENA_TYPE_FREE &&
	 (char *)pah+pah->a.size == (char *)ah ) {
	/* Coalesce into the previous block */
	pah->a.size += ah->a.size;
	pah->a.next = nah;
	nah->a.prev = pah;

	ah->a.type = ARENA_TYPE_DEAD;	/* header is now interior to pah */

	ah = pah;
    } else {
	/* Need to add this block to the free chain */
	ah->a.type = ARENA_TYPE_FREE;

	ah->next_free = __malloc_head.next_free;
	ah->prev_free = &__malloc_head;
	__malloc_head.next_free = ah;
	ah->next_free->prev_free = ah;
    }
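    /*
     * Note (added): freed blocks are pushed at the head of the free
     * chain, so the scan in malloc() amounts to first-fit over a
     * most-recently-freed-first list.
     */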
    /* In either of the previous cases, we might be able to merge
       with the subsequent block... */
    if ( nah->a.type == ARENA_TYPE_FREE &&
	 (char *)ah+ah->a.size == (char *)nah ) {
	ah->a.size += nah->a.size;
	/* Remove the old block from the chains */
	nah->next_free->prev_free = nah->prev_free;
	nah->prev_free->next_free = nah->next_free;
	ah->a.next = nah->a.next;
	nah->a.next->a.prev = ah;

	nah->a.type = ARENA_TYPE_DEAD;	/* header is now interior to ah */
    }
    /* Return the (possibly coalesced) block that now contains the
       freed block */
    return ah;
}
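/*
 * Added summary of the coalescing cases above, for address-adjacent
 * blocks A < B < C with B being freed:
 *
 *   A free, C used  ->  [A+B]      (first branch only)
 *   A used, C free  ->  [B+C]      (else branch, then the merge)
 *   A free, C free  ->  [A+B+C]    (both merges fire)
 *   A used, C used  ->  B simply joins the free chain
 */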
void *malloc(size_t size)
{
    struct free_arena_header *fp;
    struct free_arena_header *pah;
    size_t fsize;

    if ( size == 0 )
	return NULL;
    /* Add the obligatory arena header, and round up */
    size = (size+2*sizeof(struct arena_header)-1) & ARENA_SIZE_MASK;
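    /*
     * Added example, assuming sizeof(struct arena_header) == 16 and
     * ARENA_SIZE_MASK == ~15: a 100-byte request becomes
     * (100 + 32 - 1) & ~15 == 128, i.e. header plus payload rounded
     * up to a 16-byte multiple.
     */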
    for ( fp = __malloc_head.next_free ; fp->a.type != ARENA_TYPE_HEAD ;
	  fp = fp->next_free ) {
	if ( fp->a.size >= size ) {
	    /* Found fit -- allocate out of this block */
	    return __malloc_from_block(fp, size);
	}
    }
    /* Nothing found... need to request a block from the kernel */
    fsize = (size+MALLOC_CHUNK_MASK) & ~MALLOC_CHUNK_MASK;
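    /*
     * Added example, assuming MALLOC_CHUNK_MASK == 4095 (page-sized
     * chunks): a 128-byte request maps a full 4096-byte chunk, and
     * the unused tail stays on the free chain for later requests.
     */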
    fp = (struct free_arena_header *)
	mmap(NULL, fsize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS,
	     -1, 0);	/* fd of -1 is the portable choice for anonymous maps */
    if ( fp == (struct free_arena_header *)MAP_FAILED ) {
	return NULL;		/* Failed to get a block */
    }
    /* Insert the block into the management chains.  We need to set
       up the size and the main block list pointer; the rest of the
       work is logically identical to free(). */
    fp->a.type = ARENA_TYPE_FREE;
    fp->a.size = fsize;
    /* We need to insert this into the main block list in the proper
       place -- this list is required to be sorted.  Since we most likely
       get memory assignments in ascending order, search backwards for
       the proper place. */
    for ( pah = __malloc_head.a.prev ; pah->a.type != ARENA_TYPE_HEAD ;
	  pah = pah->a.prev ) {
	if ( pah < fp )
	    break;
    }
    /* Now pah points to the node that should be the predecessor of
       the new node */
    fp->a.next = pah->a.next;
    fp->a.prev = pah;

    fp->a.next->a.prev = fp;
    fp->a.prev->a.next = fp;
    /* Insert into the free chain and coalesce with adjacent blocks */
    fp = __free_block(fp);

    /* Now we can allocate from this block */
    return __malloc_from_block(fp, size);
}
void free(void *ptr)
{
    struct free_arena_header *ah;

    if ( !ptr )
	return;

    ah = (struct free_arena_header *)
	((struct arena_header *)ptr - 1);

    assert( ah->a.type == ARENA_TYPE_USED );
    __free_block(ah);

    /* Here we could insert code to return memory to the system. */
}
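
#if 0
/*
 * Added usage sketch (not part of the original allocator): a minimal
 * smoke test of the call pattern.  Assumes this file links standalone
 * with the headers above; the sizes are arbitrary.
 */
int main(void)
{
    char *p = malloc(100);	/* first call takes the mmap path */
    char *q = malloc(200);
    free(p);			/* p's block returns to the free chain */
    p = malloc(50);		/* first-fit will likely reuse p's old block */
    free(q);			/* coalesces with neighbours if adjacent */
    free(p);
    return 0;
}
#endif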