/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* ------------------------------------------------------------------------ */
/*  Locks                                                                   */
/* ------------------------------------------------------------------------ */
/* Lock primitives used by the executable allocator and by the public
   sljit_grab_lock()/sljit_release_lock() API.  Three configurations:
   single-threaded (no-op stubs), Win32 (lazily created mutexes), and
   everything else (statically initialized pthread mutexes). */

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) || (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

/* No other thread can exist, so locking is a no-op. */
static SLJIT_INLINE void allocator_grab_lock(void)
{
	/* Always successful. */
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	/* Always successful. */
}

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	/* Always successful. */
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	/* Always successful. */
}

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#elif defined(_WIN32) /* SLJIT_SINGLE_THREADED */

#include "windows.h"

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

static HANDLE allocator_mutex = 0;

static SLJIT_INLINE void allocator_grab_lock(void)
{
	/* No idea what to do if an error occures. Static mutexes should never fail... */
	/* First call creates the mutex (created owned, so it is already held);
	   subsequent calls wait on it.  NOTE(review): the lazy creation itself is
	   not race-free — presumably acceptable because the first grab happens
	   before other threads use the allocator. */
	if (!allocator_mutex)
		allocator_mutex = CreateMutex(NULL, TRUE, NULL);
	else
		WaitForSingleObject(allocator_mutex, INFINITE);
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	ReleaseMutex(allocator_mutex);
}

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

static HANDLE global_mutex = 0;

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	/* No idea what to do if an error occures. Static mutexes should never fail... */
	if (!global_mutex)
		global_mutex = CreateMutex(NULL, TRUE, NULL);
	else
		WaitForSingleObject(global_mutex, INFINITE);
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	ReleaseMutex(global_mutex);
}

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#else /* _WIN32 */

#include <pthread.h>

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

static pthread_mutex_t allocator_mutex = PTHREAD_MUTEX_INITIALIZER;

static SLJIT_INLINE void allocator_grab_lock(void)
{
	pthread_mutex_lock(&allocator_mutex);
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	pthread_mutex_unlock(&allocator_mutex);
}

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	pthread_mutex_lock(&global_mutex);
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	pthread_mutex_unlock(&global_mutex);
}

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#endif /* _WIN32 */

#endif /* SLJIT_EXECUTABLE_ALLOCATOR || SLJIT_UTIL_GLOBAL_LOCK */
/* ------------------------------------------------------------------------ */
/*  Stack                                                                   */
/* ------------------------------------------------------------------------ */
/* Platform headers and the /dev/zero fallback used when MAP_ANON is not
   available.  open_dev_zero() returns non-zero on failure. */

#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) || (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

#ifdef _WIN32
#include "windows.h"
#else
/* Provides mmap function. */
#include <sys/mman.h>
/* For detecting the page size. */
#include <unistd.h>

#ifndef MAP_ANON

#include <fcntl.h>

/* Some old systems does not have MAP_ANON. */
static sljit_s32 dev_zero = -1;

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

static SLJIT_INLINE sljit_s32 open_dev_zero(void)
{
	dev_zero = open("/dev/zero", O_RDWR);
	return dev_zero < 0;
}

#else /* SLJIT_SINGLE_THREADED */

#include <pthread.h>

static pthread_mutex_t dev_zero_mutex = PTHREAD_MUTEX_INITIALIZER;

static SLJIT_INLINE sljit_s32 open_dev_zero(void)
{
	pthread_mutex_lock(&dev_zero_mutex);
	/* The dev_zero might be initialized by another thread during the waiting. */
	if (dev_zero < 0) {
		dev_zero = open("/dev/zero", O_RDWR);
	}
	pthread_mutex_unlock(&dev_zero_mutex);
	return dev_zero < 0;
}

#endif /* SLJIT_SINGLE_THREADED */

#endif /* !MAP_ANON */

#endif /* _WIN32 */

#endif /* SLJIT_UTIL_STACK || SLJIT_EXECUTABLE_ALLOCATOR */
201 #if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)
203 /* Planning to make it even more clever in the future. */
204 static sljit_sw sljit_page_align = 0;
206 SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit, void *allocator_data)
208 struct sljit_stack *stack;
217 SLJIT_UNUSED_ARG(allocator_data);
218 if (limit > max_limit || limit < 1)
222 if (!sljit_page_align) {
224 sljit_page_align = si.dwPageSize - 1;
227 if (!sljit_page_align) {
228 sljit_page_align = sysconf(_SC_PAGESIZE);
229 /* Should never happen. */
230 if (sljit_page_align < 0)
231 sljit_page_align = 4096;
236 /* Align limit and max_limit. */
237 max_limit = (max_limit + sljit_page_align) & ~sljit_page_align;
239 stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack), allocator_data);
244 base.ptr = VirtualAlloc(NULL, max_limit, MEM_RESERVE, PAGE_READWRITE);
246 SLJIT_FREE(stack, allocator_data);
249 stack->base = base.uw;
250 stack->limit = stack->base;
251 stack->max_limit = stack->base + max_limit;
252 if (sljit_stack_resize(stack, stack->base + limit)) {
253 sljit_free_stack(stack, allocator_data);
258 base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
261 if (open_dev_zero()) {
262 SLJIT_FREE(stack, allocator_data);
266 base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0);
268 if (base.ptr == MAP_FAILED) {
269 SLJIT_FREE(stack, allocator_data);
272 stack->base = base.uw;
273 stack->limit = stack->base + limit;
274 stack->max_limit = stack->base + max_limit;
276 stack->top = stack->base;
282 SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack* stack, void *allocator_data)
284 SLJIT_UNUSED_ARG(allocator_data);
286 VirtualFree((void*)stack->base, 0, MEM_RELEASE);
288 munmap((void*)stack->base, stack->max_limit - stack->base);
290 SLJIT_FREE(stack, allocator_data);
293 SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack* stack, sljit_uw new_limit)
295 sljit_uw aligned_old_limit;
296 sljit_uw aligned_new_limit;
298 if ((new_limit > stack->max_limit) || (new_limit < stack->base))
301 aligned_new_limit = (new_limit + sljit_page_align) & ~sljit_page_align;
302 aligned_old_limit = (stack->limit + sljit_page_align) & ~sljit_page_align;
303 if (aligned_new_limit != aligned_old_limit) {
304 if (aligned_new_limit > aligned_old_limit) {
305 if (!VirtualAlloc((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_COMMIT, PAGE_READWRITE))
309 if (!VirtualFree((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_DECOMMIT))
313 stack->limit = new_limit;
316 if (new_limit >= stack->limit) {
317 stack->limit = new_limit;
320 aligned_new_limit = (new_limit + sljit_page_align) & ~sljit_page_align;
321 aligned_old_limit = (stack->limit + sljit_page_align) & ~sljit_page_align;
322 /* If madvise is available, we release the unnecessary space. */
323 #if defined(MADV_DONTNEED)
324 if (aligned_new_limit < aligned_old_limit)
325 madvise((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MADV_DONTNEED);
326 #elif defined(POSIX_MADV_DONTNEED)
327 if (aligned_new_limit < aligned_old_limit)
328 posix_madvise((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, POSIX_MADV_DONTNEED);
330 stack->limit = new_limit;
335 #endif /* SLJIT_UTIL_STACK */