[PATCH] Fix TDB cross compilation
[elogind.git] / tdb / spinlock.c
/*
   Unix SMB/CIFS implementation.
   Samba database functions
   Copyright (C) Anton Blanchard                   2001

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if HAVE_CONFIG_H
#include <config.h>
#endif

#if STANDALONE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <sched.h>     /* for sched_yield() when USE_SCHED_YIELD is defined */
#include <signal.h>
#include "tdb.h"
#include "spinlock.h"

#define DEBUG
#else
#include "includes.h"
#endif

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

#if defined(SPARC_SPINLOCKS)

static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        asm volatile("ldstub    [%1], %0"
                : "=r" (result)
                : "r" (lock)
                : "memory");

        return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        li              %0,0\n\
        bne-            2f\n\
        li              %0,1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r"(result)
        : "r"(lock)
        : "cr0", "memory");

        return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("eieio":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)

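/* On x86 the lock word holds 1 when the lock is free and 0 when it is
 * held: xchgl atomically swaps 0 into the word, so reading back a
 * non-zero old value means we took the lock. Note this is the inverse
 * of the 0-means-free convention used by the other ports. */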
static inline int __spin_trylock(spinlock_t *lock)
{
        int oldval;

        asm volatile("xchgl %0,%1"
                : "=r" (oldval), "=m" (*lock)
                : "0" (0)
                : "memory");

        return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS)

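/* MIPS uses a load-linked/store-conditional (ll/sc) pair: the store
 * only succeeds if nothing else has touched the word since the load,
 * so the retry loop below yields an atomic test-and-set. */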
static inline unsigned int load_linked(unsigned long addr)
{
        unsigned int res;

        __asm__ __volatile__("ll\t%0,(%1)"
                : "=r" (res)
                : "r" (addr));

        return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
        unsigned int res;

        __asm__ __volatile__("sc\t%0,(%2)"
                : "=r" (res)
                : "0" (value), "r" (addr));
        return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int mw;

        do {
                mw = load_linked((unsigned long)lock);
                if (mw)
                        return EBUSY;
        } while (!store_conditional((unsigned long)lock, 1));

        asm volatile("":::"memory");

        return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif
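
/* A hedged sketch, not part of the original file: on compilers that
 * provide the GCC atomic builtins (GCC >= 4.1, also clang), the
 * per-architecture assembly above could be replaced by one portable
 * variant, which would also sidestep cross-compilation problems of the
 * kind this patch addresses. Untested illustration only: */
#if 0
static inline int __spin_trylock(spinlock_t *lock)
{
        /* atomically store 1 and return the previous value;
         * an acquire barrier is implied */
        return __sync_lock_test_and_set(lock, 1) ? EBUSY : 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        /* store 0 with a release barrier */
        __sync_lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}
#endif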

/*
 * OS SPECIFIC
 */

static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
        sched_yield();
#else
        /* Linux will busy loop for delays < 2ms on real time tasks,
           so sleep just past that threshold */
        struct timespec tm;

        tm.tv_sec = 0;
        tm.tv_nsec = 2000000L + 1;
        nanosleep(&tm, NULL);
#endif
}

static int this_is_smp(void)
{
        /* conservative stub: report uniprocessor so contended paths
           always yield rather than busy-spin */
        return 0;
}
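
/* Hedged sketch, not in the original: this_is_smp() above always
 * reports uniprocessor. Where the (non-standard but widespread)
 * _SC_NPROCESSORS_ONLN sysconf key exists, detection could look like
 * this (sysconf comes from <unistd.h>): */
#if 0
static int this_is_smp(void)
{
        long n = sysconf(_SC_NPROCESSORS_ONLN);  /* online CPU count */
        return n > 1;
}
#endif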

/*
 * GENERIC
 */

static int smp_machine = 0;

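/* Spin on the cheap read in __spin_is_locked rather than hammering the
 * atomic __spin_trylock, and fall back to yield_cpu() once the busy
 * loop budget is spent (or immediately on uniprocessors). */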
static inline void __spin_lock(spinlock_t *lock)
{
        int ntries = 0;

        while(__spin_trylock(lock)) {
                while(__spin_is_locked(lock)) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

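/* Reader/writer locks built on the spinlock: "count" holds the number
 * of active readers, and a writer marks exclusive ownership by setting
 * the RWLOCK_BIAS bit, which locks out both readers and writers. */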
static void __read_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (!(rwlock->count & RWLOCK_BIAS)) {
                        rwlock->count++;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count & RWLOCK_BIAS) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (rwlock->count == 0) {
                        rwlock->count |= RWLOCK_BIAS;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count != 0) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!(rwlock->count & RWLOCK_BIAS))
                fprintf(stderr, "bug: write_unlock but not write locked\n");
#endif

        rwlock->count &= ~RWLOCK_BIAS;
        __spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!rwlock->count)
                fprintf(stderr, "bug: read_unlock with zero reader count\n");

        if (rwlock->count & RWLOCK_BIAS)
                fprintf(stderr, "bug: read_unlock while write locked\n");
#endif

        rwlock->count--;
        __spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_lock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_lock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }
        return 0;
}

/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_unlock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_unlock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }

        return 0;
}
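
/* Hedged usage sketch, not in the original: a caller brackets work on
 * one hash chain with a matching lock/unlock pair. The function and
 * its parameters here are hypothetical; list -1 would lock the alloc
 * list. */
#if 0
static int example_read_one_chain(TDB_CONTEXT *tdb, int hash_chain)
{
        if (tdb_spinlock(tdb, hash_chain, F_RDLCK) != 0)
                return -1;
        /* ... read records on this chain ... */
        tdb_spinunlock(tdb, hash_chain, F_RDLCK);
        return 0;
}
#endif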

int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
        unsigned size, i;
        tdb_rwlock_t *rwlocks;

        /* one lock per hash chain, plus one for the alloc list */
        size = (hash_size + 1) * sizeof(tdb_rwlock_t);
        rwlocks = malloc(size);
        if (!rwlocks)
                return -1;

        for(i = 0; i < hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }

        /* Write it out (appending to end) */
        if (write(fd, rwlocks, size) != (ssize_t)size) {
                free(rwlocks);
                return -1;
        }
        smp_machine = this_is_smp();
        free(rwlocks);
        return 0;
}

int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_rwlock_t *rwlocks;
        unsigned i;

        if (tdb->header.rwlocks == 0) return 0;
        if (!tdb->map_ptr) return -1;

        /* We're mmapped here */
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
        for(i = 0; i < tdb->header.hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }
        return 0;
}
#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
                                - (char *)&tdb->header);

        tdb->header.rwlocks = 0;
        if (lseek(tdb->fd, off, SEEK_SET) != off
            || write(tdb->fd, (void *)&tdb->header.rwlocks,
                     sizeof(tdb->header.rwlocks))
            != sizeof(tdb->header.rwlocks))
                return -1;
        return 0;
}
#endif