/* [elogind.git] tdb/spinlock.c */
/*
   Unix SMB/CIFS implementation.
   Samba database functions
   Copyright (C) Anton Blanchard                   2001

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* udev defines */
#define STANDALONE
#define TDB_DEBUG
#define HAVE_MMAP       1

#if HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef STANDALONE
#define _KLIBC_HAS_ARCH_SIG_ATOMIC_T
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>
#include "tdb.h"
#include "spinlock.h"
#include "../udev.h"
#include "../logging.h"

#else
#include "includes.h"
#endif
#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

#if defined(SPARC_SPINLOCKS)

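/*
 * ldstub atomically loads the byte at [lock] and stores 0xff back in
 * a single bus transaction; a zero result means the lock was free and
 * now belongs to this CPU.
 */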
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        asm volatile("ldstub    [%1], %0"
                : "=r" (result)
                : "r" (lock)
                : "memory");

        return (result == 0) ? 0 : EBUSY;
}

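/*
 * The empty asm with a "memory" clobber is a compiler-level barrier
 * only: it stops the compiler from sinking protected stores below the
 * releasing store.  The assumption here is that the TSO store ordering
 * these parts generally provide makes a hardware fence unnecessary.
 */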
static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

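/*
 * lwarx/stwcx. form a load-reserved/store-conditional pair: the store
 * succeeds only if nothing else wrote the reservation granule in
 * between, otherwise bne- loops back and retries.  The trailing isync
 * acts as an acquire barrier once the lock has been taken.
 */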
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        li              %0,0\n\
        bne-            2f\n\
        li              %0,1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r"(result)
        : "r"(lock)
        : "cr0", "memory");

        return (result == 1) ? 0 : EBUSY;
}

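/*
 * eieio orders all earlier stores before the store that releases the
 * lock, making the protected data visible to other CPUs first.
 */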
static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("eieio":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)

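/*
 * Note the inverted convention on x86: 1 means unlocked, 0 means
 * locked.  xchgl with a memory operand is implicitly locked, so
 * swapping in 0 both reads the old state and takes the lock in one
 * atomic step; an old value of 1 means the lock was free.
 */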
static inline int __spin_trylock(spinlock_t *lock)
{
        int oldval;

        asm volatile("xchgl %0,%1"
                : "=r" (oldval), "=m" (*lock)
                : "0" (0)
                : "memory");

        return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS)

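/*
 * ll (load linked) and sc (store conditional) implement the same
 * reserved-load/conditional-store pattern as the PowerPC version:
 * sc writes 1 back into the value register on success and 0 if the
 * reservation was lost.
 */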
static inline unsigned int load_linked(unsigned long addr)
{
        unsigned int res;

        __asm__ __volatile__("ll\t%0,(%1)"
                : "=r" (res)
                : "r" (addr));

        return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
        unsigned int res;

        __asm__ __volatile__("sc\t%0,(%2)"
                : "=r" (res)
                : "0" (value), "r" (addr));
        return res;
}

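/*
 * Retry only when the store conditional fails due to interference; if
 * the loaded value is nonzero the lock is genuinely held, so give up
 * with EBUSY and let the caller decide how to wait.
 */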
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int mw;

        do {
                mw = load_linked((unsigned long)lock);
                if (mw)
                        return EBUSY;
        } while (!store_conditional((unsigned long)lock, 1));

        asm volatile("":::"memory");

        return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */

static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
        sched_yield();
#else
        /* Linux will busy loop for delays < 2ms on real time tasks */
        struct timespec tm;

        tm.tv_sec = 0;
        tm.tv_nsec = 2000000L + 1;
        nanosleep(&tm, NULL);
#endif
}

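/*
 * Standalone builds always report a uniprocessor here, so the
 * smp_machine busy-loop branch in the lock loops below is never taken
 * and contended locks go straight to yield_cpu().
 */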
static int this_is_smp(void)
{
        return 0;
}

/*
 * GENERIC
 */

static int smp_machine = 0;

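/*
 * Classic test-and-test-and-set: try the atomic operation once, then
 * on failure spin on plain (cache-local) reads until the lock looks
 * free before retrying, which keeps bus traffic down on SMP.
 */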
static inline void __spin_lock(spinlock_t *lock)
{
        int ntries = 0;

        while(__spin_trylock(lock)) {
                while(__spin_is_locked(lock)) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

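/*
 * Reader/writer scheme: 'count' holds the number of active readers,
 * and a writer claims exclusive ownership by setting the RWLOCK_BIAS
 * bit.  Readers may enter only while the bias bit is clear.
 */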
static void __read_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (!(rwlock->count & RWLOCK_BIAS)) {
                        rwlock->count++;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count & RWLOCK_BIAS) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

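/*
 * A writer must wait until 'count' drains to zero (no readers and no
 * other writer) before it may set the bias bit.
 */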
static void __write_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (rwlock->count == 0) {
                        rwlock->count |= RWLOCK_BIAS;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count != 0) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

        if (!(rwlock->count & RWLOCK_BIAS))
                dbg("bug: write_unlock");

        rwlock->count &= ~RWLOCK_BIAS;
        __spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

        if (!rwlock->count)
                dbg("bug: read_unlock");

        if (rwlock->count & RWLOCK_BIAS)
                dbg("bug: read_unlock");

        rwlock->count--;
        __spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

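/*
 * The lock region lives inside the mapped tdb file at offset
 * header.rwlocks.  Entry 0 protects the allocation list (list -1) and
 * entries 1..hash_size protect the hash chains, hence the list+1
 * indexing below.
 */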
/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_lock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_lock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }
        return 0;
}

/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_unlock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_unlock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }

        return 0;
}

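/*
 * Called at database creation time: initialise one rwlock per hash
 * chain plus one for the allocation list and append the array to the
 * file, where it is later reached through the mmap'd header offset.
 */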
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
        unsigned size, i;
        tdb_rwlock_t *rwlocks;

        size = (hash_size + 1) * sizeof(tdb_rwlock_t);
        rwlocks = malloc(size);
        if (!rwlocks)
                return -1;

        for(i = 0; i < hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }

        /* Write it out (appending to end) */
        if (write(fd, rwlocks, size) != size) {
                free(rwlocks);
                return -1;
        }
        smp_machine = this_is_smp();
        free(rwlocks);
        return 0;
}

int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_rwlock_t *rwlocks;
        unsigned i;

        if (tdb->header.rwlocks == 0) return 0;
        if (!tdb->map_ptr) return -1;

        /* We're mmapped here */
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
        for(i = 0; i < tdb->header.hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }
        return 0;
}
#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

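/*
 * The pointer arithmetic below is effectively an offsetof() of the
 * rwlocks field within the on-disk header, so the cleared value can
 * be written back in place with lseek + write.
 */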
/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
                                - (char *)&tdb->header);

        tdb->header.rwlocks = 0;
        if (lseek(tdb->fd, off, SEEK_SET) != off
            || write(tdb->fd, (void *)&tdb->header.rwlocks,
                     sizeof(tdb->header.rwlocks))
            != sizeof(tdb->header.rwlocks))
                return -1;
        return 0;
}
#endif