chiark / gitweb /
4b17b1d9431e6e5022fe1d59e096c07e34945f46
[elogind.git] / tdb / spinlock.c
1 /* 
2    Unix SMB/CIFS implementation.
3    Samba database functions
4    Copyright (C) Anton Blanchard                   2001
5    
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 2 of the License, or
9    (at your option) any later version.
10    
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15    
16    You should have received a copy of the GNU General Public License
17    along with this program; if not, write to the Free Software
18    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20 /* udev defines */
21 #define STANDALONE
22 #define TDB_DEBUG
23 #define HAVE_MMAP       1
24
25
26 #if HAVE_CONFIG_H
27 #include <config.h>
28 #endif
29
30 #ifdef STANDALONE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <limits.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>
#include "tdb.h"
#include "spinlock.h"
42
43 #define DEBUG
44 #else
45 #include "includes.h"
46 #endif
47
48 #ifdef USE_SPINLOCKS
49
50 /*
51  * ARCH SPECIFIC
52  */
53
54 #if defined(SPARC_SPINLOCKS)
55
/*
 * Try once to acquire the lock, without blocking.
 * SPARC "ldstub" atomically loads the old byte and stores 0xff into it;
 * a zero result means the lock was free and is now ours.
 * Returns 0 on success, EBUSY if the lock was already held.
 */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	asm volatile("ldstub	[%1], %0"
		: "=r" (result)
		: "r" (lock)
		: "memory");

	return (result == 0) ? 0 : EBUSY;
}
67
/*
 * Release the lock.  The empty asm is a compiler barrier: it keeps the
 * critical section's stores from being reordered past the unlocking
 * store to *lock.  0 == unlocked on SPARC.
 */
static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}
73
/* Initialise the lock to the unlocked state (0 on SPARC). */
static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}
78
/* Non-zero if the lock is currently held (any non-zero value = locked). */
static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
83
84 #elif defined(POWERPC_SPINLOCKS) 
85
/*
 * Try once to acquire the lock, without blocking.
 * Uses a lwarx/stwcx. (load-and-reserve / store-conditional) sequence:
 * retry only if the reservation was lost; bail out immediately if the
 * word was non-zero (already locked).  The trailing isync keeps the
 * critical section from starting before the lock is taken.
 * result is 1 if we stored the lock word, 0 if it was already held.
 * Returns 0 on success, EBUSY if the lock was already held.
 */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	li		%0,0\n\
	bne-		2f\n\
	li		%0,1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(result)
	: "r"(lock)
	: "cr0", "memory");

	return (result == 1) ? 0 : EBUSY;
}
105
/*
 * Release the lock.  "eieio" orders the critical section's stores
 * before the unlocking store; the "memory" clobber is the matching
 * compiler barrier.  0 == unlocked on PowerPC.
 */
static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("eieio":::"memory");
	*lock = 0;
}
111
/* Initialise the lock to the unlocked state (0 on PowerPC). */
static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}
116
/* Non-zero if the lock is currently held (any non-zero value = locked). */
static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
121
122 #elif defined(INTEL_SPINLOCKS) 
123
/*
 * Try once to acquire the lock, without blocking.
 * NOTE: the x86 port uses an INVERTED convention — 1 means unlocked,
 * 0 means locked (see __spin_lock_init / __spin_unlock below).
 * xchgl atomically swaps 0 into *lock and returns the old value;
 * if the old value was positive (1), the lock was free and is now ours.
 * Returns 0 on success, EBUSY if the lock was already held.
 */
static inline int __spin_trylock(spinlock_t *lock)
{
	int oldval;

	asm volatile("xchgl %0,%1"
		: "=r" (oldval), "=m" (*lock)
		: "0" (0)
		: "memory");

	return oldval > 0 ? 0 : EBUSY;
}
135
/*
 * Release the lock by storing 1 (= unlocked in this port's inverted
 * convention).  The empty asm is a compiler barrier that keeps the
 * critical section's stores before the unlocking store.
 */
static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 1;
}
141
/* Initialise the lock to the unlocked state (1 in the x86 convention). */
static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 1;
}
146
/* Non-zero if the lock is currently held (anything other than 1 = locked). */
static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 1);
}
151
152 #elif defined(MIPS_SPINLOCKS) 
153
/*
 * MIPS load-linked: read the word at addr and open a reservation on it
 * for a following store_conditional().
 * NOTE(review): callers pass a spinlock_t * where unsigned long is
 * expected — this relies on pointers and unsigned long having the same
 * width; confirm for 64-bit MIPS ABIs.
 */
static inline unsigned int load_linked(unsigned long addr)
{
	unsigned int res;

	__asm__ __volatile__("ll\t%0,(%1)"
		: "=r" (res)
		: "r" (addr));

	return res;
}
164
/*
 * MIPS store-conditional: store value to addr only if the reservation
 * opened by load_linked() is still intact.  Returns non-zero if the
 * store succeeded, 0 if the reservation was lost and the caller must
 * retry.  (Same pointer-vs-unsigned-long caveat as load_linked.)
 */
static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
	unsigned int res;

	__asm__ __volatile__("sc\t%0,(%2)"
		: "=r" (res)
		: "0" (value), "r" (addr));
	return res;
}
174
/*
 * Try once to acquire the lock, without blocking.
 * ll/sc loop: bail out with EBUSY as soon as the word is seen non-zero;
 * retry only when the store-conditional loses its reservation.
 * Returns 0 on success, EBUSY if the lock was already held.
 */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int mw;

	do {
		mw = load_linked(lock);
		if (mw) 
			return EBUSY;
	} while (!store_conditional(lock, 1));

	/* compiler barrier: keep the critical section after the lock */
	asm volatile("":::"memory");

	return 0;
}
189
/*
 * Release the lock.  The empty asm is a compiler barrier that keeps the
 * critical section's stores before the unlocking store.  0 == unlocked.
 */
static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}
195
/* Initialise the lock to the unlocked state (0 on MIPS). */
static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}
200
/* Non-zero if the lock is currently held (any non-zero value = locked). */
static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
205
206 #else
207 #error Need to implement spinlock code in spinlock.c
208 #endif
209
210 /*
211  * OS SPECIFIC
212  */
213
/*
 * Give up the CPU so the current lock holder can make progress.
 * With USE_SCHED_YIELD this calls sched_yield(); otherwise it sleeps
 * for just over 2ms, because Linux busy-loops for shorter delays on
 * real-time tasks.  The nanosleep return value is deliberately
 * ignored — an interrupted sleep still yielded the CPU.
 */
static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	/* Linux will busy loop for delays < 2ms on real time tasks */
	struct timespec tm;

	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}
227
/*
 * Report whether this is an SMP machine.  No runtime probe is wired up
 * in this build, so it always reports uniprocessor; the only effect is
 * that the lock slow paths yield immediately instead of busy-looping
 * first (see __spin_lock and friends).
 */
static int this_is_smp(void)
{
	return 0;
}
232
233 /*
234  * GENERIC
235  */
236
237 static int smp_machine = 0;
238
/*
 * Acquire the spinlock, blocking until it is ours.
 * The inner loop waits for the lock to LOOK free (cheap read) before
 * retrying the atomic trylock, which avoids hammering the bus with
 * atomic operations.  On SMP it busy-waits up to MAX_BUSY_LOOPS
 * iterations before yielding; on UP it yields immediately since
 * spinning can never help.
 */
static inline void __spin_lock(spinlock_t *lock)
{
	int ntries = 0;

	while(__spin_trylock(lock)) {
		while(__spin_is_locked(lock)) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
251
/*
 * Take a shared (reader) lock on rwlock.
 * A reader may enter only while no writer holds the lock (RWLOCK_BIAS
 * clear in count); count then tracks the number of active readers.
 * On contention, the inner loop polls count WITHOUT holding the
 * spinlock — that read is a racy heuristic only; the real decision is
 * always re-made under rwlock->lock at the top of the outer loop.
 */
static void __read_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (!(rwlock->count & RWLOCK_BIAS)) {
			/* no writer: register ourselves as a reader */
			rwlock->count++;
			__spin_unlock(&rwlock->lock);
			return;
		}
	
		__spin_unlock(&rwlock->lock);

		/* writer active: wait until the bias bit looks clear */
		while(rwlock->count & RWLOCK_BIAS) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
274
/*
 * Take an exclusive (writer) lock on rwlock.
 * A writer may enter only when count is exactly 0 (no readers, no
 * writer); it then marks exclusivity by setting RWLOCK_BIAS in count.
 * As in __read_lock, the unlocked poll of count is only a heuristic;
 * the authoritative check happens under rwlock->lock.
 */
static void __write_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (rwlock->count == 0) {
			/* idle: claim exclusive ownership */
			rwlock->count |= RWLOCK_BIAS;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		/* readers or a writer active: wait until it looks idle */
		while(rwlock->count != 0) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
297
/*
 * Drop an exclusive (writer) lock by clearing RWLOCK_BIAS under the
 * spinlock.  The DEBUG check reports an unlock without a matching
 * write lock but deliberately carries on anyway.
 */
static void __write_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!(rwlock->count & RWLOCK_BIAS))
		fprintf(stderr, "bug: write_unlock\n");
#endif

	rwlock->count &= ~RWLOCK_BIAS;
	__spin_unlock(&rwlock->lock);
}
310
/*
 * Drop a shared (reader) lock by decrementing the reader count under
 * the spinlock.  The DEBUG checks report an unlock with no readers
 * registered, or while a writer holds the lock (RWLOCK_BIAS set) —
 * both are caller bugs — but deliberately carry on anyway.
 */
static void __read_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!rwlock->count)
		fprintf(stderr, "bug: read_unlock\n");

	if (rwlock->count & RWLOCK_BIAS)
		fprintf(stderr, "bug: read_unlock\n");
#endif

	rwlock->count--;
	__spin_unlock(&rwlock->lock);
}
326
327 /* TDB SPECIFIC */
328
/*
 * Lock one hash chain of the database.  list -1 is the alloc list, so
 * chain N lives at rwlocks[N+1].  rw_type selects a shared (F_RDLCK)
 * or exclusive (F_WRLCK) lock; the rwlock array lives inside the
 * mmapped file at header.rwlocks, hence the map_ptr requirement.
 * Returns 0 on success, -1 if the file is not mapped or rw_type is
 * not a recognised lock type (TDB_ERR_LOCK).
 */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_lock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_lock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}
	return 0;
}
351
/*
 * Unlock one hash chain of the database — the counterpart of
 * tdb_spinlock(); rw_type must match the type used to lock.
 * Returns 0 on success, -1 if the file is not mapped or rw_type is
 * not a recognised lock type (TDB_ERR_LOCK).
 */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_unlock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_unlock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}

	return 0;
}
375
376 int tdb_create_rwlocks(int fd, unsigned int hash_size)
377 {
378         unsigned size, i;
379         tdb_rwlock_t *rwlocks;
380
381         size = (hash_size + 1) * sizeof(tdb_rwlock_t);
382         rwlocks = malloc(size);
383         if (!rwlocks)
384                 return -1;
385
386         for(i = 0; i < hash_size+1; i++) {
387                 __spin_lock_init(&rwlocks[i].lock);
388                 rwlocks[i].count = 0;
389         }
390
391         /* Write it out (appending to end) */
392         if (write(fd, rwlocks, size) != size) {
393                 free(rwlocks);
394                 return -1;
395         }
396         smp_machine = this_is_smp();
397         free(rwlocks);
398         return 0;
399 }
400
/*
 * Re-initialise every rwlock in an already-mapped tdb file — used to
 * recover locks left dangling (e.g. by a crashed process).
 * Returns 0 on success or when the file has no spinlock area at all,
 * -1 if the file is not mapped.
 */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_rwlock_t *rwlocks;
	unsigned i;

	if (tdb->header.rwlocks == 0) return 0;
	if (!tdb->map_ptr) return -1;

	/* We're mmapped here */
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
	for(i = 0; i < tdb->header.hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}
	return 0;
}
417 #else
418 int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
419 int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
420 int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
421
/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	/* Byte offset of the rwlocks field within the on-disk header,
	 * computed from the in-memory header layout. */
	tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
				- (char *)&tdb->header);

	/* Zero the in-memory copy, then persist just that one field so
	 * future opens see no spinlock area. */
	tdb->header.rwlocks = 0;
	if (lseek(tdb->fd, off, SEEK_SET) != off
	    || write(tdb->fd, (void *)&tdb->header.rwlocks,
		     sizeof(tdb->header.rwlocks)) 
	    != sizeof(tdb->header.rwlocks))
		return -1;
	return 0;
}
436 #endif