2 Unix SMB/CIFS implementation.
3 Samba database functions
4 Copyright (C) Anton Blanchard 2001
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
53 #if defined(SPARC_SPINLOCKS)

/* Non-blocking lock attempt.  ldstub atomically loads the lock byte and
 * stores 0xff into it, so a zero result means the lock was free and is
 * now ours.  Returns 0 on success, EBUSY if already held.
 * (Operand list and result declaration are on lines elided from this
 * listing.) */
55 static inline int __spin_trylock(spinlock_t *lock)
59 asm volatile("ldstub [%1], %0"
64 return (result == 0) ? 0 : EBUSY;

/* Release the lock.  The empty asm with a "memory" clobber is a compiler
 * barrier: stores inside the critical section cannot be reordered past
 * the unlock store (which is on an elided line). */
67 static inline void __spin_unlock(spinlock_t *lock)
69 asm volatile("":::"memory");

/* Set a lock to the unlocked state (body elided in this listing). */
73 static inline void __spin_lock_init(spinlock_t *lock)

/* Cheap, non-atomic peek at the lock state, used by the busy-wait
 * polling loop in __spin_lock (body elided). */
78 static inline int __spin_is_locked(spinlock_t *lock)
83 #elif defined(POWERPC_SPINLOCKS)

/* Non-blocking lock attempt.  The asm body is elided from this listing;
 * NOTE(review): presumably a lwarx/stwcx. reservation sequence — confirm
 * against the full source.  result == 1 signals the atomic update
 * succeeded, so return 0 (got the lock) there and EBUSY otherwise. */
85 static inline int __spin_trylock(spinlock_t *lock)
102 return (result == 1) ? 0 : EBUSY;

/* Release the lock.  eieio orders prior stores ahead of the unlock store
 * (on an elided line), and "memory" keeps the compiler from reordering
 * across it. */
105 static inline void __spin_unlock(spinlock_t *lock)
107 asm volatile("eieio":::"memory");

/* Set a lock to the unlocked state (body elided in this listing). */
111 static inline void __spin_lock_init(spinlock_t *lock)

/* Non-atomic peek at the lock state for busy-wait polling (body elided). */
116 static inline int __spin_is_locked(spinlock_t *lock)
121 #elif defined(INTEL_SPINLOCKS)

/* Non-blocking lock attempt.  xchgl atomically swaps oldval with the lock
 * word (xchg with a memory operand is implicitly locked on x86).  The
 * value swapped in comes from an input constraint on an elided line.
 * A positive old value means the lock was free, so return 0; otherwise
 * EBUSY.  NOTE(review): implies the convention here is "positive ==
 * unlocked" — confirm against __spin_lock_init in the full source. */
123 static inline int __spin_trylock(spinlock_t *lock)
127 asm volatile("xchgl %0,%1"
128 : "=r" (oldval), "=m" (*lock)
132 return oldval > 0 ? 0 : EBUSY;

/* Release the lock.  Empty asm + "memory" clobber is a compiler barrier
 * protecting the unlock store on an elided line. */
135 static inline void __spin_unlock(spinlock_t *lock)
137 asm volatile("":::"memory");

/* Set a lock to the unlocked state (body elided in this listing). */
141 static inline void __spin_lock_init(spinlock_t *lock)

/* Non-atomic peek at the lock state for busy-wait polling (body elided). */
146 static inline int __spin_is_locked(spinlock_t *lock)
151 #elif defined(MIPS_SPINLOCKS)

/* Load-linked: ll reads the word at addr and opens a reservation that a
 * following sc will test.  (Output constraint lines elided.) */
153 static inline unsigned int load_linked(unsigned long addr)
157 __asm__ __volatile__("ll\t%0,(%1)"

/* Store-conditional: sc stores value at addr only if the reservation
 * from the matching ll is still intact; the result (success flag) is
 * returned via the "0" tied operand. */
164 static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
168 __asm__ __volatile__("sc\t%0,(%2)"
170 : "0" (value), "r" (addr));

/* Non-blocking lock attempt via the classic LL/SC retry loop: reload the
 * lock word and retry until store_conditional(lock, 1) succeeds.  The
 * loop head, mw check and return are on elided lines.  The trailing
 * barrier keeps critical-section accesses from moving above the
 * acquisition. */
174 static inline int __spin_trylock(spinlock_t *lock)
179 mw = load_linked(lock);
182 } while (!store_conditional(lock, 1));
184 asm volatile("":::"memory");

/* Release the lock; barrier protects the unlock store (elided line). */
189 static inline void __spin_unlock(spinlock_t *lock)
191 asm volatile("":::"memory");

/* Set a lock to the unlocked state (body elided in this listing). */
195 static inline void __spin_lock_init(spinlock_t *lock)

/* Non-atomic peek at the lock state for busy-wait polling (body elided). */
200 static inline int __spin_is_locked(spinlock_t *lock)

/* No spinlock primitives for this architecture: fail the build rather
 * than silently miscompile. */
206 #error Need to implement spinlock code in spinlock.c
/* Give up the CPU while waiting for a contended lock.  With
 * USE_SCHED_YIELD the sched_yield() call (on an elided line) is used;
 * otherwise sleep for just over 2ms, because shorter nanosleep delays
 * are busy-looped by Linux for real-time tasks and would defeat the
 * purpose of yielding. */
213 static void yield_cpu(void)
217 #ifdef USE_SCHED_YIELD
220 /* Linux will busy loop for delays < 2ms on real time tasks */
222 tm.tv_nsec = 2000000L + 1;
223 nanosleep(&tm, NULL);
/* Report whether this machine has more than one CPU (body elided from
 * this listing; NOTE(review): presumably sysconf-based — confirm).
 * Used to decide between busy-looping and yielding in the lock waits. */
227 static int this_is_smp(void)
/* Cached result of this_is_smp(), set in tdb_create_rwlocks().  When
 * non-zero the waiters below busy-loop up to MAX_BUSY_LOOPS before
 * yielding the CPU. */
236 static int smp_machine = 0;
/* Blocking acquire built on __spin_trylock.  While the trylock fails,
 * poll the cheap non-atomic __spin_is_locked instead of hammering the
 * atomic operation; on SMP, busy-loop up to MAX_BUSY_LOOPS iterations
 * before yielding the CPU (yield_cpu call is on an elided line). */
238 static inline void __spin_lock(spinlock_t *lock)
242 while(__spin_trylock(lock)) {
243 while(__spin_is_locked(lock)) {
244 if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
/* Acquire a shared (reader) lock.  The spinlock guards rwlock->count;
 * if no writer holds the RWLOCK_BIAS bit, the reader count is bumped
 * (increment on an elided line) and we return.  Otherwise drop the
 * spinlock and wait for the writer bit to clear — busy-looping on SMP
 * up to MAX_BUSY_LOOPS, then yielding — before retrying from the top
 * (outer retry loop is on elided lines). */
251 static void __read_lock(tdb_rwlock_t *rwlock)
256 __spin_lock(&rwlock->lock);
258 if (!(rwlock->count & RWLOCK_BIAS)) {
260 __spin_unlock(&rwlock->lock);
264 __spin_unlock(&rwlock->lock);
266 while(rwlock->count & RWLOCK_BIAS) {
267 if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
/* Acquire an exclusive (writer) lock.  A writer may only enter when the
 * count is exactly 0 (no readers, no writer); it then claims the lock by
 * setting the RWLOCK_BIAS bit.  Otherwise drop the spinlock and wait for
 * the count to drain to zero — busy-looping on SMP up to MAX_BUSY_LOOPS,
 * then yielding — before retrying (outer retry loop on elided lines). */
274 static void __write_lock(tdb_rwlock_t *rwlock)
279 __spin_lock(&rwlock->lock);
281 if (rwlock->count == 0) {
282 rwlock->count |= RWLOCK_BIAS;
283 __spin_unlock(&rwlock->lock);
287 __spin_unlock(&rwlock->lock);
289 while(rwlock->count != 0) {
290 if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
/* Release an exclusive lock: clear the RWLOCK_BIAS bit under the
 * spinlock.  The dbg() call flags a release without a matching write
 * lock (debug-only guard lines elided from this listing). */
297 static void __write_unlock(tdb_rwlock_t *rwlock)
299 __spin_lock(&rwlock->lock);
301 if (!(rwlock->count & RWLOCK_BIAS))
302 dbg("bug: write_unlock");
304 rwlock->count &= ~RWLOCK_BIAS;
305 __spin_unlock(&rwlock->lock);
/* Release a shared lock: decrement the reader count under the spinlock
 * (decrement and its zero-count sanity check are on elided lines).  The
 * dbg() calls flag imbalance bugs: unlocking with no readers, or while
 * the writer bias bit is set. */
308 static void __read_unlock(tdb_rwlock_t *rwlock)
310 __spin_lock(&rwlock->lock);
313 dbg("bug: read_unlock");
315 if (rwlock->count & RWLOCK_BIAS)
316 dbg("bug: read_unlock");
319 __spin_unlock(&rwlock->lock);
324 /* lock a list in the database. list -1 is the alloc list */
/* Requires the database to be mmapped: the rwlock array lives inside the
 * mapped file at offset header.rwlocks.  Index list+1 maps list == -1
 * (the allocation list) to slot 0 and hash chains to slots 1..n.
 * rw_type selects __read_lock vs __write_lock (the selector lines are
 * elided; NOTE(review): presumably F_RDLCK/F_WRLCK — confirm).  An
 * unrecognised rw_type falls through to the TDB_ERR_LOCK return.
 * Returns 0 on success, -1 on failure. */
325 int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
327 tdb_rwlock_t *rwlocks;
329 if (!tdb->map_ptr) return -1;
330 rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
334 __read_lock(&rwlocks[list+1]);
338 __write_lock(&rwlocks[list+1]);
342 return TDB_ERRCODE(TDB_ERR_LOCK, -1);
347 /* unlock the database. */
/* Mirror of tdb_spinlock: locate the in-file rwlock array and release
 * slot list+1 as a reader or writer according to rw_type (selector lines
 * elided).  An unrecognised rw_type falls through to the TDB_ERR_LOCK
 * return.  Returns 0 on success, -1 on failure. */
348 int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
350 tdb_rwlock_t *rwlocks;
352 if (!tdb->map_ptr) return -1;
353 rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
357 __read_unlock(&rwlocks[list+1]);
361 __write_unlock(&rwlocks[list+1]);
365 return TDB_ERRCODE(TDB_ERR_LOCK, -1);
/* Build the spinlock area for a new database: hash_size+1 rwlock records
 * (the extra slot 0 is for the allocation list), initialised unlocked,
 * appended to fd at its current offset.  Also caches the machine's
 * SMP-ness for the lock wait loops.
 * NOTE(review): the malloc NULL-check, free() of rwlocks and the return
 * statements are on lines elided from this listing — confirm the buffer
 * is freed on both the success and write-failure paths in the full
 * source. */
371 int tdb_create_rwlocks(int fd, unsigned int hash_size)
374 tdb_rwlock_t *rwlocks;
376 size = (hash_size + 1) * sizeof(tdb_rwlock_t);
377 rwlocks = malloc(size);
381 for(i = 0; i < hash_size+1; i++) {
382 __spin_lock_init(&rwlocks[i].lock);
383 rwlocks[i].count = 0;
386 /* Write it out (appending to end) */
387 if (write(fd, rwlocks, size) != size) {
391 smp_machine = this_is_smp();
/* Forcibly reset every rwlock in an existing (mmapped) database back to
 * the unlocked state — e.g. after a crash left locks held.  A database
 * with no spinlock area (header.rwlocks == 0) is trivially clean;
 * without a mapping there is nothing we can safely reset, so fail. */
396 int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
398 tdb_rwlock_t *rwlocks;
401 if (tdb->header.rwlocks == 0) return 0;
402 if (!tdb->map_ptr) return -1;
404 /* We're mmapped here */
405 rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
406 for(i = 0; i < tdb->header.hash_size+1; i++) {
407 __spin_lock_init(&rwlocks[i].lock);
408 rwlocks[i].count = 0;
/* Spinlock support not compiled in: there is no lock area to create,
 * so succeed without touching the file. */
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
	return 0;
}
414 int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
415 int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
417 /* Non-spinlock version: remove spinlock pointer */
/* Compute the byte offset of the rwlocks field within the on-disk
 * header, zero the in-memory copy, and write just that field back to
 * the file so the database no longer advertises a spinlock area.
 * NOTE(review): the failure return and final return 0 are on lines
 * elided from this listing. */
418 int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
420 tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
421 - (char *)&tdb->header);
423 tdb->header.rwlocks = 0;
424 if (lseek(tdb->fd, off, SEEK_SET) != off
425 || write(tdb->fd, (void *)&tdb->header.rwlocks,
426 sizeof(tdb->header.rwlocks))
427 != sizeof(tdb->header.rwlocks))