[eglibc.git] / nptl / sysdeps / pthread / bits / libc-lock.h
/* libc-internal interface for mutex locks.  NPTL version.
   Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef _BITS_LIBC_LOCK_H
#define _BITS_LIBC_LOCK_H 1

#include <pthread.h>
#define __need_NULL
#include <stddef.h>


/* Fortunately Linux now has a means of locking that is realtime safe
   without the aid of the thread library.  We also need no fancy
   options such as error-checking mutexes; we only need simple locks,
   possibly recursive ones.  These can be implemented easily and
   cheaply using futexes.  We use them everywhere except in ld.so,
   since ld.so might be used on old kernels with a different libc.so.  */
#ifdef _LIBC
# include <lowlevellock.h>
# include <tls.h>
# include <pthread-functions.h>
# include <errno.h> /* For EBUSY.  */
# include <gnu/option-groups.h> /* For __OPTION_EGLIBC_BIG_MACROS.  */
#endif

/* Mutex type.  */
#if defined _LIBC || defined _IO_MTSAFE_IO
# if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
typedef pthread_mutex_t __libc_lock_t;
typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
# else
typedef int __libc_lock_t;
typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
# endif
typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
# ifdef __USE_UNIX98
typedef pthread_rwlock_t __libc_rwlock_t;
# else
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
# endif
#else
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
#endif

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;

/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of libc.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
#define __libc_lock_define_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
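
/* Illustrative usage sketch (editor's addition, not part of the original
   header): a lock private to one file is simply defined with `static',
   while a lock shared between libc modules is declared in an internal
   header and defined in exactly one compilation unit.  The lock name
   below is hypothetical.

     -- a lock private to one file --
     __libc_lock_define_initialized (static, __hypothetical_lock)

     -- or, shared across files: declare it in an internal header ... --
     __libc_lock_define (extern, __hypothetical_lock)
     -- ... and define it, without `extern', in exactly one file --
     __libc_lock_define_initialized (, __hypothetical_lock)
*/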

/* Define an initialized lock variable NAME with storage class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  (Except on PA-RISC and other odd architectures, where
   initialized locks must be set to one due to the lack of normal
   atomic operations.) */

#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
# endif
#else
# if __LT_SPINLOCK_INIT == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
# endif
#endif

#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;

/* Define an initialized recursive lock variable NAME with storage
   class CLASS.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
# else
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# endif
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  { LLL_LOCK_INITIALIZER, 0, NULL }
#else
# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#endif

#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

#define __rtld_lock_initialize(NAME) \
  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)


/* If we check for a weakly referenced symbol and then perform a
   normal jump to it, the code generated for some platforms in case of
   PIC is unnecessarily slow.  What would happen is that the function
   is first referenced as data and then it is called indirectly
   through the PLT.  We can make this a direct jump.  */
#ifdef __PIC__
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
                    _fn != NULL ? (*_fn) ARGS : ELSE; }))
#else
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
#endif
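
/* Illustrative sketch (editor's addition): __libc_maybe_call calls FUNC
   only if the weak symbol is resolved and otherwise yields ELSE.  A
   hypothetical call site:

     int err = __libc_maybe_call (__pthread_mutex_trylock, (&mutex), 0);

   When libpthread is not linked in, __pthread_mutex_trylock is a NULL
   weak reference and the expression evaluates to 0 without any call.  */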

/* Call thread functions through the function pointer table.  */
#if defined SHARED && !defined NOT_IN_libc
# define PTFAVAIL(NAME) __libc_pthread_functions_init
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  PTHFCT_CALL (ptr_##FUNC, ARGS)
#else
# define PTFAVAIL(NAME) (NAME != NULL)
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  __libc_maybe_call (FUNC, ARGS, ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  FUNC ARGS
#endif
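
/* Illustrative sketch (editor's addition): within the shared libc,
   __libc_ptf_call indirects through the pthread function pointer table
   that libpthread fills in when it is loaded.  A hypothetical use:

     __libc_ptf_call (__pthread_setspecific, (key, value), 0);

   expands to a call through the table's ptr_ member for that function
   once __libc_pthread_functions_init is set, and to ELSE (here 0) in a
   process that never loaded libpthread.  */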


/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
#else
# define __libc_lock_init(NAME) \
  __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#if defined SHARED && !defined NOT_IN_libc
/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
   inefficient.  */
# define __libc_rwlock_init(NAME) \
  (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
#else
# define __libc_rwlock_init(NAME) \
  __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
#endif

/* Same as above, but this time we initialize a recursive mutex.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init_recursive(NAME) \
  ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
#else
# define __libc_lock_init_recursive(NAME) \
  do {                                                                        \
    if (__pthread_mutex_init != NULL)                                         \
      {                                                                       \
        pthread_mutexattr_t __attr;                                           \
        __pthread_mutexattr_init (&__attr);                                   \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
        __pthread_mutex_init (&(NAME).mutex, &__attr);                        \
        __pthread_mutexattr_destroy (&__attr);                                \
      }                                                                       \
  } while (0)
#endif

#define __rtld_lock_init_recursive(NAME) \
  do {                                                                        \
    if (__pthread_mutex_init != NULL)                                         \
      {                                                                       \
        pthread_mutexattr_t __attr;                                           \
        __pthread_mutexattr_init (&__attr);                                   \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
        __pthread_mutex_init (&(NAME).mutex, &__attr);                        \
        __pthread_mutexattr_destroy (&__attr);                                \
      }                                                                       \
  } while (0)

/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini(NAME) ((void) 0)
#else
# define __libc_lock_fini(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#if defined SHARED && !defined NOT_IN_libc
# define __libc_rwlock_fini(NAME) ((void) 0)
#else
# define __libc_rwlock_fini(NAME) \
  __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
#endif

/* Finalize recursive named lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini_recursive(NAME) ((void) 0)
#else
# define __libc_lock_fini_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif

/* Lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if __OPTION_EGLIBC_BIG_MACROS != 1
/* EGLIBC: Declare wrapper function for a big macro if either
   !__OPTION_EGLIBC_BIG_MACROS or we are using a back door from
   small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2).  */
extern void __libc_lock_lock_fn (__libc_lock_t *);
libc_hidden_proto (__libc_lock_lock_fn);
# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
# if __OPTION_EGLIBC_BIG_MACROS
#  define __libc_lock_lock(NAME) \
  ({ lll_lock (NAME, LLL_PRIVATE); 0; })
# else
#  define __libc_lock_lock(NAME) \
  __libc_lock_lock_fn (&(NAME))
# endif /* __OPTION_EGLIBC_BIG_MACROS */
#else
# define __libc_lock_lock(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
#define __libc_rwlock_rdlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
#define __libc_rwlock_wrlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)

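/* Illustrative sketch (editor's addition): the lock and unlock macros are
   used to bracket short critical sections inside libc.  The names here
   are hypothetical:

     __libc_lock_define_initialized (static, __hypothetical_lock)

     void
     __hypothetical_update (int value)
     {
       __libc_lock_lock (__hypothetical_lock);
       -- modify the state protected by the lock --
       __libc_lock_unlock (__hypothetical_lock);
     }

   __libc_rwlock_rdlock and __libc_rwlock_wrlock pair with
   __libc_rwlock_unlock in the same way.  */
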
/* Lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if __OPTION_EGLIBC_BIG_MACROS != 1
/* EGLIBC: Declare wrapper function for a big macro if either
   !__OPTION_EGLIBC_BIG_MACROS or we are using a back door from
   small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2).  */
extern void __libc_lock_lock_recursive_fn (__libc_lock_recursive_t *);
libc_hidden_proto (__libc_lock_lock_recursive_fn);
# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
# if __OPTION_EGLIBC_BIG_MACROS
#  define __libc_lock_lock_recursive(NAME) \
  do {                                                                        \
    void *self = THREAD_SELF;                                                 \
    if ((NAME).owner != self)                                                 \
      {                                                                       \
        lll_lock ((NAME).lock, LLL_PRIVATE);                                  \
        (NAME).owner = self;                                                  \
      }                                                                       \
    ++(NAME).cnt;                                                             \
  } while (0)
# else
#  define __libc_lock_lock_recursive(NAME) \
  __libc_lock_lock_recursive_fn (&(NAME))
# endif /* __OPTION_EGLIBC_BIG_MACROS */
#else
# define __libc_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
#endif

/* Try to lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if __OPTION_EGLIBC_BIG_MACROS != 1
/* EGLIBC: Declare wrapper function for a big macro if either
   !__OPTION_EGLIBC_BIG_MACROS or we are using a back door from
   small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2).  */
extern int __libc_lock_trylock_fn (__libc_lock_t *);
libc_hidden_proto (__libc_lock_trylock_fn);
# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
# if __OPTION_EGLIBC_BIG_MACROS
#  define __libc_lock_trylock(NAME) \
  lll_trylock (NAME)
# else
#  define __libc_lock_trylock(NAME) \
  __libc_lock_trylock_fn (&(NAME))
# endif /* __OPTION_EGLIBC_BIG_MACROS */
#else
# define __libc_lock_trylock(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
#define __libc_rwlock_tryrdlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
#define __libc_rwlock_trywrlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)

/* Try to lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if __OPTION_EGLIBC_BIG_MACROS != 1
/* EGLIBC: Declare wrapper function for a big macro if either
   !__OPTION_EGLIBC_BIG_MACROS or we are using a back door from
   small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2).  */
extern int __libc_lock_trylock_recursive_fn (__libc_lock_recursive_t *);
libc_hidden_proto (__libc_lock_trylock_recursive_fn);
# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
# if __OPTION_EGLIBC_BIG_MACROS
#  define __libc_lock_trylock_recursive(NAME) \
  ({                                                                          \
    int result = 0;                                                           \
    void *self = THREAD_SELF;                                                 \
    if ((NAME).owner != self)                                                 \
      {                                                                       \
        if (lll_trylock ((NAME).lock) == 0)                                   \
          {                                                                   \
            (NAME).owner = self;                                              \
            (NAME).cnt = 1;                                                   \
          }                                                                   \
        else                                                                  \
          result = EBUSY;                                                     \
      }                                                                       \
    else                                                                      \
      ++(NAME).cnt;                                                           \
    result;                                                                   \
  })
# else
#  define __libc_lock_trylock_recursive(NAME) \
  __libc_lock_trylock_recursive_fn (&(NAME))
# endif /* __OPTION_EGLIBC_BIG_MACROS */
#else
# define __libc_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif

#define __rtld_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)

/* Unlock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if __OPTION_EGLIBC_BIG_MACROS != 1
/* EGLIBC: Declare wrapper function for a big macro if either
   !__OPTION_EGLIBC_BIG_MACROS, or we are using a back door from
   small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2).  */
extern void __libc_lock_unlock_fn (__libc_lock_t *);
libc_hidden_proto (__libc_lock_unlock_fn);
# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
# if __OPTION_EGLIBC_BIG_MACROS
#  define __libc_lock_unlock(NAME) \
  lll_unlock (NAME, LLL_PRIVATE)
# else
#  define __libc_lock_unlock(NAME) \
  __libc_lock_unlock_fn (&(NAME))
# endif /* __OPTION_EGLIBC_BIG_MACROS */
#else
# define __libc_lock_unlock(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
#define __libc_rwlock_unlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)

/* Unlock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if __OPTION_EGLIBC_BIG_MACROS != 1
/* EGLIBC: Declare wrapper function for a big macro if either
   !__OPTION_EGLIBC_BIG_MACROS, or we are using a back door from
   small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2).  */
extern void __libc_lock_unlock_recursive_fn (__libc_lock_recursive_t *);
libc_hidden_proto (__libc_lock_unlock_recursive_fn);
# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
# if __OPTION_EGLIBC_BIG_MACROS
/* We do no error checking here.  */
#  define __libc_lock_unlock_recursive(NAME) \
  do {                                                                        \
    if (--(NAME).cnt == 0)                                                    \
      {                                                                       \
        (NAME).owner = NULL;                                                  \
        lll_unlock ((NAME).lock, LLL_PRIVATE);                                \
      }                                                                       \
  } while (0)
# else
#  define __libc_lock_unlock_recursive(NAME) \
  __libc_lock_unlock_recursive_fn (&(NAME))
# endif /* __OPTION_EGLIBC_BIG_MACROS */
#else
# define __libc_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif

#if defined _LIBC && defined SHARED
# define __rtld_lock_default_lock_recursive(lock) \
  ++((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_default_unlock_recursive(lock) \
  --((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_lock_recursive(NAME) \
  GL(dl_rtld_lock_recursive) (&(NAME).mutex)

# define __rtld_lock_unlock_recursive(NAME) \
  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
#else
# define __rtld_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)

# define __rtld_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
#endif

/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call the handler only on the first call.  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do {                                                                        \
    if (PTFAVAIL (__pthread_once))                                            \
      __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL),               \
                                               INIT_FUNCTION));               \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {                           \
      INIT_FUNCTION ();                                                       \
      (ONCE_CONTROL) |= 2;                                                    \
    }                                                                         \
  } while (0)
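
/* Illustrative sketch (editor's addition): one-time initialization pairs
   __libc_once_define with __libc_once.  The names below are hypothetical:

     __libc_once_define (static, __hypothetical_once);

     static void
     __hypothetical_init (void)
     {
       -- perform the one-time setup --
     }

     void
     __hypothetical_user (void)
     {
       __libc_once (__hypothetical_once, __hypothetical_init);
     }
*/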


/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
                                   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
                                  int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
                                         void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
                                          int execute);

/* Start critical region with cleanup.  */
#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
  { struct _pthread_cleanup_buffer _buffer;                                   \
    int _avail;                                                               \
    if (DOIT) {                                                               \
      _avail = PTFAVAIL (_pthread_cleanup_push_defer);                        \
      if (_avail) {                                                           \
        __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT,  \
                                                              ARG));          \
      } else {                                                                \
        _buffer.__routine = (FCT);                                            \
        _buffer.__arg = (ARG);                                                \
      }                                                                       \
    } else {                                                                  \
      _avail = 0;                                                             \
    }

/* End critical region with cleanup.  */
#define __libc_cleanup_region_end(DOIT) \
    if (_avail) {                                                             \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT)                                                          \
      _buffer.__routine (_buffer.__arg);                                      \
  }

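/* Illustrative sketch (editor's addition): the region macros open and
   close a brace-balanced block, so they must appear as a matched pair
   in the same function.  The function and handler names below are
   hypothetical:

     static void
     __hypothetical_cleanup (void *arg)
     {
       -- release whatever ARG refers to --
     }

     void
     __hypothetical_op (void *arg)
     {
       __libc_cleanup_region_start (1, __hypothetical_cleanup, arg);
       -- code that may be cancelled --
       __libc_cleanup_region_end (0);
     }
*/
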
/* Sometimes we have to exit the block in the middle.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) {                                                             \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT)                                                          \
      _buffer.__routine (_buffer.__arg)


/* Normal cleanup handling, based on C cleanup attribute.  */
__extern_inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
  if (f->__do_it)
    f->__cancel_routine (f->__cancel_arg);
}

#define __libc_cleanup_push(fct, arg) \
  do {                                                                        \
    struct __pthread_cleanup_frame __clframe                                  \
      __attribute__ ((__cleanup__ (__libc_cleanup_routine)))                  \
      = { .__cancel_routine = (fct), .__cancel_arg = (arg),                   \
          .__do_it = 1 };

#define __libc_cleanup_pop(execute) \
    __clframe.__do_it = (execute);                                            \
  } while (0)
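
/* Illustrative sketch (editor's addition): __libc_cleanup_push and
   __libc_cleanup_pop likewise expand to the opening and closing of a
   do/while block and therefore must be paired in the same scope.  A
   hypothetical fragment, reusing a cleanup handler as above:

     __libc_cleanup_push (__hypothetical_cleanup, arg);
     -- code whose cleanup is driven by the __cleanup__ attribute --
     __libc_cleanup_pop (0);
*/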


/* Create thread-specific key.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  __libc_ptf_call (__pthread_getspecific, (KEY), NULL)

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
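
/* Illustrative sketch (editor's addition): thread-specific data inside
   libc is keyed by a __libc_key_t.  The key, destructor, and pointer
   names here are hypothetical:

     static __libc_key_t __hypothetical_key;

     -- once per process, e.g. from a __libc_once handler --
     __libc_key_create (&__hypothetical_key, __hypothetical_destructor);

     -- in each thread --
     __libc_setspecific (__hypothetical_key, data);
     void *cur = __libc_getspecific (__hypothetical_key);
*/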


/* Register handlers to execute before and after `fork'.  Note that the
   last parameter is NULL.  The handlers registered by the libc are
   never removed so this is OK.  */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
  __register_atfork (PREPARE, PARENT, CHILD, NULL)
extern int __register_atfork (void (*__prepare) (void),
                              void (*__parent) (void),
                              void (*__child) (void),
                              void *__dso_handle);
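
/* Illustrative sketch (editor's addition): a libc subsystem that owns a
   lock usually registers fork handlers so the lock is left in a sane
   state in the child.  The handler names are hypothetical:

     __libc_atfork (__hypothetical_prepare,
                    __hypothetical_parent,
                    __hypothetical_child);
*/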

/* Functions that are used by this file and are internal to the GNU C
   library.  */

extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
                                 __const pthread_mutexattr_t *__mutex_attr);

extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);

extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);

extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
                                        int __kind);

#ifdef __USE_UNIX98
extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
                                  __const pthread_rwlockattr_t *__attr);

extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
#endif

extern int __pthread_key_create (pthread_key_t *__key,
                                 void (*__destr_function) (void *));

extern int __pthread_setspecific (pthread_key_t __key,
                                  __const void *__pointer);

extern void *__pthread_getspecific (pthread_key_t __key);

extern int __pthread_once (pthread_once_t *__once_control,
                           void (*__init_routine) (void));

extern int __pthread_atfork (void (*__prepare) (void),
                             void (*__parent) (void),
                             void (*__child) (void));



/* Make the pthread functions weak so that we can elide them from
   single-threaded processes.  */
#ifndef __NO_WEAK_PTHREAD_ALIASES
# ifdef weak_extern
#  if _LIBC
#   include <bp-sym.h>
#  else
#   define BP_SYM(sym) sym
#  endif
weak_extern (BP_SYM (__pthread_mutex_init))
weak_extern (BP_SYM (__pthread_mutex_destroy))
weak_extern (BP_SYM (__pthread_mutex_lock))
weak_extern (BP_SYM (__pthread_mutex_trylock))
weak_extern (BP_SYM (__pthread_mutex_unlock))
weak_extern (BP_SYM (__pthread_mutexattr_init))
weak_extern (BP_SYM (__pthread_mutexattr_destroy))
weak_extern (BP_SYM (__pthread_mutexattr_settype))
weak_extern (BP_SYM (__pthread_rwlock_init))
weak_extern (BP_SYM (__pthread_rwlock_destroy))
weak_extern (BP_SYM (__pthread_rwlock_rdlock))
weak_extern (BP_SYM (__pthread_rwlock_tryrdlock))
weak_extern (BP_SYM (__pthread_rwlock_wrlock))
weak_extern (BP_SYM (__pthread_rwlock_trywrlock))
weak_extern (BP_SYM (__pthread_rwlock_unlock))
weak_extern (BP_SYM (__pthread_key_create))
weak_extern (BP_SYM (__pthread_setspecific))
weak_extern (BP_SYM (__pthread_getspecific))
weak_extern (BP_SYM (__pthread_once))
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
weak_extern (BP_SYM (_pthread_cleanup_push_defer))
weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
weak_extern (BP_SYM (pthread_setcancelstate))
# else
#  pragma weak __pthread_mutex_init
#  pragma weak __pthread_mutex_destroy
#  pragma weak __pthread_mutex_lock
#  pragma weak __pthread_mutex_trylock
#  pragma weak __pthread_mutex_unlock
#  pragma weak __pthread_mutexattr_init
#  pragma weak __pthread_mutexattr_destroy
#  pragma weak __pthread_mutexattr_settype
#  pragma weak __pthread_rwlock_destroy
#  pragma weak __pthread_rwlock_rdlock
#  pragma weak __pthread_rwlock_tryrdlock
#  pragma weak __pthread_rwlock_wrlock
#  pragma weak __pthread_rwlock_trywrlock
#  pragma weak __pthread_rwlock_unlock
#  pragma weak __pthread_key_create
#  pragma weak __pthread_setspecific
#  pragma weak __pthread_getspecific
#  pragma weak __pthread_once
#  pragma weak __pthread_initialize
#  pragma weak __pthread_atfork
#  pragma weak _pthread_cleanup_push_defer
#  pragma weak _pthread_cleanup_pop_restore
#  pragma weak pthread_setcancelstate
# endif
#endif

#endif  /* bits/libc-lock.h */