/* Copyright (C) 2002-2004, 2006, 2007, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <stdint.h>
#include <tls.h>	/* For tcbhead_t.  */

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#ifdef UP
# define LOCK_PREFIX	/* nothing */
#else
# define LOCK_PREFIX "lock;"
#endif

#if __GNUC_PREREQ (4, 1)
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))
#else
# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile (LOCK_PREFIX "cmpxchgb %b2, %1" \
		       : "=a" (ret), "=m" (*mem) \
		       : "q" (newval), "m" (*mem), "0" (oldval)); \
     ret; })

# define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile (LOCK_PREFIX "cmpxchgw %w2, %1" \
		       : "=a" (ret), "=m" (*mem) \
		       : "r" (newval), "m" (*mem), "0" (oldval)); \
     ret; })

# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile (LOCK_PREFIX "cmpxchgl %2, %1" \
		       : "=a" (ret), "=m" (*mem) \
		       : "r" (newval), "m" (*mem), "0" (oldval)); \
     ret; })
#endif
#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
		       "je 0f\n\t" \
		       "lock\n" \
		       "0:\tcmpxchgb %b2, %1" \
		       : "=a" (ret), "=m" (*mem) \
		       : "q" (newval), "m" (*mem), "0" (oldval), \
			 "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
		       "je 0f\n\t" \
		       "lock\n" \
		       "0:\tcmpxchgw %w2, %1" \
		       : "=a" (ret), "=m" (*mem) \
		       : "r" (newval), "m" (*mem), "0" (oldval), \
			 "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
		       "je 0f\n\t" \
		       "lock\n" \
		       "0:\tcmpxchgl %2, %1" \
		       : "=a" (ret), "=m" (*mem) \
		       : "r" (newval), "m" (*mem), "0" (oldval), \
			 "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })
/* XXX We do not really need 64-bit compare-and-exchange.  At least
   not at the moment.  Using it would mean causing portability
   problems, since not many other 32-bit architectures support such an
   operation.  So don't define any code for now.  If it is really
   going to be used the code below can be used on Intel Pentium and
   later, but NOT on i486.  */
#if 1
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); abort (); ret = (newval); ret = (oldval); })
# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); abort (); ret = (newval); ret = (oldval); })
#else
# ifdef __PIC__
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("xchgl %2, %%ebx\n\t" \
		       LOCK_PREFIX "cmpxchg8b %1\n\t" \
		       "xchgl %2, %%ebx" \
		       : "=A" (ret), "=m" (*mem) \
		       : "DS" (((unsigned long long int) (newval)) \
			       & 0xffffffff), \
			 "c" (((unsigned long long int) (newval)) >> 32), \
			 "m" (*mem), "a" (((unsigned long long int) (oldval)) \
					  & 0xffffffff), \
			 "d" (((unsigned long long int) (oldval)) >> 32)); \
     ret; })
#  define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("xchgl %2, %%ebx\n\t" \
		       "cmpl $0, %%gs:%P7\n\t" \
		       "je 0f\n\t" \
		       "lock\n" \
		       "0:\tcmpxchg8b %1\n\t" \
		       "xchgl %2, %%ebx" \
		       : "=A" (ret), "=m" (*mem) \
		       : "DS" (((unsigned long long int) (newval)) \
			       & 0xffffffff), \
			 "c" (((unsigned long long int) (newval)) >> 32), \
			 "m" (*mem), "a" (((unsigned long long int) (oldval)) \
					  & 0xffffffff), \
			 "d" (((unsigned long long int) (oldval)) >> 32), \
			 "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })
# else
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile (LOCK_PREFIX "cmpxchg8b %1" \
		       : "=A" (ret), "=m" (*mem) \
		       : "b" (((unsigned long long int) (newval)) \
			      & 0xffffffff), \
			 "c" (((unsigned long long int) (newval)) >> 32), \
			 "m" (*mem), "a" (((unsigned long long int) (oldval)) \
					  & 0xffffffff), \
			 "d" (((unsigned long long int) (oldval)) >> 32)); \
     ret; })
#  define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P7\n\t" \
		       "je 0f\n\t" \
		       "lock\n" \
		       "0:\tcmpxchg8b %1" \
		       : "=A" (ret), "=m" (*mem) \
		       : "b" (((unsigned long long int) (newval)) \
			      & 0xffffffff), \
			 "c" (((unsigned long long int) (newval)) >> 32), \
			 "m" (*mem), "a" (((unsigned long long int) (oldval)) \
					  & 0xffffffff), \
			 "d" (((unsigned long long int) (oldval)) >> 32), \
			 "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })
# endif
#endif

/* Note that we need no lock prefix: xchg with a memory operand is
   implicitly locked.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile ("xchgb %b0, %1" \
			 : "=q" (result), "=m" (*mem) \
			 : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile ("xchgw %w0, %1" \
			 : "=r" (result), "=m" (*mem) \
			 : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile ("xchgl %0, %1" \
			 : "=r" (result), "=m" (*mem) \
			 : "0" (newvalue), "m" (*mem)); \
     else \
       { \
	 result = 0; \
	 abort (); \
       } \
     result; })
#define __arch_exchange_and_add_body(lock, pfx, mem, value) \
  ({ __typeof (*mem) __result; \
     __typeof (value) __addval = (value); \
     if (sizeof (*mem) == 1) \
       __asm __volatile (lock "xaddb %b0, %1" \
			 : "=q" (__result), "=m" (*mem) \
			 : "0" (__addval), "m" (*mem), \
			   "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (lock "xaddw %w0, %1" \
			 : "=r" (__result), "=m" (*mem) \
			 : "0" (__addval), "m" (*mem), \
			   "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (lock "xaddl %0, %1" \
			 : "=r" (__result), "=m" (*mem) \
			 : "0" (__addval), "m" (*mem), \
			   "i" (offsetof (tcbhead_t, multiple_threads))); \
     else \
       { \
	 __typeof (mem) __memp = (mem); \
	 __typeof (*mem) __tmpval; \
	 __result = *__memp; \
	 do \
	   __tmpval = __result; \
	 while ((__result = pfx##_compare_and_exchange_val_64_acq \
		 (__memp, __result + __addval, __result)) == __tmpval); \
       } \
     __result; })
#if __GNUC_PREREQ (4, 1)
# define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add (mem, value)
#else
# define atomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (LOCK_PREFIX, __arch, mem, value)
#endif
#define __arch_exchange_and_add_cprefix \
  "cmpl $0, %%gs:%P4\n\tje 0f\n\tlock\n0:\t"

#define catomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, __arch_c, \
				mem, value)
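
/* Usage sketch (illustrative; `next_ticket' is a hypothetical counter):
   xadd returns the pre-add value directly for 1-, 2- and 4-byte
   objects; only 8-byte objects take the CAS fallback loop in the body
   above.  */
#if 0
static unsigned int
example_take_ticket (volatile unsigned int *next_ticket)
{
  return atomic_exchange_and_add (next_ticket, 1);  /* caller's ticket */
}
#endif
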
#define __arch_add_body(lock, pfx, mem, value) \
  do { \
    if (__builtin_constant_p (value) && (value) == 1) \
      atomic_increment (mem); \
    else if (__builtin_constant_p (value) && (value) == -1) \
      atomic_decrement (mem); \
    else if (sizeof (*mem) == 1) \
      __asm __volatile (lock "addb %b1, %0" \
			: "=m" (*mem) \
			: "iq" (value), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "addw %w1, %0" \
			: "=m" (*mem) \
			: "ir" (value), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "addl %1, %0" \
			: "=m" (*mem) \
			: "ir" (value), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      { \
	__typeof (value) __addval = (value); \
	__typeof (mem) __memp = (mem); \
	__typeof (*mem) __oldval = *__memp; \
	__typeof (*mem) __tmpval; \
	do \
	  __tmpval = __oldval; \
	while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
		(__memp, __oldval + __addval, __oldval)) == __tmpval); \
      } \
  } while (0)
#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, __arch, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, __arch_c, mem, value)
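
/* Usage sketch (illustrative; the variables are hypothetical).  Note
   the __builtin_constant_p tests in __arch_add_body: adding a literal 1
   or -1 is rewritten into atomic_increment/atomic_decrement, i.e. an
   inc/dec instruction instead of an add.  */
#if 0
static void
example_account (volatile int *nbytes, volatile int *ncalls, int len)
{
  atomic_add (nbytes, len);	/* lock; addl */
  atomic_add (ncalls, 1);	/* folds into atomic_increment */
}
#endif
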
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "ir" (value), "m" (*mem)); \
     else \
       abort (); \
     __result; })
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "ir" (value), "m" (*mem)); \
     else \
       abort (); \
     __result; })
#define __arch_increment_body(lock, pfx, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "incb %b0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "incw %w0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "incl %0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      { \
	__typeof (mem) __memp = (mem); \
	__typeof (*mem) __oldval = *__memp; \
	__typeof (*mem) __tmpval; \
	do \
	  __tmpval = __oldval; \
	while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
		(__memp, __oldval + 1, __oldval)) == __tmpval); \
      } \
  } while (0)
#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, __arch_c, mem)
#define atomic_increment_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "incb %0; sete %b1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "incw %0; sete %w1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else \
       abort (); \
     __result; })
#define __arch_decrement_body(lock, pfx, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "decb %b0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "decw %w0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "decl %0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      { \
	__typeof (mem) __memp = (mem); \
	__typeof (*mem) __oldval = *__memp; \
	__typeof (*mem) __tmpval; \
	do \
	  __tmpval = __oldval; \
	while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
		(__memp, __oldval - 1, __oldval)) == __tmpval); \
      } \
  } while (0)
#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem)
#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else \
       abort (); \
     __result; })
#define atomic_bit_set(mem, bit) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
			: "=m" (*mem) \
			: "m" (*mem), "iq" (1 << (bit))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
			: "=m" (*mem) \
			: "m" (*mem), "ir" (1 << (bit))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (LOCK_PREFIX "orl %2, %0" \
			: "=m" (*mem) \
			: "m" (*mem), "ir" (1 << (bit))); \
    else \
      abort (); \
  } while (0)
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
			 : "=q" (__result), "=m" (*mem) \
			 : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
			 : "=q" (__result), "=m" (*mem) \
			 : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
			 : "=q" (__result), "=m" (*mem) \
			 : "m" (*mem), "ir" (bit)); \
     else \
       abort (); \
     __result; })
#define atomic_delay() asm ("rep; nop")
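
/* "rep; nop" is the encoding of the pause instruction; it hints to the
   CPU that this is a spin-wait loop.  Usage sketch (illustrative;
   `data_ready' is hypothetical):  */
#if 0
static void
example_wait_for (volatile int *data_ready)
{
  while (*data_ready == 0)
    atomic_delay ();	/* Be polite while polling.  */
}
#endif
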
#define __arch_and_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "andb %b1, %0" \
			: "=m" (*mem) \
			: "iq" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "andw %w1, %0" \
			: "=m" (*mem) \
			: "ir" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "andl %1, %0" \
			: "=m" (*mem) \
			: "ir" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      abort (); \
  } while (0)
#define __arch_cprefix \
  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"

#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)

#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask)
#define __arch_or_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "orb %b1, %0" \
			: "=m" (*mem) \
			: "iq" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "orw %w1, %0" \
			: "=m" (*mem) \
			: "ir" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "orl %1, %0" \
			: "=m" (*mem) \
			: "ir" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      abort (); \
  } while (0)
#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)
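
/* Usage sketch (illustrative; the flag constants are hypothetical):
   unlike atomic_bit_test_set, the and/or bodies return nothing, so they
   fit cases where whole masks are set or cleared and the old value is
   not needed.  */
#if 0
#define FLAG_BUSY	1
#define FLAG_CLOSING	2
static void
example_set_closing (volatile unsigned int *fd_flags)
{
  atomic_or (fd_flags, FLAG_CLOSING);	/* set a mask */
  atomic_and (fd_flags, ~FLAG_BUSY);	/* clear a mask */
}
#endif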