/* Atomic operations.  PowerPC64 version.
   Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
   This is a hint telling the hardware whether or not to expect
   additional updates adjacent to the lock word.  If we are acquiring a
   Mutex, the hint should be true.  Otherwise we are releasing a Mutex
   or doing a simple atomic operation; in that case we don't expect
   additional updates adjacent to the lock word after the Store
   Conditional, and the hint should be false.  */

#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
# define MUTEX_HINT_ACQ ",1"
# define MUTEX_HINT_REL ",0"
#else
# define MUTEX_HINT_ACQ
# define MUTEX_HINT_REL
#endif

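/* Illustrative sketch (not part of the upstream header): the hint
   macros simply append a fourth operand to the load-and-reserve
   string.  With _ARCH_PWR6 defined, an acquire-form lwarx in the
   macros below expands to

       1: lwarx   %0,0,%2,1

   while on older targets the plain three-operand form is emitted:

       1: lwarx   %0,0,%2

   The four-operand form is only available on POWER6 and newer, which
   is why it is gated on _ARCH_PWR6/_ARCH_PWR6X above.  */
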
/* The 32-bit exchange_bool is different on powerpc64 because the subf
   does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
   (load word and zero the high 32 bits) load.
   On powerpc64, register values are 64-bit by default, including
   oldval.  The sign extension of the value in oldval is unknown,
   whereas lwarx loads the 32-bit value as unsigned.  So we explicitly
   clear the high 32 bits of oldval before comparing.  */
#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({                                                                            \
  unsigned int __tmp, __tmp2;                                                 \
  __asm __volatile ("   clrldi  %1,%1,32\n"                                   \
                    "1: lwarx   %0,0,%2" MUTEX_HINT_ACQ "\n"                  \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp), "=r" (__tmp2)                            \
                    : "b" (mem), "1" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({                                                                            \
  unsigned int __tmp, __tmp2;                                                 \
  __asm __volatile (__ARCH_REL_INSTR "\n"                                     \
                    "   clrldi  %1,%1,32\n"                                   \
                    "1: lwarx   %0,0,%2" MUTEX_HINT_REL "\n"                  \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp), "=r" (__tmp2)                            \
                    : "b" (mem), "1" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

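/* Illustrative sketch (hypothetical caller, not part of the upstream
   header): why the clrldi above matters.  A negative 32-bit oldval may
   arrive sign-extended in its 64-bit register, e.g. -1 as
   0xffffffffffffffff, while lwarx yields the zero-extended
   0x00000000ffffffff; without clearing the high 32 bits, the 64-bit
   subf. would compare unequal even though the low words match.  The
   bool forms return zero on success, so a caller might look like:

     int lock = 0;
     if (!__arch_compare_and_exchange_bool_32_acq (&lock, 1, 0))
       ...  the CAS stored 1 and we hold the lock  ...
*/
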
/*
 * Only powerpc64 processors support the Load doubleword and reserve
 * indexed (ldarx) and Store doubleword conditional indexed (stdcx.)
 * instructions.  So here we define the 64-bit forms.
 */
#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({                                                                            \
  unsigned long __tmp;                                                        \
  __asm __volatile (                                                          \
                    "1: ldarx   %0,0,%1" MUTEX_HINT_ACQ "\n"                  \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
({                                                                            \
  unsigned long __tmp;                                                        \
  __asm __volatile (__ARCH_REL_INSTR "\n"                                     \
                    "1: ldarx   %0,0,%1" MUTEX_HINT_REL "\n"                  \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm __volatile (                                                      \
                        "1:     ldarx   %0,0,%1" MUTEX_HINT_ACQ "\n"          \
                        "       cmpd    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stdcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     " __ARCH_ACQ_INSTR                            \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm __volatile (__ARCH_REL_INSTR "\n"                                 \
                        "1:     ldarx   %0,0,%1" MUTEX_HINT_REL "\n"          \
                        "       cmpd    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stdcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     "                                             \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

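/* Illustrative sketch (hypothetical caller, not part of the upstream
   header): unlike the bool forms, the val forms return the value that
   was observed in memory, so a retry loop can reuse it directly:

     long v = *counter;
     long seen;
     while ((seen = __arch_compare_and_exchange_val_64_acq
                      (counter, v + 1, v)) != v)
       v = seen;
*/
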
#define __arch_atomic_exchange_64_acq(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val;                                                  \
      __asm __volatile (__ARCH_REL_INSTR "\n"                                 \
                        "1:     ldarx   %0,0,%2" MUTEX_HINT_ACQ "\n"          \
                        "       stdcx.  %3,0,%2\n"                            \
                        "       bne-    1b\n"                                 \
                        " " __ARCH_ACQ_INSTR                                  \
                        : "=&r" (__val), "=m" (*mem)                          \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_exchange_64_rel(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val;                                                  \
      __asm __volatile (__ARCH_REL_INSTR "\n"                                 \
                        "1:     ldarx   %0,0,%2" MUTEX_HINT_REL "\n"          \
                        "       stdcx.  %3,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&r" (__val), "=m" (*mem)                          \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_exchange_and_add_64(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val, __tmp;                                           \
      __asm __volatile ("1:     ldarx   %0,0,%3\n"                            \
                        "       add     %1,%0,%4\n"                           \
                        "       stdcx.  %1,0,%3\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)           \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

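/* Illustrative sketch (hypothetical caller, not part of the upstream
   header): __arch_atomic_exchange_and_add_64 returns the value the
   doubleword held before the addition, so concurrent callers of an
   ID generator each observe a distinct result:

     static long counter;

     long next_id (void)
     {
       return __arch_atomic_exchange_and_add_64 (&counter, 1);
     }
*/
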
#define __arch_atomic_increment_val_64(mem) \
    ({                                                                        \
      __typeof (*(mem)) __val;                                                \
      __asm __volatile ("1:     ldarx   %0,0,%2\n"                            \
                        "       addi    %0,%0,1\n"                            \
                        "       stdcx.  %0,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=m" (*mem)                          \
                        : "b" (mem), "m" (*mem)                               \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_decrement_val_64(mem) \
    ({                                                                        \
      __typeof (*(mem)) __val;                                                \
      __asm __volatile ("1:     ldarx   %0,0,%2\n"                            \
                        "       subi    %0,%0,1\n"                            \
                        "       stdcx.  %0,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=m" (*mem)                          \
                        : "b" (mem), "m" (*mem)                               \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_decrement_if_positive_64(mem) \
  ({ int __val, __tmp;                                                        \
     __asm __volatile ("1:      ldarx   %0,0,%3\n"                            \
                       "        cmpdi   0,%0,0\n"                             \
                       "        addi    %1,%0,-1\n"                           \
                       "        ble     2f\n"                                 \
                       "        stdcx.  %1,0,%3\n"                            \
                       "        bne-    1b\n"                                 \
                       "2:      " __ARCH_ACQ_INSTR                            \
                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)            \
                       : "b" (mem), "m" (*mem)                                \
                       : "cr0", "memory");                                    \
     __val;                                                                   \
  })

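/* Illustrative sketch (hypothetical caller, not part of the upstream
   header): the macro above returns the old value and only stores the
   decrement when that value was positive, i.e. the semaphore trywait
   pattern:

     if (__arch_atomic_decrement_if_positive_64 (&sem_count) > 0)
       ...  a unit was taken from the count  ...
     else
       ...  the count was already <= 0 and is unchanged  ...
*/
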
/*
 * All powerpc64 processors support the new "light weight" sync (lwsync).
 */
#define atomic_read_barrier()   __asm ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
#ifndef UP
# define __ARCH_REL_INSTR       "lwsync"
#endif

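/* Illustrative sketch (hypothetical message-passing consumer, not part
   of the upstream header): atomic_read_barrier keeps a dependent data
   load from being satisfied ahead of the flag load that guards it:

     while (flag == 0)
       ;
     atomic_read_barrier ();
     value = data;
*/
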
/*
 * Include the rest of the atomic ops macros which are common to both
 * powerpc32 and powerpc64.
 */
#include_next <bits/atomic.h>