/* Copyright (C) 1992,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* Alan Modra <amodra@bigpond.net.au> rewrote the INLINE_SYSCALL macro */

#ifndef _LINUX_POWERPC_SYSDEP_H
#define _LINUX_POWERPC_SYSDEP_H 1

#include <sysdeps/unix/powerpc/sysdep.h>
#include <tls.h>

/* Define __set_errno() for INLINE_SYSCALL macro below.  */
#ifndef __ASSEMBLER__
#include <errno.h>
#endif

/* Some system calls got renamed over time, but retained the same semantics.
   Handle them here so they can be caught by both C and assembler stubs in
   glibc.  */

#ifdef __NR_pread64
# ifdef __NR_pread
#  error "__NR_pread and __NR_pread64 both defined???"
# endif
# define __NR_pread __NR_pread64
#endif

#ifdef __NR_pwrite64
# ifdef __NR_pwrite
#  error "__NR_pwrite and __NR_pwrite64 both defined???"
# endif
# define __NR_pwrite __NR_pwrite64
#endif

/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#ifdef __STDC__
# define SYS_ify(syscall_name)  __NR_##syscall_name
#else
# define SYS_ify(syscall_name)  __NR_/**/syscall_name
#endif
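
/* Illustration (not part of the original header): SYS_ify just pastes the
   __NR_ prefix onto the name, so both C and assembler stubs can refer to
   syscall numbers symbolically.  For example, with an ISO C preprocessor:

     SYS_ify (getpid)  =>  __NR_getpid
     SYS_ify (pread)   =>  __NR_pread, which the renaming block above turns
                           into __NR_pread64 on kernels that only define the
                           64-bit name.

   The specific syscall names are only examples; any name from
   <asm/unistd.h> works the same way.  */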

#ifdef __ASSEMBLER__

/* This seems to always be the case on PPC.  */
# define ALIGNARG(log2) log2
/* For ELF we need the `.type' directive to make shared libs work right.  */
# define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg;
# define ASM_SIZE_DIRECTIVE(name) .size name,.-name

#endif /* __ASSEMBLER__ */

/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving is concerned.
   It falls back to the real syscall when the vDSO entry point does not
   exist or fails with ENOSYS.  */
#ifdef SHARED
# define INLINE_VSYSCALL(name, nr, args...) \
  ({                                                                          \
    __label__ out;                                                            \
    __label__ iserr;                                                          \
    INTERNAL_SYSCALL_DECL (sc_err);                                           \
    long int sc_ret;                                                          \
                                                                              \
    if (__vdso_##name != NULL)                                                \
      {                                                                       \
        sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, nr, ##args);   \
        if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))                       \
          goto out;                                                           \
        if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS)                \
          goto iserr;                                                         \
      }                                                                       \
                                                                              \
    sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args);                     \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))                            \
      {                                                                       \
      iserr:                                                                  \
        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));                \
        sc_ret = -1L;                                                         \
      }                                                                       \
  out:                                                                        \
    sc_ret;                                                                   \
  })
#else
# define INLINE_VSYSCALL(name, nr, args...) \
  INLINE_SYSCALL (name, nr, ##args)
#endif
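
/* Illustration (not part of the original header): a C wrapper such as a
   hypothetical __gettimeofday could be written with INLINE_VSYSCALL so the
   vDSO entry point is tried first and the real syscall is used only when
   the vDSO symbol is missing or returns ENOSYS:

     int
     __gettimeofday (struct timeval *tv, struct timezone *tz)
     {
       return INLINE_VSYSCALL (gettimeofday, 2, tv, tz);
     }

   This assumes a __vdso_gettimeofday function pointer has been set up
   elsewhere (by the vDSO initialization code), which is outside the scope
   of this header.  On error the macro sets errno and evaluates to -1, just
   like INLINE_SYSCALL.  */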

#ifdef SHARED
# define INTERNAL_VSYSCALL(name, err, nr, args...) \
  ({                                                                          \
    __label__ out;                                                            \
    long int v_ret;                                                           \
                                                                              \
    if (__vdso_##name != NULL)                                                \
      {                                                                       \
        v_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args);       \
        if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err)                            \
            || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS)                 \
          goto out;                                                           \
      }                                                                       \
    v_ret = INTERNAL_SYSCALL (name, err, nr, ##args);                         \
  out:                                                                        \
    v_ret;                                                                    \
  })
#else
# define INTERNAL_VSYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL (name, err, nr, ##args)
#endif

/* This version is for internal use when there is no desire
   to set errno.  */
#define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, nr, args...)         \
  ({                                                                          \
    long int sc_ret = ENOSYS;                                                 \
                                                                              \
    if (__vdso_##name != NULL)                                                \
      sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args);        \
    else                                                                      \
      err = 1 << 28;                                                          \
    sc_ret;                                                                   \
  })
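
/* Illustration (not part of the original header): a caller that must not
   fall back to a real syscall could combine the macro above with the
   INTERNAL_SYSCALL_* helpers defined below, e.g. (hypothetical arguments):

     INTERNAL_SYSCALL_DECL (err);
     long int ret
       = INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK (clock_gettime, err, 2,
                                                clock_id, tp);
     if (INTERNAL_SYSCALL_ERROR_P (ret, err))
       ...;

   If the error branch is taken, either there is no vDSO entry (err was set
   to 1 << 28) or the vDSO call itself failed; errno is never touched.  The
   clock_gettime call only shows the calling shape.  */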

/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETRES_VSYSCALL      1
#define HAVE_CLOCK_GETTIME_VSYSCALL     1

/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  In the non-error case (CR0.SO
   cleared) it returns exactly what the kernel gave back; in the error case
   (CR0.SO set) the kernel has already negated its internal negative error
   value, so the positive error number is returned as-is.  */

#define INTERNAL_VSYSCALL_NCS(funcptr, err, nr, args...) \
  ({                                                                    \
    register void *r0  __asm__ ("r0");                                  \
    register long int r3  __asm__ ("r3");                               \
    register long int r4  __asm__ ("r4");                               \
    register long int r5  __asm__ ("r5");                               \
    register long int r6  __asm__ ("r6");                               \
    register long int r7  __asm__ ("r7");                               \
    register long int r8  __asm__ ("r8");                               \
    LOADARGS_##nr (funcptr, args);                                      \
    __asm__ __volatile__                                                \
      ("mtctr %0\n\t"                                                   \
       "bctrl\n\t"                                                      \
       "mfcr  %0\n\t"                                                   \
       "0:"                                                             \
       : "=&r" (r0),                                                    \
         "=&r" (r3), "=&r" (r4), "=&r" (r5),                            \
         "=&r" (r6), "=&r" (r7), "=&r" (r8)                             \
       : ASM_INPUT_##nr                                                 \
       : "r9", "r10", "r11", "r12",                                     \
         "cr0", "ctr", "lr", "memory");                                 \
    err = (long int) r0;                                                \
    (int) r3;                                                           \
  })

#undef INLINE_SYSCALL

/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving is concerned.  */
#define INLINE_SYSCALL(name, nr, args...)                               \
  ({                                                                    \
    INTERNAL_SYSCALL_DECL (sc_err);                                     \
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args);        \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))                      \
      {                                                                 \
        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));          \
        sc_ret = -1L;                                                   \
      }                                                                 \
    sc_ret;                                                             \
  })
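
/* Illustration (not part of the original header): a typical C syscall stub,
   say a hypothetical __close, would use INLINE_SYSCALL like this:

     int
     __close (int fd)
     {
       return INLINE_SYSCALL (close, 1, fd);
     }

   On success the kernel's return value is passed through unchanged; on
   failure (CR0.SO set) errno is set from the returned error code and the
   expression evaluates to -1.  */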

/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  In the non-error case (CR0.SO
   cleared) it returns exactly what the kernel gave back; in the error case
   (CR0.SO set) the kernel has already negated its internal negative error
   value, so the positive error number is returned as-is.  */

#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({                                                                    \
    register long int r0  __asm__ ("r0");                               \
    register long int r3  __asm__ ("r3");                               \
    register long int r4  __asm__ ("r4");                               \
    register long int r5  __asm__ ("r5");                               \
    register long int r6  __asm__ ("r6");                               \
    register long int r7  __asm__ ("r7");                               \
    register long int r8  __asm__ ("r8");                               \
    LOADARGS_##nr (name, ##args);                                       \
    __asm__ __volatile__                                                \
      ("sc\n\t"                                                         \
       "mfcr  %0\n\t"                                                   \
       "0:"                                                             \
       : "=&r" (r0),                                                    \
         "=&r" (r3), "=&r" (r4), "=&r" (r5),                            \
         "=&r" (r6), "=&r" (r7), "=&r" (r8)                             \
       : ASM_INPUT_##nr                                                 \
       : "r9", "r10", "r11", "r12",                                     \
         "cr0", "ctr", "memory");                                       \
    err = r0;                                                           \
    (int) r3;                                                           \
  })
#define INTERNAL_SYSCALL(name, err, nr, args...)                        \
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, args)

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long int err

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err)     (val)
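
/* Illustration (not part of the original header): the INTERNAL_SYSCALL
   family is for callers that want to examine failures themselves without
   disturbing errno, e.g. with hypothetical pid and sig variables in scope:

     INTERNAL_SYSCALL_DECL (err);
     long int ret = INTERNAL_SYSCALL (kill, err, 2, pid, sig);
     if (INTERNAL_SYSCALL_ERROR_P (ret, err))
       {
         int error_code = INTERNAL_SYSCALL_ERRNO (ret, err);
         ...
       }

   err receives the CR image read by mfcr; bit 28 of that value is CR0.SO,
   which the kernel sets on failure, and ret then holds the positive error
   number.  */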

#define LOADARGS_0(name, dummy) \
        r0 = name
#define LOADARGS_1(name, __arg1) \
        long int arg1 = (long int) (__arg1); \
        LOADARGS_0(name, 0); \
        extern void __illegally_sized_syscall_arg1 (void); \
        if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 8) \
          __illegally_sized_syscall_arg1 (); \
        r3 = arg1
#define LOADARGS_2(name, __arg1, __arg2) \
        long int arg2 = (long int) (__arg2); \
        LOADARGS_1(name, __arg1); \
        extern void __illegally_sized_syscall_arg2 (void); \
        if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 8) \
          __illegally_sized_syscall_arg2 (); \
        r4 = arg2
#define LOADARGS_3(name, __arg1, __arg2, __arg3) \
        long int arg3 = (long int) (__arg3); \
        LOADARGS_2(name, __arg1, __arg2); \
        extern void __illegally_sized_syscall_arg3 (void); \
        if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 8) \
          __illegally_sized_syscall_arg3 (); \
        r5 = arg3
#define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
        long int arg4 = (long int) (__arg4); \
        LOADARGS_3(name, __arg1, __arg2, __arg3); \
        extern void __illegally_sized_syscall_arg4 (void); \
        if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 8) \
          __illegally_sized_syscall_arg4 (); \
        r6 = arg4
#define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
        long int arg5 = (long int) (__arg5); \
        LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
        extern void __illegally_sized_syscall_arg5 (void); \
        if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 8) \
          __illegally_sized_syscall_arg5 (); \
        r7 = arg5
#define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
        long int arg6 = (long int) (__arg6); \
        LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
        extern void __illegally_sized_syscall_arg6 (void); \
        if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 8) \
          __illegally_sized_syscall_arg6 (); \
        r8 = arg6

#define ASM_INPUT_0 "0" (r0)
#define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
#define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
#define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
#define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
#define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
#define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
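
/* Illustration (not part of the original header): LOADARGS_n binds the
   syscall number and up to six arguments to r0 and r3..r8, and ASM_INPUT_n
   names those registers as inputs of the asm statement.  A hypothetical
   three-argument call

     INTERNAL_SYSCALL (write, err, 3, fd, buf, len)

   makes LOADARGS_3 expand (roughly, ignoring the size checks) to

     long int arg3 = (long int) (len);
     long int arg2 = (long int) (buf);
     long int arg1 = (long int) (fd);
     r0 = __NR_write; r3 = arg1; r4 = arg2; r5 = arg3;

   while ASM_INPUT_3 becomes "0" (r0), "1" (r3), "2" (r4), "3" (r5), tying
   each loaded register to the corresponding asm output operand.  The write
   call is only an example.  */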


/* Pointer mangling support.  */
#if defined NOT_IN_libc && defined IS_IN_rtld
/* We cannot use the thread descriptor because in ld.so we use setjmp
   before the descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg, tmpreg) \
	ld	tmpreg,POINTER_GUARD(r13); \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE2(reg, tmpreg) \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE3(destreg, reg, tmpreg) \
	ld	tmpreg,POINTER_GUARD(r13); \
	xor	destreg,tmpreg,reg
#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
#  define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
#  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
# endif
#endif
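
/* Illustration (not part of the original header): in C code the mangling
   is an XOR with the per-thread pointer guard, applied before a function
   pointer is stored and reversed before it is used, e.g. with a
   hypothetical some_function:

     void (*fp) (void) = &some_function;
     PTR_MANGLE (fp);       obfuscate before storing
     ...
     PTR_DEMANGLE (fp);     undo the XOR before calling
     (*fp) ();

   Since XOR is its own inverse, PTR_DEMANGLE is simply defined as
   PTR_MANGLE.  The assembler variants above do the same thing with the
   guard loaded from POINTER_GUARD(r13), r13 being the thread pointer.  */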

#endif /* linux/powerpc/powerpc64/sysdep.h */