1 /* Verify that changing AVX registers in audit library won't affect
2 function parameter passing/return. */
10 #include <bits/wordsize.h>
11 #include <gnu/lib-names.h>
/* rtld-audit handshake: the dynamic linker calls la_version first so the
   audit module can report which audit interface version it speaks.
   NOTE(review): the return type, braces and return statement fall outside
   this chunk; 'buf' is declared elsewhere in the file.  */
14 la_version (unsigned int v)
18 printf ("version: %u\n", v);
/* Record the version as text as well — presumably so later output can
   reference it; confirm against the full file.  */
21 sprintf (buf, "%u", v);
/* rtld-audit activity callback: print which link-map activity phase the
   dynamic linker is entering, so the test's expected output can check the
   sequence of events.  */
27 la_activity (uintptr_t *cookie, unsigned int flag)
29 if (flag == LA_ACT_CONSISTENT)
30 printf ("activity: consistent\n");
31 else if (flag == LA_ACT_ADD)
32 printf ("activity: add\n");
33 else if (flag == LA_ACT_DELETE)
34 printf ("activity: delete\n");
/* Fallback for any flag value not matched above (the bare 'else' on the
   intervening original line is not visible in this chunk).  */
36 printf ("activity: unknown activity %u\n", flag);
/* rtld-audit object-search callback: translate the LA_SER_* search-origin
   flag to a human-readable string and log the lookup, returning (outside
   this visible span) the name to continue searching with.  */
40 la_objsearch (const char *name, uintptr_t *cookie, unsigned int flag)
44 if (flag == LA_SER_ORIG)
/* NOTE(review): string says "LA_SET_ORIG" but the flag tested is
   LA_SER_ORIG — almost certainly a typo.  Left unchanged because the
   test's expected-output file may match this exact text; fix both
   together if confirmed.  */
45 flagstr = "LA_SET_ORIG";
46 else if (flag == LA_SER_LIBPATH)
47 flagstr = "LA_SER_LIBPATH";
48 else if (flag == LA_SER_RUNPATH)
49 flagstr = "LA_SER_RUNPATH";
50 else if (flag == LA_SER_CONFIG)
51 flagstr = "LA_SER_CONFIG";
52 else if (flag == LA_SER_DEFAULT)
53 flagstr = "LA_SER_DEFAULT";
54 else if (flag == LA_SER_SECURE)
55 flagstr = "LA_SER_SECURE";
/* Unknown flag: format it numerically into a local buffer (the 'else'
   and buffer declaration are outside this visible span).  */
58 sprintf (buf, "unknown flag %d", flag);
61 printf ("objsearch: %s, %s\n", name, flagstr);
/* rtld-audit object-open callback: log the namespace id and the name of
   each object as it is loaded.  The return value (audit-flags bitmask)
   is outside this visible span.  */
67 la_objopen (struct link_map *l, Lmid_t lmid, uintptr_t *cookie)
69 printf ("objopen: %ld, %s\n", lmid, l->l_name);
/* rtld-audit pre-initialization callback: invoked once, just before the
   main program's entry point runs (body not visible in this chunk).  */
75 la_preinit (uintptr_t *cookie)
/* rtld-audit object-close callback: log that an object is being
   unloaded.  Return value is outside this visible span.  */
81 la_objclose (uintptr_t *cookie)
83 printf ("objclose\n");
/* rtld-audit 64-bit symbol-binding callback: log each symbol the dynamic
   linker binds (name, value, symbol index, binding flags).  The return
   value (the address to actually bind) is outside this visible span.  */
88 la_symbind64 (Elf64_Sym *sym, unsigned int ndx, uintptr_t *refcook,
89 	      uintptr_t *defcook, unsigned int *flags, const char *symname)
91 printf ("symbind64: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
92 	  symname, (long int) sym->st_value, ndx, *flags);
/* Configure the generic tst-audit.h harness for x86-64: map the generic
   pltenter/pltexit hook names onto the x86-64 audit entry points, and the
   generic register-set/return-value types onto the x86-64 La_* types.
   int_retval selects the integer-return slot (RAX).  */
97 #define pltenter la_x86_64_gnu_pltenter
98 #define pltexit la_x86_64_gnu_pltexit
99 #define La_regs La_x86_64_regs
100 #define La_retval La_x86_64_retval
101 #define int_retval lrv_rax
103 #include <tst-audit.h>
106 #include <immintrin.h>
/* Runtime AVX-support probe via CPUID (only fragments of this function
   are visible here: its signature, braces and the test of the CPUID
   result fall outside this chunk).  Forced inline so the probe does not
   itself disturb the vector registers under test — TODO confirm against
   the full file.  */
112 __attribute ((always_inline))
117 unsigned int eax, ebx, ecx, edx;
/* CPUID leaf 1 returns the feature bits; the AVX check on the result is
   on lines not visible in this chunk.  */
119 if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)
128 #include <emmintrin.h>
/* PLT-enter hook.  For the audit_test call (and only when AVX is
   available): verify the incoming vector argument registers hold the
   marker values the caller loaded (even slots as 128-bit XMM i+1, odd
   slots as 256-bit YMM i+2), overwrite them with new markers (i+0x100 /
   i+0x101) that pltexit will check, then clobber ymm0-ymm7 with -1 to
   prove the dynamic linker restores the real argument registers before
   the callee runs.
   FIX: "&regs" had been corrupted to the mis-decoded HTML entity
   sequence "®s" in four memcmp calls — restored so the code compiles.  */
132 pltenter (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
133 	  uintptr_t *defcook, La_regs *regs, unsigned int *flags,
134 	  const char *symname, long int *framesizep)
136 printf ("pltenter: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
137 	  symname, (long int) sym->st_value, ndx, *flags);
140 if (check_avx () && strcmp (symname, "audit_test") == 0)
146 for (i = 0; i < 8; i += 2)
148 xmm = _mm_set1_epi32 (i + 1);
/* Even slot: caller passed XMM marker i+1; check both the saved XMM and
   the low lane of the saved vector register.  */
149 if (memcmp (&regs->lr_xmm[i], &xmm, sizeof (xmm))
150     || memcmp (&regs->lr_vector[i], &xmm, sizeof (xmm))
/* Replace the even slot with marker i+0x100, keeping lr_xmm and
   lr_vector consistent.  */
152 regs->lr_xmm[i] = (La_x86_64_xmm) _mm_set1_epi32 (i + 0x100);
153 regs->lr_vector[i].xmm[0] = regs->lr_xmm[i];
155 ymm = _mm256_set1_epi32 (i + 2);
/* Odd slot: caller passed YMM marker i+2; the saved XMM copy must match
   the low 128 bits of the saved YMM.  */
156 if (memcmp (&regs->lr_xmm[i + 1],
157 	    &regs->lr_vector[i + 1].xmm[0], sizeof (xmm))
158     || memcmp (&regs->lr_vector[i + 1], &ymm, sizeof (ymm))
/* Replace the odd slot with 256-bit marker i+0x101.  */
160 regs->lr_vector[i + 1].ymm[0]
161   = (La_x86_64_ymm) _mm256_set1_epi32 (i + 0x101);
162 regs->lr_xmm[i + 1] = regs->lr_vector[i + 1].xmm[0];
/* Deliberately clobber every vector argument register; the dynamic
   linker must reload them from the saved La_regs.  */
165 ymm = _mm256_set1_epi32 (-1);
166 asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
167 asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );
168 asm volatile ("vmovdqa %0, %%ymm2" : : "x" (ymm) : "xmm2" );
169 asm volatile ("vmovdqa %0, %%ymm3" : : "x" (ymm) : "xmm3" );
170 asm volatile ("vmovdqa %0, %%ymm4" : : "x" (ymm) : "xmm4" );
171 asm volatile ("vmovdqa %0, %%ymm5" : : "x" (ymm) : "xmm5" );
172 asm volatile ("vmovdqa %0, %%ymm6" : : "x" (ymm) : "xmm6" );
173 asm volatile ("vmovdqa %0, %%ymm7" : : "x" (ymm) : "xmm7" );
179 return sym->st_value;
/* PLT-exit hook (continues past the end of this visible span).  For the
   audit_test call with AVX: verify the return value register holds the
   callee's 256-bit marker (0x12349876), verify the saved entry registers
   still show the markers pltenter wrote (i+0x100 / i+0x101), replace the
   return value with a new marker (0x98abcdef) the caller checks, then
   clobber ymm registers so the dynamic linker must restore the real
   return value.
   FIX: removed a stray double semicolon after the ymm initializer.  */
183 pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
184 	 uintptr_t *defcook, const La_regs *inregs, La_retval *outregs,
187 printf ("pltexit: symname=%s, st_value=%#lx, ndx=%u, retval=%tu\n",
188 	  symname, (long int) sym->st_value, ndx, outregs->int_retval);
191 if (check_avx () && strcmp (symname, "audit_test") == 0)
/* The callee returns YMM0 = 0x12349876 broadcast across all lanes.  */
195 __m256i ymm = _mm256_set1_epi32 (0x12349876);
196 if (memcmp (&outregs->lrv_vector0, &ymm, sizeof (ymm))
201 for (i = 0; i < 8; i += 2)
/* Even slot must still hold the XMM marker pltenter installed.  */
203 xmm = _mm_set1_epi32 (i + 0x100);
204 if (memcmp (&inregs->lr_xmm[i], &xmm, sizeof (xmm))
205     || memcmp (&inregs->lr_vector[i], &xmm, sizeof (xmm))
/* Odd slot must still hold the YMM marker, with lr_xmm consistent with
   the low lane of lr_vector.  */
208 ymm = _mm256_set1_epi32 (i + 0x101);
209 if (memcmp (&inregs->lr_xmm[i + 1],
210 	    &inregs->lr_vector[i + 1].xmm[0], sizeof (xmm))
211     || memcmp (&inregs->lr_vector[i + 1], &ymm, sizeof (ymm))
/* Substitute a new return-value marker for the caller to check.  */
215 outregs->lrv_vector0.ymm[0]
216   = (La_x86_64_ymm) _mm256_set1_epi32 (0x98abcdef);
/* Clobber vector registers; the linker must restore the saved return
   value (clobber list continues beyond this visible span).  */
218 ymm = _mm256_set1_epi32 (-1);
219 asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
220 asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );