/// MA 02111-1307, USA.
///--------------------------------------------------------------------------
-/// External definitions.
+/// Preliminaries.
#include "config.h"
#include "asm-common.h"
-///--------------------------------------------------------------------------
-/// Local utilities.
-
-// Magic constants for shuffling.
-#define ROTL 0x93
-#define ROT2 0x4e
-#define ROTR 0x39
+ .text
///--------------------------------------------------------------------------
/// Main code.
- .arch pentium4
- .text
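+	// The AVX entry point is a trivial veneer: clear the upper halves
+	// of the YMM registers with `vzeroupper' and then fall through into
+	// the SSE2 code below.  This avoids the penalty some CPUs charge
+	// for running legacy-SSE instructions while the upper AVX state is
+	// dirty.  (Here `endprologue' is assumed to be an asm-common.h
+	// macro marking the end of the prologue for the unwind metadata.)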
+FUNC(chacha_core_x86ish_avx)
+ .arch .avx
+ vzeroupper
+ endprologue
+ // drop through...
+ENDFUNC
+
+ .arch pentium4
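+	// (The `.arch' directives tell the assembler which instruction sets
+	// to accept: `.avx' above permits the `vzeroupper', while
+	// `pentium4' keeps the main body honestly SSE2-only.)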
FUNC(chacha_core_x86ish_sse2)
#if CPUFAM_X86
// Arguments come in on the stack, and will need to be collected. We
- // we can get away with just the scratch registers for integer work,
- // but we'll run out of XMM registers and will need some properly
- // aligned space which we'll steal from the stack. I don't trust the
- // stack pointer's alignment, so I'll have to mask the stack pointer,
- // which in turn means I'll need to keep track of the old value.
- // Hence I'm making a full i386-style stack frame here.
+ // can get away with just the scratch registers for integer work, but
+ // we'll run out of XMM registers and will need some properly aligned
+ // space which we'll steal from the stack. I don't trust the stack
+ // pointer's alignment, so I'll have to mask the stack pointer, which
+ // in turn means I'll need to keep track of the old value. Hence I'm
+ // making a full i386-style stack frame here.
//
// The Windows and SysV ABIs are sufficiently similar that we don't
// need to worry about the differences here.
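+	// (Specifically, on 32-bit x86 both ABIs pass all three arguments
+	// on the stack in the same order, so a single path serves.)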
# define SAVE0 xmm5
# define SAVE1 xmm6
# define SAVE2 xmm7
-# define SAVE3 [esp]
-
- push ebp
- mov ebp, esp
- sub esp, 16
- mov IN, [ebp + 12]
- mov OUT, [ebp + 16]
- and esp, ~15
- mov NR, [ebp + 8]
+# define SAVE3 [SP]
+
+ pushreg BP
+ setfp
+ stalloc 16
+ mov IN, [BP + 12]
+ mov OUT, [BP + 16]
+ and SP, ~15
+ mov NR, [BP + 8]
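+
+	// (`pushreg', `setfp', and `stalloc' are assumed to be asm-common.h
+	// macros wrapping plain `push', `mov', and `sub' with unwinding
+	// annotations, with BP and SP naming the frame and stack pointers.)
+	// With only eight XMM registers, xmm0--3 holding the state and
+	// xmm4 needed as scratch, only xmm5--7 are spare, so the fourth
+	// save (SAVE3) goes in the 16 bytes carved out at [SP]; masking SP
+	// with ~15 aligns it so that plain `movdqa' works there.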
#endif
#if CPUFAM_AMD64 && ABI_WIN
# define IN rdx
# define OUT r8
# define SAVE0 xmm5
-# define SAVE1 [rsp + 0]
-# define SAVE2 [rsp + 16]
-# define SAVE3 [rsp + 32]
+# define SAVE1 [SP + 0]
+# define SAVE2 [SP + 16]
+# define SAVE3 [SP + 32]
- sub rsp, 48 + 8
+ stalloc 48 + 8
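+
+	// The Windows ABI makes xmm6--15 callee-saved, so only xmm5 is
+	// spare and the other three saves are spilled to the stack: 48
+	// bytes covers three 16-byte slots, and the extra 8 brings SP,
+	// which is only 8-byte aligned after the `call', back to a 16-byte
+	// boundary so the slots can be used with `movdqa'.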
#endif
+ endprologue
+
// First job is to slurp the matrix into XMM registers. Be careful:
// the input matrix isn't likely to be properly aligned.
//
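+	// (A misaligned input means the loads must use the unaligned
+	// `movdqu' form; `movdqa' from an address that isn't 16-byte
+	// aligned raises a fault.)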
// c += d; b ^= c; b <<<= 7
paddd xmm2, xmm3
- pshufd xmm3, xmm3, ROTL
+ pshufd xmm3, xmm3, SHUF(2, 1, 0, 3)
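+	// (`SHUF' is assumed to come from asm-common.h, packing its four
+	// arguments, two bits each with the first argument most
+	// significant, into a `pshufd' immediate; so SHUF(2, 1, 0, 3) =
+	// 0x93, SHUF(1, 0, 3, 2) = 0x4e, and SHUF(0, 3, 2, 1) = 0x39,
+	// matching the old ROTL, ROT2, and ROTR magic numbers.)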
pxor xmm1, xmm2
- pshufd xmm2, xmm2, ROT2
+ pshufd xmm2, xmm2, SHUF(1, 0, 3, 2)
movdqa xmm4, xmm1
pslld xmm1, 7
psrld xmm4, 25
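+	// (SSE2 has no vector rotate, so `b <<<= 7' is built from shifts:
+	// the left shift by 7 makes the low part, the right shift by
+	// 32 - 7 = 25 makes the high part, and a `por', not shown here,
+	// merges them.)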
//
// The shuffles have quite high latency, so they've mostly been
// pushed upwards. The remaining one can't be moved, though.
- pshufd xmm1, xmm1, ROTR
+ pshufd xmm1, xmm1, SHUF(0, 3, 2, 1)
// Apply the diagonal quarterround to each of the columns
// simultaneously.
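+	// (No actual transpose is needed: the rotations above moved each
+	// row round by a different number of lanes, so the old diagonals
+	// now line up as columns and the column-wise code can be reused
+	// as-is.)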
// c += d; b ^= c; b <<<= 7
paddd xmm2, xmm3
- pshufd xmm3, xmm3, ROTR
+ pshufd xmm3, xmm3, SHUF(0, 3, 2, 1)
pxor xmm1, xmm2
- pshufd xmm2, xmm2, ROT2
+ pshufd xmm2, xmm2, SHUF(1, 0, 3, 2)
movdqa xmm4, xmm1
pslld xmm1, 7
psrld xmm4, 25
// Finally, finish off undoing the transpose, and we're done for this
// doubleround. Again, most of this was done above so we don't have
// to wait for the shuffles.
- pshufd xmm1, xmm1, ROTL
+ pshufd xmm1, xmm1, SHUF(2, 1, 0, 3)
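+	// (This shuffle is the inverse of the SHUF(0, 3, 2, 1) applied to
+	// b at the end of the column round, putting that row back where it
+	// started.)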
// Decrement the loop counter and see if we should go round again.
sub NR, 2
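+	// (Each pass through the loop does one column round and one
+	// diagonal round, i.e. two of the NR rounds, hence the step of 2.)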
// Tidy things up.
#if CPUFAM_X86
- mov esp, ebp
- pop ebp
+ dropfp
+ popreg BP
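+	// (`dropfp' and `popreg' are assumed to be the epilogue
+	// counterparts of `setfp' and `pushreg' above, restoring SP from
+	// BP and popping the saved frame pointer.)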
#endif
#if CPUFAM_AMD64 && ABI_WIN
- add rsp, 48 + 8
+ stfree 48 + 8
#endif
// And with that, we're done.