1 /// -*- mode: asm; asm-comment-char: ?/ -*-
3 /// AESNI-based implementation of Rijndael
5 /// (c) 2015 Straylight/Edgeware
8 ///----- Licensing notice ---------------------------------------------------
10 /// This file is part of Catacomb.
12 /// Catacomb is free software; you can redistribute it and/or modify
13 /// it under the terms of the GNU Library General Public License as
14 /// published by the Free Software Foundation; either version 2 of the
15 /// License, or (at your option) any later version.
17 /// Catacomb is distributed in the hope that it will be useful,
18 /// but WITHOUT ANY WARRANTY; without even the implied warranty of
19 /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 /// GNU Library General Public License for more details.
22 /// You should have received a copy of the GNU Library General Public
23 /// License along with Catacomb; if not, write to the Free
24 /// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
25 /// MA 02111-1307, USA.
27 ///--------------------------------------------------------------------------
28 /// External definitions.
31 #include "asm-common.h"
34 .globl F(rijndael_rcon)
36 ///--------------------------------------------------------------------------
39 // Magic constants for shuffling.
44 ///--------------------------------------------------------------------------
50 /// The AESNI instructions implement a little-endian version of AES, but
51 /// Catacomb's internal interface presents as big-endian so as to work better
52 /// with things like GCM. We therefore maintain the round keys in
53 /// little-endian form, and have to end-swap blocks in and out.
55 /// For added amusement, the AESNI instructions don't implement the
56 /// larger-block versions of Rijndael, so we have to end-swap the keys if
57 /// we're preparing for one of those.
// Key-schedule sizing and context-structure offsets.  The context is
// laid out as: round count, then the encryption schedule, then the
// decryption schedule (see the uses `[CTX + nr]', `[CTX + w + ...]'
// below).
60 .equ maxrounds, 16 // maximum number of rounds
61 .equ maxblksz, 32 // maximum block size, in bytes
62 .equ kbufsz, maxblksz*(maxrounds + 1) // size of a key-schedule buffer
65 .equ nr, 0 // number of rounds
66 .equ w, nr + 4 // encryption key words
67 .equ wi, w + kbufsz // decryption key words
69 ///--------------------------------------------------------------------------
72 FUNC(rijndael_setup_x86ish_aesni)
// rijndael_setup_x86ish_aesni(ctx, blksz, key, keysz)
//
// Expand KEYSZ 32-bit words of key material at KEY into the full
// encryption and decryption schedules in the context CTX, for a
// block size of BLKSZ words.  Three ABI-specific preambles follow
// (x86 stack arguments; AMD64 System V; AMD64 Windows); each maps
// the same logical variable names onto registers for that ABI, and
// the common expansion code below uses only the logical names.
75 // Arguments are on the stack. We'll need to stack the caller's
76 // register variables, but we'll manage.
78 # define CTX ebp // context pointer
79 # define BLKSZ [esp + 24] // block size
81 # define SI esi // source pointer
82 # define DI edi // destination pointer
84 # define KSZ ebx // key size
85 # define KSZo ebx // ... as address offset
86 # define NKW edx // total number of key words
87 # define NKW_NEEDS_REFRESH 1 // ... needs recalculating
88 # define RCON ecx // round constants table
89 # define LIM edx // limit pointer
90 # define LIMn edx // ... as integer offset from base
91 # define CYIX edi // index in shift-register cycle
93 # define NR ecx // number of rounds
94 # define LRK eax // distance to last key
95 # define LRKo eax // ... as address offset
96 # define BLKOFF edx // block size in bytes
97 # define BLKOFFo edx // ... as address offset
99 // Stack the caller's registers.
105 // Set up our own variables.
106 mov CTX, [esp + 20] // context base pointer
107 mov SI, [esp + 28] // key material
108 mov KSZ, [esp + 32] // key size, in words
111 #if CPUFAM_AMD64 && ABI_SYSV
112 // Arguments are in registers. We have plenty, but, to be honest,
113 // the initial register allocation is a bit annoying.
115 # define CTX r8 // context pointer
116 # define BLKSZ r9d // block size
118 # define SI rsi // source pointer
119 # define DI rdi // destination pointer
121 # define KSZ edx // key size
122 # define KSZo rdx // ... as address offset
123 # define NKW r10d // total number of key words
124 # define RCON rdi // round constants table
125 # define LIMn ecx // limit, as integer offset from base
126 # define LIM rcx // ... as limit pointer
127 # define CYIX r11d // index in shift-register cycle
129 # define NR ecx // number of rounds
130 # define LRK eax // distance to last key
131 # define LRKo rax // ... as address offset
132 # define BLKOFF r9d // block size in bytes
133 # define BLKOFFo r9 // ... as address offset
135 // Move arguments to more useful places.
136 mov CTX, rdi // context base pointer
137 mov BLKSZ, esi // block size in words
138 mov SI, rdx // key material
139 mov KSZ, ecx // key size, in words
142 #if CPUFAM_AMD64 && ABI_WIN
143 // Arguments are in different registers, and they're a little tight.
145 # define CTX r8 // context pointer
146 # define BLKSZ edx // block size
148 # define SI rsi // source pointer
149 # define DI rdi // destination pointer
151 # define KSZ r9d // key size
152 # define KSZo r9 // ... as address offset
153 # define NKW r10d // total number of key words
154 # define RCON rdi // round constants table
155 # define LIMn ecx // limit, as integer offset from base
156 # define LIM rcx // ... as limit pointer
157 # define CYIX r11d // index in shift-register cycle
159 # define NR ecx // number of rounds
160 # define LRK eax // distance to last key
161 # define LRKo rax // ... as address offset
162 # define BLKOFF edx // block size in bytes
163 # define BLKOFFo rdx // ... as address offset
165 // We'll need the index registers, which belong to the caller in this
// ABI, so they must be preserved around this function.
173 // Move arguments to more useful places.
174 mov SI, r8 // key material
175 mov CTX, rcx // context base pointer
178 // The initial round key material is taken directly from the input
179 // key, so copy it over.
180 #if CPUFAM_AMD64 && ABI_SYSV
181 // We've been lucky. We already have a copy of the context pointer
182 // in rdi, and the key size in ecx.
190 // Find out other useful things.
191 mov NKW, [CTX + nr] // number of rounds
193 imul NKW, BLKSZ // total key size in words
194 #if !NKW_NEEDS_REFRESH
195 // If we can't keep NKW for later, then we use the same register for
196 // it and LIM, so this move is unnecessary.
199 sub LIMn, KSZ // offset by the key size
201 // Find the round constants.
203 leaext RCON, F(rijndael_rcon), ecx
205 // Prepare for the main loop.
207 mov eax, [SI + 4*KSZo - 4] // most recent key word
208 lea LIM, [SI + 4*LIM] // limit, offset by one key expansion
209 xor CYIX, CYIX // start of new cycle
211 // Main key expansion loop. The first word of each key-length chunk
212 // needs special treatment.
214 // This is rather tedious because the Intel `AESKEYGENASSIST'
215 // instruction is very strangely shaped. Firstly, it wants to
216 // operate on vast SSE registers, even though we're data-blocked from
217 // doing more than one operation at a time unless we're doing two key
218 // schedules simultaneously -- and even then we can't do more than
219 // two, because the instruction ignores two of its input words
220 // entirely, and produces two different outputs for each of the other
221 // two. And secondly it insists on taking the magic round constant
222 // as an immediate, so it's kind of annoying if you're not
223 // open-coding the whole thing. It's much easier to leave that as
224 // zero and XOR in the round constant by hand.
225 0: cmp CYIX, 0 // first word of the cycle?
227 cmp CYIX, 4 // fourth word of the cycle?
229 cmp KSZ, 7 // and a large key?
232 // Fourth word of the cycle, and seven or eight words of key. Do a
233 // byte substitution.
235 pshufd xmm0, xmm0, ROTL
236 aeskeygenassist xmm1, xmm0, 0 // rcon = 0: XORed in by hand instead
240 // First word of the cycle. This is the complicated piece.
242 pshufd xmm0, xmm0, ROTR
243 aeskeygenassist xmm1, xmm0, 0 // rcon = 0: XORed in by hand instead
244 pshufd xmm1, xmm1, ROTL
249 // Common tail. Mix in the corresponding word from the previous
250 // cycle and prepare for the next loop.
252 mov [SI + 4*KSZo], eax // store the new key word
262 // Next job is to construct the decryption keys. The keys for the
263 // first and last rounds don't need to be mangled, but the remaining
264 // ones do -- and they all need to be reordered too.
266 // The plan of action, then, is to copy the final encryption round's
267 // keys into place first, then to do each of the intermediate rounds
268 // in reverse order, and finally do the first round.
270 // Do all of the heavy lifting with SSE registers. The order we're
271 // doing this in means that it's OK if we read or write too much, and
272 // there's easily enough buffer space for the over-enthusiastic reads
273 // and writes because the context has space for 32-byte blocks, which
274 // is our maximum and an exact fit for two SSE registers.
275 9: mov NR, [CTX + nr] // number of rounds
276 #if NKW_NEEDS_REFRESH
281 // If we retain NKW, then BLKSZ and BLKOFF are the same register
282 // because we won't need the former again.
287 lea SI, [CTX + w + 4*LRKo] // last round's keys
288 shl BLKOFF, 2 // block size (in bytes now)
290 // Copy the last encryption round's keys.
295 movdqu xmm0, [SI + 16]
296 movdqu [DI + 16], xmm0
298 // Update the loop variables and stop if we've finished.
304 // Do another middle round's keys...
310 movdqu xmm0, [SI + 16]
312 movdqu [DI + 16], xmm0
315 // Finally do the first encryption round.
320 movdqu xmm0, [SI + 16]
321 movdqu [DI + 16], xmm0
323 // If the block size is not exactly four words then we must end-swap
324 // everything. We can use fancy SSE toys for this.
328 // Find the byte-reordering table.
330 movdqa xmm5, [INTADDR(endswap_tab, ecx)]
332 #if NKW_NEEDS_REFRESH
333 // Calculate the number of subkey words again. (It's a good job
334 // we've got a fast multiplier.)
340 // End-swap the encryption keys.
344 // And the decryption keys.
355 #if CPUFAM_AMD64 && ABI_WIN
363 // End-swap NKW words starting at SI. The end-swapping table is
364 // already loaded into XMM5; and it's OK to work in 16-byte chunks.
391 ///--------------------------------------------------------------------------
392 /// Encrypting and decrypting blocks.
// encdec OP, AES, KOFF
//
// Generate a single-block function `rijndael_OP_x86ish_aesni', using
// the round instruction AES (`aesenc' or `aesdec') and taking its
// round keys from offset KOFF in the context (`w' for the encryption
// schedule, `wi' for the decryption schedule).  Instantiated twice at
// the bottom to produce the `eblk' and `dblk' entry points.
394 .macro encdec op, aes, koff
395 FUNC(rijndael_\op\()_x86ish_aesni)
398 // Arguments come in on the stack, and need to be collected. We
399 // don't have a shortage of registers.
410 #if CPUFAM_AMD64 && ABI_SYSV
411 // Arguments come in registers. All is good.
419 #if CPUFAM_AMD64 && ABI_WIN
420 // Arguments come in different registers.
429 // Find the magic endianness-swapping table.
431 movdqa xmm5, [INTADDR(endswap_tab, ecx)]
439 // Initial whitening.
444 // Dispatch to the correct code.
// Straight-line round-key loads, 16 bytes apart; each load feeds a
// `\aes' round on xmm0 (those rounds are elided from this view).
483 movdqu xmm1, [K + 16]
487 movdqu xmm1, [K + 32]
491 movdqu xmm1, [K + 48]
495 movdqu xmm1, [K + 64]
499 movdqu xmm1, [K + 80]
503 movdqu xmm1, [K + 96]
507 movdqu xmm1, [K + 112]
511 movdqu xmm1, [K + 128]
515 movdqu xmm1, [K + 144]
516 \aes\()last xmm0, xmm1 // final round omits MixColumns
518 // Unpermute the ciphertext block and store it.
// Instantiate the encrypt and decrypt entry points.
536 encdec eblk, aesenc, w
537 encdec dblk, aesdec, wi
539 ///--------------------------------------------------------------------------
540 /// Random utilities.
543 // Abort the process because of a programming error. Indirecting
544 // through this point serves several purposes: (a) by CALLing, rather
545 // than branching to, `abort', we can save the return address, which
546 // might at least provide a hint as to what went wrong; (b) we don't
547 // have conditional CALLs (and they'd be big anyway); and (c) we can
548 // write a HLT here as a backstop against `abort' being mad.
// This label never returns.
549 bogus: callext F(abort)
553 ///--------------------------------------------------------------------------
563 ///----- That's all, folks --------------------------------------------------