/*
 * Twofish Cipher 8-way parallel algorithm (AVX/x86_64)
 */

.file "twofish-avx-x86_64-asm_64.S"
.text

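/* structure of crypto context */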
#define s0	0
#define s1	1024
#define s2	2048
#define s3	3072
#define w	4096
#define k	4128

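/**********************************************************************
  8-way AVX twofish
 **********************************************************************/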
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX0 %xmm8
#define RY0 %xmm9

#define RX1 %xmm10
#define RY1 %xmm11

#define RK1 %xmm12
#define RK2 %xmm13

#define RT %xmm14
#define RR %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RGS1  %r8
#define RGS1d %r8d
#define RGS2  %r9
#define RGS2d %r9d
#define RGS3  %r10
#define RGS3d %r10d

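/*
 * lookup_32bit: four 8->32-bit s-box lookups for one 32-bit word.
 * The two low bytes of 'src' are used as indices first, then 'src' is
 * shifted right by 16 so its next two bytes can be read through the
 * same byte registers; 'interleave_op' lets the caller schedule the
 * shift for the following lookup in between the table reads.
 */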
#define lookup_32bit(t0, t1, t2, t3, src, dst, interleave_op, il_reg) \
	movzbl		src ## bl,        RID1d;     \
	movzbl		src ## bh,        RID2d;     \
	shrq $16,	src;                         \
	movl		t0(CTX, RID1, 4), dst ## d;  \
	movl		t1(CTX, RID2, 4), RID2d;     \
	movzbl		src ## bl,        RID1d;     \
	xorl		RID2d,            dst ## d;  \
	movzbl		src ## bh,        RID2d;     \
	interleave_op(il_reg);                       \
	xorl		t2(CTX, RID1, 4), dst ## d;  \
	xorl		t3(CTX, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

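/*
 * G: the Twofish g-function, computed for four blocks at once. gi1 and
 * gi2 each hold one 32-bit input word from two different blocks; the
 * four 32-bit results are packed into RGS2 (for gi1) and RGS3 (for gi2).
 */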
#define G(gi1, gi2, x, t0, t1, t2, t3) \
	lookup_32bit(t0, t1, t2, t3, ##gi1, RGS1, shr_next, ##gi1); \
	lookup_32bit(t0, t1, t2, t3, ##gi2, RGS3, shr_next, ##gi2); \
	\
	lookup_32bit(t0, t1, t2, t3, ##gi1, RGS2, dummy, none);     \
	shlq $32,	RGS2;                                       \
	orq		RGS1, RGS2;                                 \
	lookup_32bit(t0, t1, t2, t3, ##gi2, RGS1, dummy, none);     \
	shlq $32,	RGS1;                                       \
	orq		RGS1, RGS3;

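/*
 * round_head_2: the table-lookup half of one round for both 4-block
 * groups. Inputs are moved from the xmm registers into general-purpose
 * registers, run through G, and the packed results are inserted back
 * into x1/y1 and x2/y2.
 */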
#define round_head_2(a, b, x1, y1, x2, y2) \
	vmovq		b ## 1, RGI3;           \
	vpextrq $1,	b ## 1, RGI4;           \
	\
	G(RGI1, RGI2, x1, s0, s1, s2, s3);      \
	vmovq		a ## 2, RGI1;           \
	vpextrq $1,	a ## 2, RGI2;           \
	vmovq		RGS2, x1;               \
	vpinsrq $1,	RGS3, x1, x1;           \
	\
	G(RGI3, RGI4, y1, s1, s2, s3, s0);      \
	vmovq		b ## 2, RGI3;           \
	vpextrq $1,	b ## 2, RGI4;           \
	vmovq		RGS2, y1;               \
	vpinsrq $1,	RGS3, y1, y1;           \
	\
	G(RGI1, RGI2, x2, s0, s1, s2, s3);      \
	vmovq		RGS2, x2;               \
	vpinsrq $1,	RGS3, x2, x2;           \
	\
	G(RGI3, RGI4, y2, s1, s2, s3, s0);      \
	vmovq		RGS2, y2;               \
	vpinsrq $1,	RGS3, y2, y2;

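/*
 * encround_tail/decround_tail: the arithmetic half of one round: the
 * PHT (x += y; y += x), round-subkey addition, the xors into c/d and
 * the rotates by one bit. 'prerotate' optionally pre-rotates one input
 * register for the following round while the adds are still in flight.
 */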
#define encround_tail(a, b, c, d, x, y, prerotate) \
	vpaddd			x, y,   x; \
	vpaddd			x, RK1, RT;\
	prerotate(b);              \
	vpxor			RT, c,  c; \
	vpaddd			y, x,   y; \
	vpaddd			y, RK2, y; \
	vpsrld $1,		c, RT;     \
	vpslld $(32 - 1),	c, c;      \
	vpor			c, RT,  c; \
	vpxor			d, y,   d; \

#define decround_tail(a, b, c, d, x, y, prerotate) \
	vpaddd			x, y,   x; \
	vpaddd			x, RK1, RT;\
	prerotate(a);              \
	vpxor			RT, c,  c; \
	vpaddd			y, x,   y; \
	vpaddd			y, RK2, y; \
	vpxor			d, y,   d; \
	vpsrld $1,		d, y;      \
	vpslld $(32 - 1),	d, d;      \
	vpor			d, y,   d; \

#define rotate_1l(x) \
	vpslld $1,		x, RR;     \
	vpsrld $(32 - 1),	x, x;      \
	vpor			x, RR,  x;

#define preload_rgi(c) \
	vmovq			c, RGI1; \
	vpextrq $1,		c, RGI2;

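/*
 * encrypt_round/decrypt_round: one complete round over all eight
 * blocks; the two 32-bit round subkeys are broadcast into RK1/RK2,
 * then the shared lookup head and the two per-group tails are expanded.
 */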
#define encrypt_round(n, a, b, c, d, preload, prerotate) \
	vbroadcastss (k+4*(2*(n)))(CTX),   RK1;           \
	vbroadcastss (k+4*(2*(n)+1))(CTX), RK2;           \
	round_head_2(a, b, RX0, RY0, RX1, RY1);           \
	encround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
	preload(c ## 1);                                  \
	encround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);

#define decrypt_round(n, a, b, c, d, preload, prerotate) \
	vbroadcastss (k+4*(2*(n)))(CTX),   RK1;           \
	vbroadcastss (k+4*(2*(n)+1))(CTX), RK2;           \
	round_head_2(a, b, RX0, RY0, RX1, RY1);           \
	decround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
	preload(c ## 1);                                  \
	decround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);

#define encrypt_cycle(n) \
	encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
	encrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l);

#define encrypt_cycle_last(n) \
	encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
	encrypt_round(((2*n) + 1), RC, RD, RA, RB, dummy, dummy);

#define decrypt_cycle(n) \
	decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
	decrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l);

#define decrypt_cycle_last(n) \
	decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
	decrypt_round((2*n), RA, RB, RC, RD, dummy, dummy);

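/*
 * transpose_4x4: transpose a 4x4 matrix of 32-bit words, converting
 * four blocks between block-per-register and word-per-register form.
 */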
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;

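/*
 * inpack_blocks: load four 16-byte blocks, xor in the whitening key
 * and transpose to word-sliced form. The outunpack variants below do
 * the reverse; outunpack_xor_blocks additionally xors the result into
 * the data already at the destination.
 */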
#define inpack_blocks(in, x0, x1, x2, x3, wkey, t0, t1, t2) \
	vpxor (0*4*4)(in),	wkey, x0; \
	vpxor (1*4*4)(in),	wkey, x1; \
	vpxor (2*4*4)(in),	wkey, x2; \
	vpxor (3*4*4)(in),	wkey, x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(out, x0, x1, x2, x3, wkey, t0, t1, t2) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpxor x0, wkey, x0;           \
	vmovdqu x0, (0*4*4)(out);     \
	vpxor x1, wkey, x1;           \
	vmovdqu x1, (1*4*4)(out);     \
	vpxor x2, wkey, x2;           \
	vmovdqu x2, (2*4*4)(out);     \
	vpxor x3, wkey, x3;           \
	vmovdqu x3, (3*4*4)(out);

#define outunpack_xor_blocks(out, x0, x1, x2, x3, wkey, t0, t1, t2) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpxor x0, wkey, x0;           \
	vpxor (0*4*4)(out), x0, x0;   \
	vmovdqu x0, (0*4*4)(out);     \
	vpxor x1, wkey, x1;           \
	vpxor (1*4*4)(out), x1, x1;   \
	vmovdqu x1, (1*4*4)(out);     \
	vpxor x2, wkey, x2;           \
	vpxor (2*4*4)(out), x2, x2;   \
	vmovdqu x2, (2*4*4)(out);     \
	vpxor x3, wkey, x3;           \
	vpxor (3*4*4)(out), x3, x3;   \
	vmovdqu x3, (3*4*4)(out);

.align 8
.global __twofish_enc_blk_8way
.type   __twofish_enc_blk_8way,@function;

__twofish_enc_blk_8way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: bool, if true: xor output
	 */

	pushq %rbp;
	pushq %rbx;
	pushq %rcx;

	vmovdqu w(CTX), RK1;

	leaq (4*4*4)(%rdx), %rax;
	inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
	preload_rgi(RA1);
	rotate_1l(RD1);
	inpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
	rotate_1l(RD2);

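	/* keep dst in %r11: %rsi doubles as RID2 inside the round macros */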
	movq %rsi, %r11;

	encrypt_cycle(0);
	encrypt_cycle(1);
	encrypt_cycle(2);
	encrypt_cycle(3);
	encrypt_cycle(4);
	encrypt_cycle(5);
	encrypt_cycle(6);
	encrypt_cycle_last(7);

	vmovdqu (w+4*4)(CTX), RK1;

	popq %rcx;
	popq %rbx;
	popq %rbp;

	leaq (4*4*4)(%r11), %rax;

	testb %cl, %cl;
	jnz __enc_xor8;

	outunpack_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
	outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);

	ret;

__enc_xor8:
	outunpack_xor_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
	outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);

	ret;

.align 8
.global twofish_dec_blk_8way
.type   twofish_dec_blk_8way,@function;

twofish_dec_blk_8way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqu (w+4*4)(CTX), RK1;

	leaq (4*4*4)(%rdx), %rax;
	inpack_blocks(%rdx, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
	preload_rgi(RC1);
	rotate_1l(RA1);
	inpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
	rotate_1l(RA2);

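	/* keep dst in %r11: %rsi doubles as RID2 inside the round macros */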
	movq %rsi, %r11;

	decrypt_cycle(7);
	decrypt_cycle(6);
	decrypt_cycle(5);
	decrypt_cycle(4);
	decrypt_cycle(3);
	decrypt_cycle(2);
	decrypt_cycle(1);
	decrypt_cycle_last(0);

	vmovdqu (w)(CTX), RK1;

	popq %rbx;
	popq %rbp;

	leaq (4*4*4)(%r11), %rax;
	outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
	outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);

	ret;