/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 */

#include <linux/linkage.h>
#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4


/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)
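
/*
 * For reference: these offsets match a key-schedule layout like the sketch
 * below (a hedged guess from the offsets alone; the field names and the
 * <crypto/cast5.h> home are assumptions, not taken from this file):
 *
 *	struct cast5_ctx {
 *		u32 Km[16];	// masking keys        -> km = 0
 *		u8  Kr[16];	// rotation keys       -> kr = 16*4 = 64
 *		int rr;		// nonzero: short key, only 12 rounds used
 *	};			//                     -> rr = 64+16 = 80
 */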

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %rdi

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d

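/*
 * lookup_32bit: the four byte-indexed s-box lookups that make up one 32-bit
 * f() result.  src is a GPR holding the rotated intermediate I in its low
 * 32 bits; the bh/bl sub-registers index s1/s2, then src is shifted right
 * by 16 and the next byte pair indexes s3/s4.  dst accumulates via
 * op1/op2/op3 (xorl/subl/addl in an order picked by the round type), and
 * interleave_op lets the caller start shifting the other GPR early.
 */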
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);                   \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;    \
	vpslld	RKRF,	x,    RTMP; \
	vpsrld	RKRR,	x,    x;    \
	vpor	RTMP,	x,    x;    \
	\
	vmovq		x,    gi1;  \
	vpextrq $1,	x,    gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS2;                                      \
	orq		RFS1, RFS2;                                \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS1;                                      \
	orq		RFS1, RFS3;                                \
	\
	vmovq		RFS2, x;                                   \
	vpinsrq $1,	RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);             \
	F_head(b2, RX, RGI3, RGI4, op0);             \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);   \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor		a1, RX,   a1;                \
	vpxor		a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
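
/*
 * The three op tuples above correspond to CAST5's three round-function
 * types (RFC 2144, section 2.2):
 *
 *	type 1: I = ((Km + D) <<< Kr); f = ((s1[Ia] ^ s2[Ib]) - s3[Ic]) + s4[Id]
 *	type 2: I = ((Km ^ D) <<< Kr); f = ((s1[Ia] - s2[Ib]) + s3[Ic]) ^ s4[Id]
 *	type 3: I = ((Km - D) <<< Kr); f = ((s1[Ia] + s2[Ib]) ^ s3[Ic]) - s4[Id]
 *
 * op0 is the vectorized Km combine, op1..op3 the scalar s-box combiners.
 */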

#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);

#define round(l, r, n, f) \
	vbroadcastss	(km+(4*n))(CTX), RKM;        \
	vpand		R1ST, RKR, RKRF;             \
	vpsubq		RKRF, R32, RKRR;             \
	vpsrldq $1,	RKR, RKR;                    \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);

#define enc_preload_rkr() \
	vbroadcastss	.L16_mask,		RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		kr(CTX),		RKR, RKR;

#define dec_preload_rkr() \
	vbroadcastss	.L16_mask,		RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		kr(CTX),		RKR, RKR; \
	/* decryption consumes the rotation keys last-to-first */ \
	vpshufb		.Lbswap128_mask,	RKR, RKR;

#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t1; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1;

#define inpack_blocks(x0, x1, t0, t1, rmask) \
	vpshufb rmask,	x0,	x0; \
	vpshufb rmask,	x1,	x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask,	x0,	x0; \
	vpshufb rmask,	x1,	x1;
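
/*
 * Each register pair (e.g. RL1/RR1) loads four 64-bit CAST5 blocks.
 * inpack_blocks byteswaps every 32-bit word (CAST5 is a big-endian cipher,
 * the SIMD arithmetic here is little-endian) and transposes, so that x0
 * ends up holding the four L halves and x1 the four R halves of those
 * blocks; outunpack_blocks undoes the transpose and byte order.
 */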

.data

.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
	.byte 16, 16, 16, 16
.L32_mask:
	.byte 32, 0, 0, 0
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 16
__cast5_enc_blk16:
	/* input:
	 *	%rdi: ctx
	 *	RL1: blocks 1 and 2
	 *	RR1: blocks 3 and 4
	 *	RL2: blocks 5 and 6
	 *	RR2: blocks 7 and 8
	 *	RL3: blocks 9 and 10
	 *	RR3: blocks 11 and 12
	 *	RL4: blocks 13 and 14
	 *	RR4: blocks 15 and 16
	 * output:
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 */
	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	enc_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);

	/* short (<= 80-bit) keys use only 12 rounds */
	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

.L__skip_enc:
	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;
ENDPROC(__cast5_enc_blk16)

.align 16
__cast5_dec_blk16:
	/* input:
	 *	%rdi: ctx
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 * output:
	 *	RL1: decrypted blocks 1 and 2
	 *	RR1: decrypted blocks 3 and 4
	 *	RL2: decrypted blocks 5 and 6
	 *	RR2: decrypted blocks 7 and 8
	 *	RL3: decrypted blocks 9 and 10
	 *	RR3: decrypted blocks 11 and 12
	 *	RL4: decrypted blocks 13 and 14
	 *	RR4: decrypted blocks 15 and 16
	 */
	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	dec_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	/* short keys: skip undoing rounds 15..12 */
	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

.L__dec_tail:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);

	vmovdqa .Lbswap_mask, RKM;
	popq %rbx;
	popq %rbp;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;

.L__skip_dec:
	/* discard the rotation keys of the four skipped rounds */
	vpsrldq $4, RKR, RKR;
	jmp .L__dec_tail;
ENDPROC(__cast5_dec_blk16)
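
/*
 * For context, the C glue code is expected to declare the entry points
 * below roughly as follows (a sketch; the exact prototypes live in the
 * accompanying glue source, not in this file):
 *
 *	asmlinkage void cast5_ecb_enc_16way(struct cast5_ctx *ctx, u8 *dst,
 *					    const u8 *src);
 *	asmlinkage void cast5_ecb_dec_16way(struct cast5_ctx *ctx, u8 *dst,
 *					    const u8 *src);
 *	asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst,
 *					    const u8 *src);
 *	asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst,
 *					const u8 *src, __be64 *iv);
 *
 * Each call processes 16 blocks (128 bytes), with the System V argument
 * registers %rdi = ctx, %rsi = dst, %rdx = src (and %rcx = iv for CTR).
 */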

ENTRY(cast5_ecb_enc_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_enc_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	FRAME_END
	ret;
ENDPROC(cast5_ecb_enc_16way)

ENTRY(cast5_ecb_dec_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */

	FRAME_BEGIN
	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_dec_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	FRAME_END
	ret;
ENDPROC(cast5_ecb_dec_16way)

ENTRY(cast5_cbc_dec_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	vmovdqu (0*16)(%rdx), RL1;
	vmovdqu (1*16)(%rdx), RR1;
	vmovdqu (2*16)(%rdx), RL2;
	vmovdqu (3*16)(%rdx), RR2;
	vmovdqu (4*16)(%rdx), RL3;
	vmovdqu (5*16)(%rdx), RR3;
	vmovdqu (6*16)(%rdx), RL4;
	vmovdqu (7*16)(%rdx), RR4;

	call __cast5_dec_blk16;

	/* xor with src: each plaintext block is xored with the preceding
	 * ciphertext block; block 0 is left for the caller to chain with
	 * the IV (the $0x4f shuffle makes its xor operand zero) */
	vmovq (%r12), RX;
	vpshufd $0x4f, RX, RX;
	vpxor RX, RR1, RR1;
	vpxor 0*16+8(%r12), RL1, RL1;
	vpxor 1*16+8(%r12), RR2, RR2;
	vpxor 2*16+8(%r12), RL2, RL2;
	vpxor 3*16+8(%r12), RR3, RR3;
	vpxor 4*16+8(%r12), RL3, RL3;
	vpxor 5*16+8(%r12), RR4, RR4;
	vpxor 6*16+8(%r12), RL4, RL4;

	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast5_cbc_dec_16way)

ENTRY(cast5_ctr_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (big endian, 64bit)
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	vpcmpeqd RTMP, RTMP, RTMP;
	vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

	vpcmpeqd RKR, RKR, RKR;
	vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
	vmovdqa .Lbswap_iv_mask, R1ST;
	vmovdqa .Lbswap128_mask, RKM;

	/* load IV and byteswap */
	vmovq (%rcx), RX;
	vpshufb R1ST, RX, RX;

	/* construct IVs */
	vpsubq RTMP, RX, RX;  /* le: IV1, IV0 */
	vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR1;
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL2;
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR2;
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL3;
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR3;
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL4;
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR4;

	/* store last IV */
	vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
	vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
	vmovq RX, (%rcx);
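
	/*
	 * Equivalent scalar construction of the 16 counter blocks, for
	 * reference (illustrative C, not part of this file):
	 *
	 *	u64 ctr = be64_to_cpu(*(__be64 *)iv);
	 *	for (i = 0; i < 16; i++)
	 *		block[i] = cpu_to_be64(ctr + i);
	 *	*(__be64 *)iv = cpu_to_be64(ctr + 16);
	 *
	 * The vector code above does this two counters at a time by
	 * repeatedly subtracting -2 in little-endian form and swapping
	 * back to big endian.
	 */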

	call __cast5_enc_blk16;

	/* dst = src ^ iv */
	vpxor (0*16)(%r12), RR1, RR1;
	vpxor (1*16)(%r12), RL1, RL1;
	vpxor (2*16)(%r12), RR2, RR2;
	vpxor (3*16)(%r12), RL2, RL2;
	vpxor (4*16)(%r12), RR3, RR3;
	vpxor (5*16)(%r12), RL3, RL3;
	vpxor (6*16)(%r12), RR4, RR4;
	vpxor (7*16)(%r12), RL4, RL4;
	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast5_ctr_16way)