########################################################################
# Implement fast SHA-512 with AVX2 instructions. (x86_64)
#
# Copyright (C) 2013 Intel Corporation.
#
# This code is described in an Intel White-Paper:
# "Fast SHA-512 Implementations on Intel Architecture Processors"
#
# This code schedules 1 block at a time, with 4 lanes per block
########################################################################
#ifdef CONFIG_AS_AVX2
#include <linux/linkage.h>

.text

# Virtual Registers
Y_0 = %ymm4
Y_1 = %ymm5
Y_2 = %ymm6
Y_3 = %ymm7

YTMP0 = %ymm0
YTMP1 = %ymm1
YTMP2 = %ymm2
YTMP3 = %ymm3
YTMP4 = %ymm8
XFER  = YTMP0

BYTE_FLIP_MASK = %ymm9

# 1st arg
CTX      = %rdi
# 2nd arg
INP      = %rsi
# 3rd arg
NUM_BLKS = %rdx

c  = %rcx
d  = %r8
e  = %rdx	# clobbers NUM_BLKS once the end-of-input pointer is saved
y3 = %rsi	# clobbers INP once the input pointer is saved

TBL = %rbp

a = %rax
b = %rbx

f = %r9
g = %r10
h = %r11
old_h = %r11	# alias of h: holds the pre-rotation h after RotateState

T1 = %r12
y0 = %r13
y1 = %r14
y2 = %r15

y4 = %r12	# alias of T1

# Local variables (stack frame)
XFER_SIZE    = 4*8
SRND_SIZE    = 1*8
INP_SIZE     = 1*8
INPEND_SIZE  = 1*8
RSPSAVE_SIZE = 1*8
GPRSAVE_SIZE = 6*8

frame_XFER    = 0
frame_SRND    = frame_XFER + XFER_SIZE
frame_INP     = frame_SRND + SRND_SIZE
frame_INPEND  = frame_INP + INP_SIZE
frame_RSPSAVE = frame_INPEND + INPEND_SIZE
frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
frame_size    = frame_GPRSAVE + GPRSAVE_SIZE

## assume buffers not aligned
#define VMOVDQ vmovdqu

# addm [mem], reg
# Add reg to mem using reg-mem add and store
.macro addm p1 p2
	add	\p1, \p2
	mov	\p2, \p1
.endm

# COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask
# Load ymm with mem and byte swap each quad word
.macro COPY_YMM_AND_BSWAP p1 p2 p3
	VMOVDQ	\p2, \p1
	vpshufb	\p3, \p1, \p1
.endm

# rotate_Ys
# Rotate values of symbols Y0...Y3
.macro rotate_Ys
	Y_  = Y_0
	Y_0 = Y_1
	Y_1 = Y_2
	Y_2 = Y_3
	Y_3 = Y_
.endm

# RotateState
# Rotate values of symbols a...h right
.macro RotateState
	old_h  = h
	TMP_   = h
	h      = g
	g      = f
	f      = e
	e      = d
	d      = c
	c      = b
	b      = a
	a      = TMP_
.endm

# macro MY_VPALIGNR	YDST, YSRC1, YSRC2, RVAL
# YDST = {YSRC1, YSRC2} >> RVAL*8
.macro MY_VPALIGNR YDST YSRC1 YSRC2 RVAL
	vperm2f128	$0x3, \YSRC2, \YSRC1, \YDST	# YDST = {YS1_LO, YS2_HI}
	vpalignr	$\RVAL, \YSRC2, \YDST, \YDST	# YDST = {YS1, YS2} >> RVAL*8
.endm
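
########################################################################
# Reference equations for the round code below (FIPS 180-4, SHA-512):
#   message schedule: W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
#     sigma0(x) = (x ror  1) ^ (x ror  8) ^ (x >> 7)
#     sigma1(x) = (x ror 19) ^ (x ror 61) ^ (x >> 6)
#   round function:
#     S1  = (e ror 14) ^ (e ror 18) ^ (e ror 41)
#     S0  = (a ror 28) ^ (a ror 34) ^ (a ror 39)
#     CH  = (e & f) ^ (~e & g)		# computed below as ((f ^ g) & e) ^ g
#     MAJ = (a&b) ^ (a&c) ^ (b&c)	# computed below as ((a | c) & b) | (a & c)
#     T1 = h + S1 + CH + K[t] + W[t];  d += T1;  h = T1 + S0 + MAJ
# FOUR_ROUNDS_AND_SCHED interleaves four scalar rounds with the vector
# computation of the next four schedule qwords, to hide instruction latency.
########################################################################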

.macro FOUR_ROUNDS_AND_SCHED
################################### RND N + 0 #########################################

	# Extract w[t-7]
	MY_VPALIGNR	YTMP0, Y_3, Y_2, 8	# YTMP0 = W[-7]
	# Calculate w[t-16] + w[t-7]
	vpaddq		Y_0, YTMP0, YTMP0	# YTMP0 = W[-7] + W[-16]
	# Extract w[t-15]
	MY_VPALIGNR	YTMP1, Y_1, Y_0, 8	# YTMP1 = W[-15]

	# Calculate sigma0

	# Calculate w[t-15] ror 1
	vpsrlq		$1, YTMP1, YTMP2
	vpsllq		$(64-1), YTMP1, YTMP3
	vpor		YTMP2, YTMP3, YTMP3	# YTMP3 = W[-15] ror 1
	# Calculate w[t-15] shr 7
	vpsrlq		$7, YTMP1, YTMP4	# YTMP4 = W[-15] >> 7

	mov	a, y3		# y3 = a				# MAJA
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	add	frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA
	mov	f, y2		# y2 = f				# CH
	rorx	$34, a, T1	# T1 = a >> 34				# S0B

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	xor	g, y2		# y2 = f^g				# CH
	rorx	$14, e, y1	# y1 = (e >> 14)			# S1

	and	e, y2		# y2 = (f^g)&e				# CH
	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	add	h, d		# d = k + w + h + d			# --

	and	b, y3		# y3 = (a|c)&b				# MAJA
	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0

	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH
	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	c, T1		# T1 = a&c				# MAJB

	add	y0, y2		# y2 = S1 + CH				# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	add	y2, h		# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	add	y3, h		# h = t1 + S0 + MAJ			# --

	RotateState

################################### RND N + 1 #########################################

	# Calculate w[t-15] ror 8
	vpsrlq		$8, YTMP1, YTMP2
	vpsllq		$(64-8), YTMP1, YTMP1
	vpor		YTMP2, YTMP1, YTMP1	# YTMP1 = W[-15] ror 8
	# XOR the three components
	vpxor		YTMP4, YTMP3, YTMP3	# YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
	vpxor		YTMP1, YTMP3, YTMP1	# YTMP1 = s0

	# Add three components, w[t-16], w[t-7] and sigma0
	vpaddq		YTMP1, YTMP0, YTMP0	# YTMP0 = W[-16] + W[-7] + s0
	# Move to appropriate lanes for calculating w[16] and w[17]
	vperm2f128	$0x0, YTMP0, YTMP0, Y_0	# Y_0 = W[-16] + W[-7] + s0 {BABA}
	# Move to appropriate lanes for calculating w[18] and w[19]
	vpand		MASK_YMM_LO(%rip), YTMP0, YTMP0	# YTMP0 = W[-16] + W[-7] + s0 {DC00}

	# Calculate w[16] and w[17] in both 128 bit lanes
	# Calculate sigma1 for w[16] and w[17] on both 128 bit lanes
	vperm2f128	$0x11, Y_3, Y_3, YTMP2	# YTMP2 = W[-2] {BABA}
	vpsrlq		$6, YTMP2, YTMP4	# YTMP4 = W[-2] >> 6 {BABA}

	mov	a, y3		# y3 = a				# MAJA
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	add	1*8+frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA

	mov	f, y2		# y2 = f				# CH
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	xor	g, y2		# y2 = f^g				# CH

	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	and	e, y2		# y2 = (f^g)&e				# CH
	add	h, d		# d = k + w + h + d			# --

	and	b, y3		# y3 = (a|c)&b				# MAJA
	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0

	rorx	$28, a, T1	# T1 = (a >> 28)			# S0
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --
	add	y2, h		# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	add	y3, h		# h = t1 + S0 + MAJ			# --

	RotateState

################################### RND N + 2 #########################################

	vpsrlq		$19, YTMP2, YTMP3	# YTMP3 = W[-2] >> 19 {BABA}
	vpsllq		$(64-19), YTMP2, YTMP1	# YTMP1 = W[-2] << (64-19) {BABA}
	vpor		YTMP1, YTMP3, YTMP3	# YTMP3 = W[-2] ror 19 {BABA}
	vpxor		YTMP3, YTMP4, YTMP4	# YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA}
	vpsrlq		$61, YTMP2, YTMP3	# YTMP3 = W[-2] >> 61 {BABA}
	vpsllq		$(64-61), YTMP2, YTMP1	# YTMP1 = W[-2] << (64-61) {BABA}
	vpor		YTMP1, YTMP3, YTMP3	# YTMP3 = W[-2] ror 61 {BABA}
	vpxor		YTMP3, YTMP4, YTMP4	# YTMP4 = s1 {BABA}

	# Add sigma1 to the other components to get w[16] and w[17]
	vpaddq		YTMP4, Y_0, Y_0		# Y_0 = {W[1], W[0], W[1], W[0]}

	# Start calculating sigma1 for w[18] and w[19] in the upper 128 bit lane
	vpsrlq		$6, Y_0, YTMP4		# YTMP4 = W[-2] >> 6 {DC--}

	mov	a, y3		# y3 = a				# MAJA
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	add	2*8+frame_XFER(%rsp), h	# h = k + w + h			# --

	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	or	c, y3		# y3 = a|c				# MAJA
	mov	f, y2		# y2 = f				# CH
	xor	g, y2		# y2 = f^g				# CH

	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	and	e, y2		# y2 = (f^g)&e				# CH

	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	add	h, d		# d = k + w + h + d			# --
	and	b, y3		# y3 = (a|c)&b				# MAJA

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --
	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --
	add	y2, h		# h = k + w + h + S0 + S1 + CH = t1 + S0# --

	add	y3, h		# h = t1 + S0 + MAJ			# --

	RotateState

################################### RND N + 3 #########################################

	vpsrlq		$19, Y_0, YTMP3		# YTMP3 = W[-2] >> 19 {DC--}
	vpsllq		$(64-19), Y_0, YTMP1	# YTMP1 = W[-2] << (64-19) {DC--}
	vpor		YTMP1, YTMP3, YTMP3	# YTMP3 = W[-2] ror 19 {DC--}
	vpxor		YTMP3, YTMP4, YTMP4	# YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--}
	vpsrlq		$61, Y_0, YTMP3		# YTMP3 = W[-2] >> 61 {DC--}
	vpsllq		$(64-61), Y_0, YTMP1	# YTMP1 = W[-2] << (64-61) {DC--}
	vpor		YTMP1, YTMP3, YTMP3	# YTMP3 = W[-2] ror 61 {DC--}
	vpxor		YTMP3, YTMP4, YTMP4	# YTMP4 = s1 {DC--}

	# Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19]
	# to newly calculated sigma1 to get w[18] and w[19]
	vpaddq		YTMP4, YTMP0, YTMP2	# YTMP2 = {W[3], W[2], --, --}

	# Form w[19], w[18], w[17], w[16]
	vpblendd	$0xF0, YTMP2, Y_0, Y_0	# Y_0 = {W[3], W[2], W[1], W[0]}

	mov	a, y3		# y3 = a				# MAJA
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	add	3*8+frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA

	mov	f, y2		# y2 = f				# CH
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	xor	g, y2		# y2 = f^g				# CH

	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	and	e, y2		# y2 = (f^g)&e				# CH
	add	h, d		# d = k + w + h + d			# --
	and	b, y3		# y3 = (a|c)&b				# MAJA

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH

	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	add	y0, y2		# y2 = S1 + CH				# --

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	rorx	$28, a, T1	# T1 = (a >> 28)			# S0

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	c, T1		# T1 = a&c				# MAJB
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ

	add	y1, h		# h = k + w + h + S0			# --
	add	y2, h		# h = t1 + S0				# --
	add	y3, h		# h = t1 + S0 + MAJ			# --

	RotateState

	rotate_Ys
.endm
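
########################################################################
# DO_4ROUNDS: four SHA-512 rounds using the precomputed K[t] + W[t]
# values stored at frame_XFER(%rsp), with no message scheduling. Used
# for the final 16 rounds, where no further W values are needed. Note
# the deferred bookkeeping: rounds 1-3 begin by folding the previous
# round's y2/y3 sums into old_h (the pre-rotation h), and only the last
# round folds its own sums into h directly.
########################################################################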

.macro DO_4ROUNDS

################################### RND N + 0 #########################################

	mov	f, y2		# y2 = f				# CH
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	xor	g, y2		# y2 = f^g				# CH

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	and	e, y2		# y2 = (f^g)&e				# CH

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	mov	a, y3		# y3 = a				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0
	add	frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	b, y3		# y3 = (a|c)&b				# MAJA
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	add	h, d		# d = k + w + h + d			# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	RotateState

################################### RND N + 1 #########################################

	add	y2, old_h	# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	mov	f, y2		# y2 = f				# CH
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	xor	g, y2		# y2 = f^g				# CH

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	and	e, y2		# y2 = (f^g)&e				# CH
	add	y3, old_h	# h = t1 + S0 + MAJ			# --

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	mov	a, y3		# y3 = a				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0
	add	8*1+frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	b, y3		# y3 = (a|c)&b				# MAJA
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	add	h, d		# d = k + w + h + d			# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	RotateState

################################### RND N + 2 #########################################

	add	y2, old_h	# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	mov	f, y2		# y2 = f				# CH
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	xor	g, y2		# y2 = f^g				# CH

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	and	e, y2		# y2 = (f^g)&e				# CH
	add	y3, old_h	# h = t1 + S0 + MAJ			# --

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	mov	a, y3		# y3 = a				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0
	add	8*2+frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	b, y3		# y3 = (a|c)&b				# MAJA
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	add	h, d		# d = k + w + h + d			# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	RotateState

################################### RND N + 3 #########################################

	add	y2, old_h	# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	mov	f, y2		# y2 = f				# CH
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	xor	g, y2		# y2 = f^g				# CH

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	and	e, y2		# y2 = (f^g)&e				# CH
	add	y3, old_h	# h = t1 + S0 + MAJ			# --

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	mov	a, y3		# y3 = a				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0
	add	8*3+frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	b, y3		# y3 = (a|c)&b				# MAJA
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	add	h, d		# d = k + w + h + d			# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	add	y2, h		# h = k + w + h + S0 + S1 + CH = t1 + S0# --

	add	y3, h		# h = t1 + S0 + MAJ			# --

	RotateState

.endm

########################################################################
# void sha512_transform_rorx(void *D, const void *M, uint64_t L)
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
# The size of the message pointed to by M must be an integer multiple of
# SHA512 message blocks.
# L is the message length in SHA512 blocks
########################################################################
ENTRY(sha512_transform_rorx)
	# Allocate stack space, aligned to 32 bytes for the vmovdqa spills
	mov	%rsp, %rax
	sub	$frame_size, %rsp
	and	$~(0x20 - 1), %rsp
	mov	%rax, frame_RSPSAVE(%rsp)

	# Save GPRs
	mov	%rbp, frame_GPRSAVE(%rsp)
	mov	%rbx, 8*1+frame_GPRSAVE(%rsp)
	mov	%r12, 8*2+frame_GPRSAVE(%rsp)
	mov	%r13, 8*3+frame_GPRSAVE(%rsp)
	mov	%r14, 8*4+frame_GPRSAVE(%rsp)
	mov	%r15, 8*5+frame_GPRSAVE(%rsp)

	shl	$7, NUM_BLKS	# convert to bytes
	jz	done_hash
	add	INP, NUM_BLKS	# pointer to end of data
	mov	NUM_BLKS, frame_INPEND(%rsp)

	## load initial digest
	mov	8*0(CTX), a
	mov	8*1(CTX), b
	mov	8*2(CTX), c
	mov	8*3(CTX), d
	mov	8*4(CTX), e
	mov	8*5(CTX), f
	mov	8*6(CTX), g
	mov	8*7(CTX), h

	vmovdqa	PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK

loop0:
	lea	K512(%rip), TBL

	## byte swap the first 16 qwords of the 128-byte block
	COPY_YMM_AND_BSWAP	Y_0, (INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_1, 1*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_2, 2*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_3, 3*32(INP), BYTE_FLIP_MASK

	mov	INP, frame_INP(%rsp)

	## schedule the remaining 64 input qwords while doing rounds 0..63
	movq	$4, frame_SRND(%rsp)
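
	# Round bookkeeping: loop1 runs 4 times, each iteration doing
	# 4 x FOUR_ROUNDS_AND_SCHED = 16 rounds (rounds 0..63, with message
	# scheduling); loop2 then runs twice, each iteration doing
	# 2 x DO_4ROUNDS = 8 rounds (rounds 64..79, no scheduling), giving
	# the full 80 rounds of SHA-512.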

.align 16
loop1:
	vpaddq	(TBL), Y_0, XFER	# XFER = K[t] + W[t] for the next 4 rounds
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	1*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	2*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	3*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	add	$(4*32), TBL
	FOUR_ROUNDS_AND_SCHED

	subq	$1, frame_SRND(%rsp)
	jne	loop1

	movq	$2, frame_SRND(%rsp)
loop2:
	vpaddq	(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	DO_4ROUNDS
	vpaddq	1*32(TBL), Y_1, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	add	$(2*32), TBL
	DO_4ROUNDS

	vmovdqa	Y_2, Y_0	# shift the remaining W values down
	vmovdqa	Y_3, Y_1

	subq	$1, frame_SRND(%rsp)
	jne	loop2

	# Add the working variables back into the digest
	addm	8*0(CTX), a
	addm	8*1(CTX), b
	addm	8*2(CTX), c
	addm	8*3(CTX), d
	addm	8*4(CTX), e
	addm	8*5(CTX), f
	addm	8*6(CTX), g
	addm	8*7(CTX), h

	mov	frame_INP(%rsp), INP
	add	$128, INP
	cmp	frame_INPEND(%rsp), INP
	jne	loop0

done_hash:

	# Restore GPRs
	mov	frame_GPRSAVE(%rsp), %rbp
	mov	8*1+frame_GPRSAVE(%rsp), %rbx
	mov	8*2+frame_GPRSAVE(%rsp), %r12
	mov	8*3+frame_GPRSAVE(%rsp), %r13
	mov	8*4+frame_GPRSAVE(%rsp), %r14
	mov	8*5+frame_GPRSAVE(%rsp), %r15

	# Restore Stack Pointer
	mov	frame_RSPSAVE(%rsp), %rsp
	ret
ENDPROC(sha512_transform_rorx)

########################################################################
### Binary Data

.data

.align 64
# K[t] used in SHA512 hashing
K512:
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817

.align 32

# Mask for byte-swapping the qwords of the (big-endian) message via vpshufb
PSHUFFLE_BYTE_FLIP_MASK:
	.octa 0x08090a0b0c0d0e0f0001020304050607
	.octa 0x18191a1b1c1d1e1f1011121314151617

# Mask that keeps the high 128-bit lane and zeroes the low lane (for vpand)
MASK_YMM_LO:
	.octa 0x00000000000000000000000000000000
	.octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#endif