Lines Matching refs:TMP2

199 .macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
202 movdqa SHUF_MASK(%rip), \TMP2
203 pshufb \TMP2, \TMP3
207 movdqa \TMP3, \TMP2
209 psrlq $63, \TMP2
210 movdqa \TMP2, \TMP1
211 pslldq $8, \TMP2
213 por \TMP2, \TMP3
217 pshufd $0x24, \TMP1, \TMP2
218 pcmpeqd TWOONE(%rip), \TMP2
219 pand POLY(%rip), \TMP2
220 pxor \TMP2, \TMP3
228 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
236 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
243 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
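
The PRECOMPUTE matches above byte-reflect the hash key, shift it left by one bit across the full 128 bits, and conditionally fold in the reduction polynomial (POLY) when the bit shifted out of bit 127 was set; the trailing GHASH_MUL calls then derive HashKey^2..^4. A minimal C intrinsics sketch of that HashKey<<1 mod poly step, assuming the standard GHASH constants and using a scalar branch instead of the branch-free TWOONE/pcmpeqd trick (function and variable names are illustrative, not from the kernel source):

    #include <emmintrin.h>   /* SSE2 */
    #include <tmmintrin.h>   /* _mm_shuffle_epi8 (SSSE3) */

    static __m128i hashkey_shl1_modp_sketch(__m128i h_raw)
    {
        /* Byte-reflect the key, as the SHUF_MASK/pshufb lines do. */
        const __m128i bswap = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                           8, 9, 10, 11, 12, 13, 14, 15);
        __m128i h = _mm_shuffle_epi8(h_raw, bswap);

        /* 128-bit left shift by one bit: shift each qword, then carry the
         * bit that crosses the 64-bit boundary (psllq/psrlq/pslldq/por). */
        __m128i carry = _mm_srli_epi64(h, 63);
        __m128i h2    = _mm_or_si128(_mm_slli_epi64(h, 1),
                                     _mm_slli_si128(carry, 8));

        /* If bit 127 of h was set, reduce by xoring in the GHASH polynomial
         * constant (POLY in the assembly; value assumed here). */
        if (_mm_cvtsi128_si32(_mm_srli_si128(carry, 8)) & 1)
            h2 = _mm_xor_si128(h2, _mm_set_epi32((int)0xc2000000, 0, 0, 1));
        return h2;
    }
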
514 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
516 pshufd $78, \GH, \TMP2
518 pxor \GH, \TMP2 # TMP2 = a1+a0
522 pclmulqdq $0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
523 pxor \GH, \TMP2
524 pxor \TMP1, \TMP2 # TMP2 = (a0*b1)+(a1*b0)
525 movdqa \TMP2, \TMP3
527 psrldq $8, \TMP2 # right shift TMP2 2 DWs
529 pxor \TMP2, \TMP1 # TMP1:GH holds the result of GH*HK
533 movdqa \GH, \TMP2
535 movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4
538 pslld $31, \TMP2 # packed left shift <<31
541 pxor \TMP3, \TMP2 # xor the shifted versions
542 pxor \TMP4, \TMP2
543 movdqa \TMP2, \TMP5
545 pslldq $12, \TMP2 # left shift TMP2 3 DWs
546 pxor \TMP2, \GH
550 movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4
555 psrld $1,\TMP2 # packed right shift >>1
558 pxor \TMP3,\TMP2 # xor the shifted versions
559 pxor \TMP4,\TMP2
560 pxor \TMP5, \TMP2
561 pxor \TMP2, \GH
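
GHASH_MUL, as the matches above show, performs one Karatsuba carry-less multiplication (three pclmulqdq, with TMP2 carrying the (a1+a0)*(b1+b0) middle term) followed by a two-phase shift-based reduction. A C intrinsics sketch of the same computation, for reference only (function name and structure are not from the kernel source; requires PCLMUL):

    #include <emmintrin.h>   /* SSE2 */
    #include <wmmintrin.h>   /* _mm_clmulepi64_si128 (PCLMUL) */

    static __m128i ghash_mul_sketch(__m128i gh, __m128i hk)
    {
        /* Karatsuba: three carry-less multiplies instead of four. */
        __m128i a1b1 = _mm_clmulepi64_si128(gh, hk, 0x11);            /* a1*b1 */
        __m128i a0b0 = _mm_clmulepi64_si128(gh, hk, 0x00);            /* a0*b0 */
        __m128i asum = _mm_xor_si128(gh, _mm_shuffle_epi32(gh, 78));  /* a1+a0 */
        __m128i bsum = _mm_xor_si128(hk, _mm_shuffle_epi32(hk, 78));  /* b1+b0 */
        __m128i mid  = _mm_clmulepi64_si128(asum, bsum, 0x00);  /* (a1+a0)*(b1+b0) */

        /* (a1+a0)*(b1+b0) + a1*b1 + a0*b0 = a0*b1 + a1*b0 */
        mid = _mm_xor_si128(mid, _mm_xor_si128(a1b1, a0b0));

        /* Fold the middle term into the 256-bit product hi:lo. */
        __m128i lo = _mm_xor_si128(a0b0, _mm_slli_si128(mid, 8));
        __m128i hi = _mm_xor_si128(a1b1, _mm_srli_si128(mid, 8));

        /* First phase of the reduction: << 31, << 30, << 25. */
        __m128i t = _mm_xor_si128(_mm_slli_epi32(lo, 31),
                    _mm_xor_si128(_mm_slli_epi32(lo, 30),
                                  _mm_slli_epi32(lo, 25)));
        __m128i keep = _mm_srli_si128(t, 4);          /* saved for phase two */
        lo = _mm_xor_si128(lo, _mm_slli_si128(t, 12));

        /* Second phase of the reduction: >> 1, >> 2, >> 7. */
        __m128i u = _mm_xor_si128(_mm_srli_epi32(lo, 1),
                    _mm_xor_si128(_mm_srli_epi32(lo, 2),
                                  _mm_srli_epi32(lo, 7)));
        lo = _mm_xor_si128(lo, _mm_xor_si128(u, keep));
        return _mm_xor_si128(lo, hi);                 /* GH*HK, reduced */
    }
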
598 .macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
612 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
628 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
791 .macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
804 MOVADQ 0(%arg1),\TMP2
813 pxor \TMP2, %xmm\index
853 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
855 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
857 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
860 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
862 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
865 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
918 MOVADQ (%r10),\TMP2
920 aesenc \TMP2, %xmm\index
927 MOVADQ (%r10), \TMP2
928 aesenclast \TMP2, \XMM1
929 aesenclast \TMP2, \XMM2
930 aesenclast \TMP2, \XMM3
931 aesenclast \TMP2, \XMM4
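
The INITIAL_BLOCKS_ENC_DEC matches show the per-block AES flow: round key 0 is xored in (pxor), the middle rounds run through aesenc from the expanded key schedule, and the last round key is applied with aesenclast to XMM1..XMM4. The same flow in C intrinsics, as a sketch (round_keys and nrounds are illustrative parameters; requires AES-NI):

    #include <emmintrin.h>   /* SSE2 */
    #include <wmmintrin.h>   /* _mm_aesenc_si128, _mm_aesenclast_si128 */

    static __m128i aes_encrypt_block_sketch(__m128i block,
                                            const __m128i *round_keys,
                                            int nrounds)        /* 10, 12 or 14 */
    {
        block = _mm_xor_si128(block, round_keys[0]);            /* pxor \TMP2, %xmm\index */
        for (int i = 1; i < nrounds; i++)
            block = _mm_aesenc_si128(block, round_keys[i]);     /* aesenc \TMP2, ...      */
        return _mm_aesenclast_si128(block, round_keys[nrounds]); /* aesenclast \TMP2, \XMM1..4 */
    }
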
980 .macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
1027 pshufd $78, \XMM6, \TMP2
1028 pxor \XMM6, \TMP2
1043 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1052 pxor \TMP2, \TMP6
1054 pshufd $78, \XMM7, \TMP2
1055 pxor \XMM7, \TMP2
1073 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1082 pxor \TMP2, \TMP6
1088 pshufd $78, \XMM8, \TMP2
1089 pxor \XMM8, \TMP2
1120 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1140 pxor \TMP6, \TMP2
1141 pxor \TMP1, \TMP2
1142 pxor \XMM5, \TMP2
1143 movdqa \TMP2, \TMP3
1145 psrldq $8, \TMP2 # right shift TMP2 2 DWs
1147 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5
1151 movdqa \XMM5, \TMP2
1154 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1155 pslld $31, \TMP2 # packed left shift << 31
1158 pxor \TMP3, \TMP2 # xor the shifted versions
1159 pxor \TMP4, \TMP2
1160 movdqa \TMP2, \TMP5
1162 pslldq $12, \TMP2 # left shift TMP2 3 DWs
1163 pxor \TMP2, \XMM5
1167 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1170 psrld $1, \TMP2 # packed right shift >>1
1173 pxor \TMP3,\TMP2 # xor the shifted versions
1174 pxor \TMP4,\TMP2
1175 pxor \TMP5, \TMP2
1176 pxor \TMP2, \XMM5
1188 .macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
1235 pshufd $78, \XMM6, \TMP2
1236 pxor \XMM6, \TMP2
1251 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1260 pxor \TMP2, \TMP6
1262 pshufd $78, \XMM7, \TMP2
1263 pxor \XMM7, \TMP2
1281 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1290 pxor \TMP2, \TMP6
1296 pshufd $78, \XMM8, \TMP2
1297 pxor \XMM8, \TMP2
1328 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1352 pxor \TMP6, \TMP2
1353 pxor \TMP1, \TMP2
1354 pxor \XMM5, \TMP2
1355 movdqa \TMP2, \TMP3
1357 psrldq $8, \TMP2 # right shift TMP2 2 DWs
1359 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5
1363 movdqa \XMM5, \TMP2
1366 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1367 pslld $31, \TMP2 # packed left shift << 31
1370 pxor \TMP3, \TMP2 # xor the shifted versions
1371 pxor \TMP4, \TMP2
1372 movdqa \TMP2, \TMP5
1374 pslldq $12, \TMP2 # left shift TMP2 3 DWs
1375 pxor \TMP2, \XMM5
1379 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1382 psrld $1, \TMP2 # packed right shift >>1
1385 pxor \TMP3,\TMP2 # xor the shifted versions
1386 pxor \TMP4,\TMP2
1387 pxor \TMP5, \TMP2
1388 pxor \TMP2, \XMM5
1395 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1401 pshufd $78, \XMM1, \TMP2
1402 pxor \XMM1, \TMP2
1407 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1409 movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
1414 pshufd $78, \XMM2, \TMP2
1415 pxor \XMM2, \TMP2
1420 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1423 pxor \TMP2, \XMM1
1429 pshufd $78, \XMM3, \TMP2
1430 pxor \XMM3, \TMP2
1435 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1438 pxor \TMP2, \XMM1 # results accumulated in TMP6, XMMDst, XMM1
1442 pshufd $78, \XMM4, \TMP2
1443 pxor \XMM4, \TMP2
1448 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1451 pxor \XMM1, \TMP2
1452 pxor \TMP6, \TMP2
1453 pxor \XMMDst, \TMP2
1455 movdqa \TMP2, \TMP4
1457 psrldq $8, \TMP2 # right shift TMP2 2 DWs
1459 pxor \TMP2, \TMP6
1462 movdqa \XMMDst, \TMP2
1465 # move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
1466 pslld $31, \TMP2 # packed left shift << 31
1469 pxor \TMP3, \TMP2 # xor the shifted versions
1470 pxor \TMP4, \TMP2
1471 movdqa \TMP2, \TMP7
1473 pslldq $12, \TMP2 # left shift TMP2 3 DWs
1474 pxor \TMP2, \XMMDst
1477 movdqa \XMMDst, \TMP2
1481 psrld $1, \TMP2 # packed right shift >> 1
1484 pxor \TMP3, \TMP2 # xor the shifted versions
1485 pxor \TMP4, \TMP2
1486 pxor \TMP7, \TMP2
1487 pxor \TMP2, \XMMDst
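
GHASH_4_ENCRYPT_4_PARALLEL_enc/_dec and GHASH_LAST_4 apply the same Karatsuba pattern to four blocks at once against HashKey_4..HashKey (precomputed by PRECOMPUTE), accumulating the unreduced partial products in TMP6/XMMDst/XMM1 and reducing only once. Because the reduction is GF(2)-linear, reducing each product separately is mathematically equivalent, so the four-block fold can be sketched on top of ghash_mul_sketch from above (names are illustrative):

    /* Per 4-block stride: Y' = (Y+X1)*H^4 + X2*H^3 + X3*H^2 + X4*H */
    static __m128i ghash_4_blocks_sketch(__m128i y, const __m128i x[4],
                                         const __m128i hpow[4])   /* H^4..H^1 */
    {
        __m128i acc = ghash_mul_sketch(_mm_xor_si128(y, x[0]), hpow[0]);
        acc = _mm_xor_si128(acc, ghash_mul_sketch(x[1], hpow[1]));
        acc = _mm_xor_si128(acc, ghash_mul_sketch(x[2], hpow[2]));
        return _mm_xor_si128(acc, ghash_mul_sketch(x[3], hpow[3]));
    }
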