Lines Matching refs:XMM8
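XMM8 does double duty in these macros: it carries the eighth counter/cipher block through the AES rounds and holds the combined GHASH value between passes. The hits fall into two parallel halves, consistent with the Linux kernel's aesni-intel_avx-x86_64.S: the AVX macros (lines 1001-1715) and their AVX2 counterparts (lines 1949-2688).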

1001 … REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC
1080 # XMM8 has the combined result here
1082 vmovdqa \XMM8, TMP1(%rsp)
1083 vmovdqa \XMM8, \T3
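Before XMM8 is recycled, the combined GHASH value it holds is parked at TMP1 on the stack and duplicated into \T3 (1082-1083); line 1119 below then reloads XMM8 with the counter.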
1119 vmovdqa \CTR, \XMM8
1120 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
1130 vpxor \T_key, \XMM8, \XMM8
1143 vaesenc \T_key, \XMM8, \XMM8
1156 vaesenclast \T_key, \XMM8, \XMM8
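Lines 1119-1156 are the complete per-block CTR encryption: load the counter, byte-swap it, whiten with round key 0, run the middle rounds, and finish with vaesenclast. A minimal C-intrinsics sketch of the same sequence, assuming an already-expanded key schedule; encrypt_ctr_block and round_keys are hypothetical names, and key expansion is not shown (compile with -mssse3 -maes):

#include <stdint.h>
#include <tmmintrin.h>  /* SSSE3: _mm_shuffle_epi8 */
#include <wmmintrin.h>  /* AES-NI: _mm_aesenc_si128, _mm_aesenclast_si128 */

/* Same value as the file's SHUF_MASK constant: reverse all 16 bytes. */
static const uint8_t shuf_mask[16] = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
};

/* One CTR block through AES: vpshufb, vpxor with round key 0, vaesenc
 * for the middle rounds, vaesenclast for the final one.  nrounds is
 * 10/12/14 for AES-128/192/256; the schedule has nrounds+1 entries. */
static __m128i encrypt_ctr_block(__m128i ctr, const __m128i *round_keys,
                                 int nrounds)
{
    __m128i mask  = _mm_loadu_si128((const __m128i *)shuf_mask);
    __m128i block = _mm_shuffle_epi8(ctr, mask);              /* 16Byte swap */
    block = _mm_xor_si128(block, round_keys[0]);              /* vpxor \T_key */
    for (int i = 1; i < nrounds; i++)
        block = _mm_aesenc_si128(block, round_keys[i]);       /* vaesenc */
    return _mm_aesenclast_si128(block, round_keys[nrounds]);  /* vaesenclast */
}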
1208 vpxor \T1, \XMM8, \XMM8
1209 vmovdqu \XMM8, 16*7(arg3 , %r11)
1211 vmovdqa \T1, \XMM8
1224 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
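Lines 1208-1224 are the ENC_DEC-parameterized tail of the block handling: the keystream in XMM8 is XORed with the input block loaded into \T1 (1208) and the result written out (1209). Line 1211, taken on the decrypt side of ENC_DEC, moves the original input back into XMM8 so GHASH always runs over ciphertext: the block just produced when encrypting, the block just consumed when decrypting. Line 1224 byte-swaps it for hashing.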
1236 …_PARALLEL_AVX REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
1245 vmovdqa \XMM8, TMP8(%rsp)
1255 vpaddd ONE(%rip), \XMM7, \XMM8
1256 vmovdqa \XMM8, \CTR
1265 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
1274 vpaddd ONEf(%rip), \XMM7, \XMM8
1275 vmovdqa \XMM8, \CTR
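Lines 1245-1275 show the two counter-increment paths the 8-way loop chooses between, using the file's ONE and ONEf constants. A sketch of both under SSE2, with hypothetical function names:

#include <emmintrin.h>  /* SSE2: _mm_add_epi32 */

/* Slow path (1255/1265): keep the counter little-endian, add ONE to the
 * low dword, and let the caller apply the SHUF_MASK byte swap before AES. */
static __m128i next_ctr_le(__m128i *ctr_le)
{
    *ctr_le = _mm_add_epi32(*ctr_le, _mm_set_epi32(0, 0, 0, 1));  /* ONE */
    return *ctr_le;
}

/* Fast path (1274): the counter is already in byte-swapped form, so the
 * vpshufb can be skipped by adding ONEf, a 1 positioned at byte 15 (the
 * low byte of the big-endian counter).  This is only safe while that byte
 * will not wrap; the file picks the path per 8-block iteration. */
static __m128i next_ctr_be(__m128i *ctr_be)
{
    *ctr_be = _mm_add_epi32(*ctr_be,
                            _mm_set_epi32(0x01000000, 0, 0, 0));  /* ONEf */
    return *ctr_be;
}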
1289 vpxor \T1, \XMM8, \XMM8
1305 vaesenc \T1, \XMM8, \XMM8
1315 vaesenc \T1, \XMM8, \XMM8
1338 vaesenc \T1, \XMM8, \XMM8
1361 vaesenc \T1, \XMM8, \XMM8
1386 vaesenc \T1, \XMM8, \XMM8
1409 vaesenc \T1, \XMM8, \XMM8
1433 vaesenc \T1, \XMM8, \XMM8
1457 vaesenc \T1, \XMM8, \XMM8
1482 vaesenc \T5, \XMM8, \XMM8
1513 vaesenc \T5, \XMM8, \XMM8
1569 vmovdqu \XMM8, 16*7(arg3,%r11) # Write to the Ciphertext buffer
1592 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
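Inside GHASH_8_ENCRYPT_8_PARALLEL_AVX, the vaesenc chain above (1289-1513) is interleaved with the carry-less multiplies that GHASH the previous iteration's eight ciphertext blocks. Once the rounds finish, the new block is written out (1569) and byte-swapped (1592) so it can be hashed on the next pass.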
1603 .macro GHASH_LAST_8_AVX T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
1709 vpshufd $0b01001110, \XMM8, \T2
1710 vpxor \XMM8, \T2, \T2
1712 vpclmulqdq $0x11, \T5, \XMM8, \T4
1715 vpclmulqdq $0x00, \T5, \XMM8, \T4
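Lines 1709-1715 are one Karatsuba step of the final GHASH: vpshufd $0b01001110 swaps the two qwords of XMM8 so the XOR yields X1^X0, and the two vpclmulqdq instructions produce the high (imm 0x11) and low (imm 0x00) 64x64 carry-less products; a third multiply against the precomputed XOR of the hash-key halves supplies the middle term, giving three multiplies instead of four. A sketch under those assumptions; h_k stands in for the file's HashKey_i_k values, and the modular reduction that follows in the macro is not shown (compile with -mpclmul):

#include <emmintrin.h>
#include <wmmintrin.h>  /* PCLMUL: _mm_clmulepi64_si128 */

/* Karatsuba carry-less multiply of 128-bit x and h.  h_k holds the
 * precomputed H1^H0 (low qword used), matching the HashKey_k layout. */
static void ghash_mul_karatsuba(__m128i x, __m128i h, __m128i h_k,
                                __m128i *hi, __m128i *lo)
{
    /* vpshufd $0b01001110 swaps the qwords; the XOR gives X1^X0. */
    __m128i x_sum = _mm_xor_si128(x, _mm_shuffle_epi32(x, 0x4e));

    __m128i t_hi  = _mm_clmulepi64_si128(x, h, 0x11);   /* X1 * H1 */
    __m128i t_lo  = _mm_clmulepi64_si128(x, h, 0x00);   /* X0 * H0 */
    __m128i t_mid = _mm_clmulepi64_si128(x_sum, h_k, 0x00);
    /* (X1^X0)(H1^H0) ^ hi ^ lo = X1*H0 ^ X0*H1, the middle term. */
    t_mid = _mm_xor_si128(t_mid, _mm_xor_si128(t_hi, t_lo));

    /* Fold the middle term into the 256-bit product: (hi:lo) ^= mid << 64. */
    *lo = _mm_xor_si128(t_lo, _mm_slli_si128(t_mid, 8));
    *hi = _mm_xor_si128(t_hi, _mm_srli_si128(t_mid, 8));
}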
1949 … num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER
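This macro and everything below it repeat the AVX half in the _AVX2 variants of the same macros; XMM8 plays identical roles, so the notes and sketches above carry over line for line (counter increments at 2210/2229, the Karatsuba step at 2680-2688).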
2030 # XMM8 has the combined result here
2032 vmovdqa \XMM8, TMP1(%rsp)
2033 vmovdqa \XMM8, \T3
2069 vmovdqa \CTR, \XMM8
2070 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
2080 vpxor \T_key, \XMM8, \XMM8
2093 vaesenc \T_key, \XMM8, \XMM8
2107 vaesenclast \T_key, \XMM8, \XMM8
2159 vpxor \T1, \XMM8, \XMM8
2160 vmovdqu \XMM8, 16*7(arg3 , %r11)
2162 vmovdqa \T1, \XMM8
2176 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
2191 …PARALLEL_AVX2 REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
2200 vmovdqa \XMM8, TMP8(%rsp)
2210 vpaddd ONE(%rip), \XMM7, \XMM8
2211 vmovdqa \XMM8, \CTR
2220 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
2229 vpaddd ONEf(%rip), \XMM7, \XMM8
2230 vmovdqa \XMM8, \CTR
2244 vpxor \T1, \XMM8, \XMM8
2260 vaesenc \T1, \XMM8, \XMM8
2270 vaesenc \T1, \XMM8, \XMM8
2290 vaesenc \T1, \XMM8, \XMM8
2314 vaesenc \T1, \XMM8, \XMM8
2340 vaesenc \T1, \XMM8, \XMM8
2364 vaesenc \T1, \XMM8, \XMM8
2389 vaesenc \T1, \XMM8, \XMM8
2413 vaesenc \T1, \XMM8, \XMM8
2440 vaesenc \T5, \XMM8, \XMM8
2470 vaesenc \T5, \XMM8, \XMM8
2520 vmovdqu \XMM8, 16*7(arg3,%r11) # Write to the Ciphertext buffer
2542 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
2553 .macro GHASH_LAST_8_AVX2 T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
2680 vpshufd $0b01001110, \XMM8, \T2
2682 vpxor \XMM8, \T2, \T2
2685 vpclmulqdq $0x11, \T5, \XMM8, \T4
2688 vpclmulqdq $0x00, \T5, \XMM8, \T4