Lines matching refs:xmm4. Each match shows the source line number, the AltiVec instruction, the original x86 instruction it translates (first comment), and a semantic note (second comment).
257 vperm v4, $sb1t, v7, v2 # vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
261 vxor v4, v4, v5 # vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
264 vxor v0, v0, v4 # vpxor %xmm4, %xmm0, %xmm0 # 0 = A
266 lvx v4, r12, r10 # vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
270 vperm v0, v0, v7, v4 # vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
272 vperm v4, v3, v7, v1 # vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
274 vxor v0, v0, v4 # vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
282 vperm v4, $invlo, $invlo, v0 # vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
285 vxor v4, v4, v5 # vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
289 vperm v3, $invlo, v7, v4 # vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
298 # vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
300 vperm v4, $sbou, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
303 vxor v4, v4, v5 # vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
304 vxor v0, v0, v4 # vpxor %xmm4, %xmm0, %xmm0 # 0 = A
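
The encrypt-core matches above (lines 257-304) all build on one primitive: a 16-entry byte-table lookup, done on x86 with vpshufb (AT&T operand order is index, table, destination, so "vpshufb %xmm2, %xmm13, %xmm4" sets %xmm4 to %xmm13 shuffled by %xmm2) and on POWER with vperm, whose permute vector selects bytes from the concatenation of two source registers. Below is a minimal C sketch of that primitive and of the "4 = sb1u", "4 = sb1u + k", "0 = A" combine step. shuffle16 and sb1_combine are hypothetical names, sb1u/sb1t stand in for the real .Lk_sb1 constants (not reproduced here), and idx_u/idx_t are the two 4-bit index vectors produced by the preceding inversion steps.

#include <stdint.h>

/* pshufb/vperm-style lookup: each output byte is tab[idx & 0x0f].
 * vpaes keeps every index in 0..15, so a 16-entry view is enough here. */
static void shuffle16(uint8_t out[16], const uint8_t tab[16],
                      const uint8_t idx[16])
{
    for (int i = 0; i < 16; i++)
        out[i] = tab[idx[i] & 0x0f];
}

/* Scalar model of the combine step commented above:
 * u = sb1u[idx_u], t = sb1t[idx_t], A = t ^ (u ^ roundkey). */
static void sb1_combine(uint8_t A[16],
                        const uint8_t sb1u[16], const uint8_t sb1t[16],
                        const uint8_t idx_u[16], const uint8_t idx_t[16],
                        const uint8_t roundkey[16])
{
    uint8_t u[16], t[16];
    shuffle16(u, sb1u, idx_u);               /* 4 = sb1u            */
    shuffle16(t, sb1t, idx_t);               /* 0 = sb1t            */
    for (int i = 0; i < 16; i++)
        A[i] = t[i] ^ u[i] ^ roundkey[i];    /* 4 = sb1u + k; 0 = A */
}

The later "3 = D" / "0 = 2A+3B+C+D" lines reuse the same shuffle primitive with the .Lk_mc_backward[] constant loaded at line 266 to realize MixColumns as byte permutations plus xors.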
456 lvx v5, 0, $key # vmovdqu (%r9), %xmm4 # round0 key
464 vxor v0, v0, v5 # vpxor %xmm4, %xmm2, %xmm2
475 # vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
477 vperm v4, $sb9u, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
481 vxor v5, v5, v4 # vpxor %xmm4, %xmm0, %xmm0
482 # vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
486 vperm v4, $sbdu, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
489 vxor v5, v5, v4 # vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
490 # vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
494 vperm v4, $sbbu, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
497 vxor v5, v5, v4 # vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
498 # vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
502 vperm v4, $sbeu, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
505 vxor v0, v5, v4 # vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
514 vperm v4, $invlo, $invlo, v0 # vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
517 vxor v4, v4, v2 # vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
521 vperm v3, $invlo, v7, v4 # vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
530 # vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
531 vperm v4, $sbou, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
535 vxor v4, v4, v5 # vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
536 vxor v0, v1, v4 # vpxor %xmm4, %xmm1, %xmm0 # 0 = A
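
The decrypt-core matches (lines 456-536) show the same lookup reused four times, xor-accumulating a running value ("4 = ch") through the sb9, sbd, sbb and sbe table pairs before the final sbou step. A sketch of that accumulation shape follows, with placeholder table contents; in the full routine the running value is also re-permuted with a MixColumns-style constant between pairs, but those instructions do not reference %xmm4 and so do not appear in this listing.

#include <stdint.h>

static void shuffle16(uint8_t out[16], const uint8_t tab[16],
                      const uint8_t idx[16])
{
    for (int i = 0; i < 16; i++)
        out[i] = tab[idx[i] & 0x0f];
}

/* tabs[k][0]/tabs[k][1] stand in for the sb9u/sb9t, sbdu/sbdt,
 * sbbu/sbbt, sbeu/sbet constant pairs; idx_u/idx_t are the 4-bit
 * index vectors from the preceding inversion. */
static void dec_accumulate(uint8_t ch[16], const uint8_t tabs[4][2][16],
                           const uint8_t idx_u[16], const uint8_t idx_t[16])
{
    uint8_t t[16];
    for (int k = 0; k < 4; k++) {
        shuffle16(t, tabs[k][0], idx_u);    /* 4 = sbXu */
        for (int i = 0; i < 16; i++)
            ch[i] ^= t[i];                  /* 4 = ch   */
        shuffle16(t, tabs[k][1], idx_t);    /* 0 = sbXt */
        for (int i = 0; i < 16; i++)
            ch[i] ^= t[i];                  /* 0 = ch   */
    }
}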
1163 vxor v4, v4, v4 # vpxor %xmm4, %xmm4, %xmm4
1216 ## Clobbers %xmm1-%xmm4, %r11.
1221 #vxor v4, v4, v4 # vpxor %xmm4, %xmm4, %xmm4
1222 ?vsldoi v1, $rcon, v9, 15 # vpalignr \$15, %xmm8, %xmm4, %xmm1
1238 ?vsldoi v4, v9, v7, 8 # vpslldq \$8, %xmm7, %xmm4
1243 vxor v7, v7, v4 # vpxor %xmm4, %xmm7, %xmm7
1248 vperm v4, $invlo, v9, v1 # vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
1251 vxor v4, v4, v2 # vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
1252 vperm v2, $invlo, v9, v4 # vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
1255 vperm v4, v15, v9, v3 # vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
1257 vxor v1, v1, v4 # vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
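
Up to the basis change vpaes applies to its round keys, the schedule-round matches (lines 1216-1257) compute the standard AES key-expansion step: rotate the last key word (the ?vsldoi / vpalignr \$15 and vpslldq \$8 byte shifts), push it through the S-box via the same inversion tables (hence the 1/j, jak and sbou comments), and smear the result across the previous round key. A word-level C sketch under the usual FIPS-197 packing (most-significant byte of each 32-bit word first); the sbox parameter stands in for the standard AES S-box table, and the round-constant handling is simplified relative to the assembly.

#include <stdint.h>

static uint32_t subword(const uint8_t sbox[256], uint32_t w)
{
    return (uint32_t)sbox[(w >> 24) & 0xff] << 24 |
           (uint32_t)sbox[(w >> 16) & 0xff] << 16 |
           (uint32_t)sbox[(w >>  8) & 0xff] <<  8 |
           (uint32_t)sbox[ w        & 0xff];
}

/* One AES-128 key-expansion round: next[0..3] from prev[0..3] and rcon.
 * The assembly does the rotate and the smear on whole 16-byte vectors
 * (byte permutes and shifted xors) instead of word by word. */
static void key_round(uint32_t next[4], const uint32_t prev[4],
                      const uint8_t sbox[256], uint8_t rcon)
{
    uint32_t t = prev[3];
    t = (t << 8) | (t >> 24);                       /* RotWord         */
    t = subword(sbox, t) ^ ((uint32_t)rcon << 24);  /* SubWord ^ Rcon  */
    next[0] = prev[0] ^ t;
    next[1] = prev[1] ^ next[0];
    next[2] = prev[2] ^ next[1];
    next[3] = prev[3] ^ next[2];
}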
1313 #vmr v4, v0 # vmovdqa %xmm0, %xmm4 # save xmm0 for later
1318 vxor v4, v0, v26 # vpxor .Lk_s63(%rip), %xmm0, %xmm4
1320 vperm v4, v4, v4, v25 # vpshufb %xmm5, %xmm4, %xmm4
1321 vperm v1, v4, v4, v25 # vpshufb %xmm5, %xmm4, %xmm1
1323 vxor v4, v4, v1 # vpxor %xmm1, %xmm4, %xmm4
1325 vxor v3, v3, v4 # vpxor %xmm4, %xmm3, %xmm3
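
Lines 1313-1325 are part of the key mangling: the round key is xored with the .Lk_s63 constant and then run twice through a fixed byte permutation (the vperm v25 / vpshufb %xmm5 steps), with the two permuted copies xor-accumulated into the output. A direct scalar sketch of just these matched lines; s63 and perm are placeholders for the constants held in v26 and v25 (%xmm5), and the surrounding routine does more than is visible here.

#include <stdint.h>

static void mangle_accumulate(uint8_t acc[16], const uint8_t x[16],
                              const uint8_t s63[16], const uint8_t perm[16])
{
    uint8_t y[16], r1[16], r2[16];
    for (int i = 0; i < 16; i++)
        y[i] = x[i] ^ s63[i];               /* vpxor .Lk_s63, %xmm0, %xmm4 */
    for (int i = 0; i < 16; i++)
        r1[i] = y[perm[i] & 0x0f];          /* vpshufb %xmm5, %xmm4, %xmm4 */
    for (int i = 0; i < 16; i++)
        r2[i] = r1[perm[i] & 0x0f];         /* vpshufb %xmm5, %xmm4, %xmm1 */
    for (int i = 0; i < 16; i++)
        acc[i] ^= r1[i] ^ r2[i];            /* the two trailing vpxor lines */
}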
1342 vsrb v1, v0, v8 # vpsrlb \$4, %xmm4, %xmm1 # 1 = hi
1343 #and v4, v0, v9 # vpand %xmm9, %xmm4, %xmm4 # 4 = lo
1346 vperm v2, v16, v16, v0 # vpshufb %xmm4, %xmm2, %xmm2
1353 vperm v2, v18, v18, v0 # vpshufb %xmm4, %xmm2, %xmm2
1361 vperm v2, v20, v20, v0 # vpshufb %xmm4, %xmm2, %xmm2
1368 vperm v2, v22, v22, v0 # vpshufb %xmm4, %xmm2, %xmm2
1370 # vmovdqa 0x70(%r11), %xmm4
1371 vperm v4, v23, v23, v1 # vpshufb %xmm1, %xmm4, %xmm4
1374 vxor v3, v4, v2 # vpxor %xmm2, %xmm4, %xmm3
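
The final group (lines 1342-1374) shows the generic transform shape that underlies all of the above: split each byte into its high and low nibble (vpsrlb \$4 for the high half, the vpand 0x0F alluded to by the commented-out line for the low half), look each half up in its own 16-byte table, and xor the results; several table pairs loaded from (%r11) offsets are chained this way. A minimal sketch of one such pair, with tab_lo/tab_hi as placeholders for those constants.

#include <stdint.h>

/* out[i] = tab_lo[in[i] & 0x0f] ^ tab_hi[in[i] >> 4]: the hi/lo nibble
 * transform visible in the matches above.  tab_lo/tab_hi are placeholders,
 * not the real constants loaded from 0x..(%r11). */
static void nibble_transform(uint8_t out[16], const uint8_t in[16],
                             const uint8_t tab_lo[16],
                             const uint8_t tab_hi[16])
{
    for (int i = 0; i < 16; i++)
        out[i] = tab_lo[in[i] & 0x0f] ^ tab_hi[in[i] >> 4];
}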