/external/llvm/test/CodeGen/X86/ |
D | vector-half-conversions.ll | 14 ; ALL-NEXT: vmovd %eax, %xmm0 35 ; ALL-NEXT: vmovd %edx, %xmm0 38 ; ALL-NEXT: vmovd %ecx, %xmm1 41 ; ALL-NEXT: vmovd %eax, %xmm2 43 ; ALL-NEXT: vmovd %esi, %xmm3 66 ; ALL-NEXT: vmovd %edx, %xmm0 69 ; ALL-NEXT: vmovd %ecx, %xmm1 72 ; ALL-NEXT: vmovd %eax, %xmm2 74 ; ALL-NEXT: vmovd %esi, %xmm3 106 ; AVX1-NEXT: vmovd %esi, %xmm0 [all …]
|
D | promote-vec3.ll | 48 ; AVX_ANY-NEXT: vmovd %xmm0, %eax 58 ; AVX_X86_64-NEXT: vmovd %edi, %xmm0 62 ; AVX_X86_64-NEXT: vmovd %xmm0, %eax 116 ; AVX_ANY-NEXT: vmovd %xmm0, %eax 126 ; AVX_X86_64-NEXT: vmovd %edi, %xmm0 131 ; AVX_X86_64-NEXT: vmovd %xmm0, %eax
|
D | fast-isel-float-half-convertion.ll | 8 ; CHECK-NEXT: vmovd %xmm0, %eax 18 ; CHECK-NEXT: vmovd %eax, %xmm0
|
D | memset-nonzero.ll | 222 ; AVX1-NEXT: vmovd %esi, %xmm0 230 ; AVX2-NEXT: vmovd %esi, %xmm0 263 ; AVX1-NEXT: vmovd %esi, %xmm0 273 ; AVX2-NEXT: vmovd %esi, %xmm0 313 ; AVX1-NEXT: vmovd %esi, %xmm0 324 ; AVX2-NEXT: vmovd %esi, %xmm0 377 ; AVX1-NEXT: vmovd %esi, %xmm0 390 ; AVX2-NEXT: vmovd %esi, %xmm0 435 ; AVX1-NEXT: vmovd %esi, %xmm0 452 ; AVX2-NEXT: vmovd %esi, %xmm0
|
D | f16c-intrinsics-fast-isel.ll | 12 ; X32-NEXT: vmovd %eax, %xmm0 22 ; X64-NEXT: vmovd %eax, %xmm0 45 ; X32-NEXT: vmovd %xmm0, %eax 54 ; X64-NEXT: vmovd %xmm0, %eax
|
D | pr15267.ll | 10 ; CHECK-NEXT: vmovd %eax, %xmm0 29 ; CHECK-NEXT: vmovd %eax, %xmm0 52 ; CHECK-NEXT: vmovd %edx, %xmm0 80 ; CHECK-NEXT: vmovd %edx, %xmm0
|
D | vector-shuffle-variable-256.ll | 240 ; AVX2-NEXT: vmovd %edi, %xmm1 242 ; AVX2-NEXT: vmovd %esi, %xmm2 244 ; AVX2-NEXT: vmovd %edx, %xmm3 246 ; AVX2-NEXT: vmovd %ecx, %xmm4 248 ; AVX2-NEXT: vmovd %r8d, %xmm5 250 ; AVX2-NEXT: vmovd %r9d, %xmm6 252 ; AVX2-NEXT: vmovd {{.*#+}} xmm7 = mem[0],zero,zero,zero 254 ; AVX2-NEXT: vmovd {{.*#+}} xmm8 = mem[0],zero,zero,zero 336 ; AVX1-NEXT: vmovd %eax, %xmm0 360 ; AVX1-NEXT: vmovd %eax, %xmm1 [all …]
|
D | avg.ll | 16 ; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 17 ; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero 19 ; AVX2-NEXT: vmovd %xmm0, (%rax) 24 ; AVX512BW-NEXT: vmovd (%rdi), %xmm0 25 ; AVX512BW-NEXT: vmovd (%rsi), %xmm1 27 ; AVX512BW-NEXT: vmovd %xmm0, (%rax) 268 ; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 269 ; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero 271 ; AVX2-NEXT: vmovd %xmm0, (%rax) 276 ; AVX512BW-NEXT: vmovd (%rdi), %xmm0 [all …]
|
D | half.ll | 90 ; CHECK-F16C-NEXT: vmovd [[REG0]], [[REG1:%[a-z0-9]+]] 112 ; CHECK-F16C-NEXT: vmovd [[REG0]], %eax 140 ; CHECK-F16C-NEXT: vmovd [[REG0]], [[REG1:%[a-z0-9]+]] 185 ; CHECK-F16C-NEXT: vmovd [[REG4]], %eax 300 ; CHECK-F16C-NEXT: vmovd %eax, %xmm0
|
D | psubus.ll | 69 ; AVX1-NEXT: vmovd %esi, %xmm0 79 ; AVX2-NEXT: vmovd %esi, %xmm0 172 ; AVX1-NEXT: vmovd %esi, %xmm0 182 ; AVX2-NEXT: vmovd %esi, %xmm0 314 ; AVX1-NEXT: vmovd %esi, %xmm2 332 ; AVX2-NEXT: vmovd %esi, %xmm0 478 ; AVX1-NEXT: vmovd %esi, %xmm1 497 ; AVX2-NEXT: vmovd %esi, %xmm0
|
D | extractelement-index.ll | 141 ; AVX-NEXT: vmovd %xmm0, %eax 173 ; AVX-NEXT: vmovd %xmm0, %eax 215 ; AVX-NEXT: vmovd %xmm0, %eax 250 ; AVX-NEXT: vmovd %xmm0, %eax 266 ; AVX-NEXT: vmovd %xmm0, %eax 531 ; AVX2-NEXT: vmovd %edi, %xmm1 533 ; AVX2-NEXT: vmovd %xmm0, %eax
|
D | vec_sdiv_to_shift.ll | 83 ; AVX-NEXT: vmovd %xmm0, %eax 86 ; AVX-NEXT: vmovd %eax, %xmm1 268 ; AVX-NEXT: vmovd %xmm0, %edx 273 ; AVX-NEXT: vmovd %esi, %xmm1
|
D | 2012-1-10-buildvector.ll | 21 ; CHECK-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
D | merge-consecutive-loads-512.ll | 544 ; AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 550 ; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 556 ; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 614 ; AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 620 ; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 626 ; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 682 ; ALL-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 684 ; ALL-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero 694 ; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 696 ; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
D | merge-consecutive-loads-256.ll | 391 ; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero 398 ; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero 405 ; AVX512F-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero 413 ; X32-AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero 476 ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 482 ; X32-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 598 ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 604 ; X32-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 621 ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 627 ; X32-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
D | vector-shift-shl-256.ll | 351 ; AVX1-NEXT: vmovd %xmm1, %eax 353 ; AVX1-NEXT: vmovd %eax, %xmm1 361 ; AVX2-NEXT: vmovd %xmm1, %eax 363 ; AVX2-NEXT: vmovd %eax, %xmm1 370 ; XOPAVX1-NEXT: vmovd %xmm1, %eax 372 ; XOPAVX1-NEXT: vmovd %eax, %xmm1 380 ; XOPAVX2-NEXT: vmovd %xmm1, %eax 382 ; XOPAVX2-NEXT: vmovd %eax, %xmm1 388 ; AVX512-NEXT: vmovd %xmm1, %eax 390 ; AVX512-NEXT: vmovd %eax, %xmm1
|
D | scalar-int-to-fp.ll | 112 ; AVX512_32: vmovd %eax, %xmm0 152 ; AVX512_32: vmovd %eax, %xmm0
|
D | avx-basic.ll | 79 ; CHECK-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 109 ; CHECK-NEXT: vmovd %eax, %xmm0
|
D | vector-shift-lshr-256.ll | 387 ; AVX1-NEXT: vmovd %xmm1, %eax 389 ; AVX1-NEXT: vmovd %eax, %xmm1 397 ; AVX2-NEXT: vmovd %xmm1, %eax 399 ; AVX2-NEXT: vmovd %eax, %xmm1 406 ; XOPAVX1-NEXT: vmovd %xmm1, %eax 408 ; XOPAVX1-NEXT: vmovd %eax, %xmm1 416 ; XOPAVX2-NEXT: vmovd %xmm1, %eax 418 ; XOPAVX2-NEXT: vmovd %eax, %xmm1 424 ; AVX512-NEXT: vmovd %xmm1, %eax 426 ; AVX512-NEXT: vmovd %eax, %xmm1
|
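Several of the X86 entries above (f16c-intrinsics-fast-isel.ll, fast-isel-float-half-convertion.ll, half.ll, vector-half-conversions.ll) are half/float conversion tests, where the matched vmovd lines are the GPR-to-XMM and XMM-to-GPR transfers surrounding vcvtph2ps/vcvtps2ph. A minimal C++ sketch of that pattern using the F16C intrinsics (an illustrative example, not code taken from these tests):

#include <immintrin.h>
#include <cstdio>

// Scalar half<->float round trip via the F16C intrinsics (compile with -mf16c).
// Targets with F16C typically lower this through vcvtps2ph/vcvtph2ps plus a
// vmovd on each side, the same instruction pairs the FileCheck lines match.
int main() {
  float f = 1.5f;
  // float -> half: vcvtps2ph leaves the packed result in an XMM register,
  // then a vmovd xmm-to-GPR move extracts the 16-bit value.
  unsigned short h = _cvtss_sh(f, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  // half -> float: a vmovd GPR-to-xmm move inserts the 16-bit value into an
  // XMM register before vcvtph2ps widens it back to float.
  float back = _cvtsh_ss(h);
  std::printf("%f -> 0x%04x -> %f\n", f, (unsigned)h, back);
  return 0;
}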
/external/libvpx/libvpx/third_party/libyuv/source/ |
D | compare_win.cc | 97 vmovd eax, xmm0 in SumSquareError_AVX2() 186 vmovd xmm0, [esp + 12] // seed in HashDjb2_AVX2() 210 vmovd eax, xmm0 // return hash in HashDjb2_AVX2()
|
D | row_gcc.cc | 2019 MEMOPREG(vmovd, 0x00, [u_buf], [v_buf], 1, xmm1) \ 3822 MEMOPREG(vmovd,0x00,4,3,4,xmm0) // vmovd 0x0(%4,%3,4),%%xmm0 in ARGBUnattenuateRow_AVX2() 3824 MEMOPREG(vmovd,0x00,4,3,4,xmm1) // vmovd 0x0(%4,%3,4),%%xmm1 in ARGBUnattenuateRow_AVX2() 3827 MEMOPREG(vmovd,0x00,4,3,4,xmm2) // vmovd 0x0(%4,%3,4),%%xmm2 in ARGBUnattenuateRow_AVX2() 3829 MEMOPREG(vmovd,0x00,4,3,4,xmm3) // vmovd 0x0(%4,%3,4),%%xmm3 in ARGBUnattenuateRow_AVX2() 3832 MEMOPREG(vmovd,0x00,4,3,4,xmm0) // vmovd 0x0(%4,%3,4),%%xmm0 in ARGBUnattenuateRow_AVX2() 3834 MEMOPREG(vmovd,0x00,4,3,4,xmm1) // vmovd 0x0(%4,%3,4),%%xmm1 in ARGBUnattenuateRow_AVX2() 3837 MEMOPREG(vmovd,0x00,4,3,4,xmm2) // vmovd 0x0(%4,%3,4),%%xmm2 in ARGBUnattenuateRow_AVX2() 3839 MEMOPREG(vmovd,0x00,4,3,4,xmm3) // vmovd 0x0(%4,%3,4),%%xmm3 in ARGBUnattenuateRow_AVX2()
|
/external/libyuv/files/source/ |
D | compare_win.cc | 97 vmovd eax, xmm0 in SumSquareError_AVX2() 186 vmovd xmm0, [esp + 12] // seed in HashDjb2_AVX2() 210 vmovd eax, xmm0 // return hash in HashDjb2_AVX2()
|
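In both copies of compare_win.cc above, the matched lines are the scalar entry and exit points of the SIMD loops: "vmovd xmm0, [esp + 12]" loads the 32-bit hash seed into the low lane of an XMM register, and the trailing "vmovd eax, xmm0" extracts the 32-bit result (the sum of squared errors, or the hash) back into a GPR. A small C++ sketch of those two moves written with SSE2 intrinsics instead of inline assembly (hypothetical helper names, not part of libyuv):

#include <emmintrin.h>
#include <cstdint>

// Insert a 32-bit scalar into the low lane of an XMM register, zeroing the
// upper lanes -- this is the "vmovd xmm, r32/m32" direction.
static inline __m128i insert_low32(uint32_t x) {
  return _mm_cvtsi32_si128(static_cast<int>(x));
}

// Extract the low 32 bits of an XMM register into a scalar -- this is the
// "vmovd r32, xmm" direction used to return the result.
static inline uint32_t extract_low32(__m128i v) {
  return static_cast<uint32_t>(_mm_cvtsi128_si32(v));
}

With AVX enabled these compile to vmovd; without it they become the non-VEX movd form.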
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | avx-bitcast.ll | 4 ; CHECK-NEXT: vmovd %xmm
|
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
D | widen-vmovs.ll | 8 ; The vmovs is first widened to a vmovd, and then converted to a vorr because of the v2f32 vadd.f32.
|
/external/llvm/test/CodeGen/ARM/ |
D | widen-vmovs.ll | 8 ; The vmovs is first widened to a vmovd, and then converted to a vorr because of the v2f32 vadd.f32.
|