/external/llvm/test/CodeGen/X86/ |
D | vector-shift-ashr-256.ll |
      150  ; AVX1-NEXT: vpsraw $8, %xmm4, %xmm5
      152  ; AVX1-NEXT: vpsraw $4, %xmm2, %xmm4
      154  ; AVX1-NEXT: vpsraw $2, %xmm2, %xmm4
      157  ; AVX1-NEXT: vpsraw $1, %xmm2, %xmm4
      164  ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm4
      166  ; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
      168  ; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
      171  ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
      234  ; AVX1-NEXT: vpsraw $4, %xmm5, %xmm6
      236  ; AVX1-NEXT: vpsraw $2, %xmm5, %xmm6
      [all …]
|
D | vector-shift-ashr-512.ll |
      64   ; AVX512DQ-NEXT: vpsraw $4, %ymm5, %ymm6
      66   ; AVX512DQ-NEXT: vpsraw $2, %ymm5, %ymm6
      69   ; AVX512DQ-NEXT: vpsraw $1, %ymm5, %ymm6
      75   ; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm5
      77   ; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm5
      80   ; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm5
      88   ; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
      90   ; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5
      93   ; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5
      99   ; AVX512DQ-NEXT: vpsraw $4, %ymm1, %ymm4
      [all …]
|
D | vec_sdiv_to_shift.ll |
      19   ; AVX-NEXT: vpsraw $15, %xmm0, %xmm1
      22   ; AVX-NEXT: vpsraw $5, %xmm0, %xmm0
      42   ; AVX-NEXT: vpsraw $15, %xmm0, %xmm1
      45   ; AVX-NEXT: vpsraw $5, %xmm0, %xmm0
      213  ; AVX1-NEXT: vpsraw $15, %xmm0, %xmm1
      216  ; AVX1-NEXT: vpsraw $2, %xmm1, %xmm1
      218  ; AVX1-NEXT: vpsraw $15, %xmm0, %xmm2
      221  ; AVX1-NEXT: vpsraw $2, %xmm0, %xmm0
      227  ; AVX2-NEXT: vpsraw $15, %ymm0, %ymm1
      230  ; AVX2-NEXT: vpsraw $2, %ymm0, %ymm0
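The file name and the immediates above point at the standard signed-divide-by-power-of-two lowering: vpsraw $15 materializes each 16-bit lane's sign, a bias add (not matching the search term, so not listed) follows, and the final vpsraw performs the division. A minimal C sketch of that shape, assuming a divisor of 32 to match the $5 shifts; the helper name is mine, not the test's:

    #include <immintrin.h>

    /* Sketch only: sdiv-by-32 for 16-bit lanes in the "sign fixup + arithmetic
     * shift" form suggested by the vpsraw $15 / vpsraw $5 pairs above. */
    static inline __m128i sdiv32_epi16(__m128i x)
    {
        __m128i sign = _mm_srai_epi16(x, 15);             /* vpsraw $15: 0 or -1 per lane  */
        __m128i bias = _mm_srli_epi16(sign, 11);          /* 31 for negative lanes, else 0 */
        return _mm_srai_epi16(_mm_add_epi16(x, bias), 5); /* vpsraw $5: truncating x / 32  */
    }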
|
D | vector-shift-ashr-128.ll |
      295  ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3
      297  ; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
      299  ; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
      302  ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
      471  ; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
      473  ; AVX-NEXT: vpsraw $2, %xmm3, %xmm4
      476  ; AVX-NEXT: vpsraw $1, %xmm3, %xmm4
      482  ; AVX-NEXT: vpsraw $4, %xmm0, %xmm3
      484  ; AVX-NEXT: vpsraw $2, %xmm0, %xmm3
      487  ; AVX-NEXT: vpsraw $1, %xmm0, %xmm3
      [all …]
|
D | lower-vec-shift.ll |
      88   ; AVX: vpsraw
      89   ; AVX-NEXT: vpsraw
      91   ; AVX2: vpsraw
      92   ; AVX2-NEXT: vpsraw
|
D | avx2-vector-shifts.ll |
      119  ; CHECK-NEXT: vpsraw $1, %ymm0, %ymm0
      129  ; CHECK-NEXT: vpsraw $15, %ymm0, %ymm0
      365  ; CHECK-NEXT: vpsraw $4, %ymm3, %ymm4
      367  ; CHECK-NEXT: vpsraw $2, %ymm3, %ymm4
      370  ; CHECK-NEXT: vpsraw $1, %ymm3, %ymm4
      376  ; CHECK-NEXT: vpsraw $4, %ymm0, %ymm3
      378  ; CHECK-NEXT: vpsraw $2, %ymm0, %ymm3
      381  ; CHECK-NEXT: vpsraw $1, %ymm0, %ymm3
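The $4/$2/$1 runs here, and the $8/$4/$2/$1 runs in the vector-shift-ashr-128/256/512 entries above, are the fixed-shift cascade LLVM uses to emulate a per-lane variable arithmetic shift on 16-bit elements when no native instruction exists. A rough AVX2 reconstruction of the idea, offered as an illustration rather than LLVM's exact output; the blend and add steps between the vpsraw lines do not match the search term, so they are not shown above:

    #include <immintrin.h>

    /* Illustration: per-lane arithmetic right shift of 16-bit elements built
     * from fixed vpsraw $8/$4/$2/$1 steps plus byte blends. Counts 0..15. */
    static inline __m256i ashr_epi16_var(__m256i x, __m256i count)
    {
        /* Copy bit 3 of each count into the sign bit of both bytes of its lane
         * so vpblendvb (which selects on byte sign bits) acts per 16-bit lane. */
        __m256i m = _mm256_or_si256(_mm256_slli_epi16(count, 12),
                                    _mm256_slli_epi16(count, 4));

        x = _mm256_blendv_epi8(x, _mm256_srai_epi16(x, 8), m); /* >>8 where bit 3 is set */
        m = _mm256_add_epi16(m, m);                             /* next count bit to sign */
        x = _mm256_blendv_epi8(x, _mm256_srai_epi16(x, 4), m);
        m = _mm256_add_epi16(m, m);
        x = _mm256_blendv_epi8(x, _mm256_srai_epi16(x, 2), m);
        m = _mm256_add_epi16(m, m);
        x = _mm256_blendv_epi8(x, _mm256_srai_epi16(x, 1), m);
        return x;
    }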
|
D | avx-shift.ll |
      94   ; CHECK-NEXT: vpsraw $2, %xmm0, %xmm1
      96   ; CHECK-NEXT: vpsraw $2, %xmm0, %xmm0
      199  ; CHECK-NEXT: vpsraw $8, %xmm1, %xmm1
      202  ; CHECK-NEXT: vpsraw $8, %xmm0, %xmm0
|
D | pr17764.ll | 9 ; CHECK-NEXT: vpsraw $15, %ymm0, %ymm0
|
D | vector-idiv-sdiv-256.ll |
      141  ; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
      145  ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
      154  ; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
      410  ; AVX1-NEXT: vpsraw $1, %xmm3, %xmm3
      417  ; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2
      428  ; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
|
D | setcc-lowering.ll | 23 ; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
|
D | avx512-ext.ll |
      10   ; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
      31   ; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
      54   ; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
      76   ; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
      107  ; KNL-NEXT: vpsraw $15, %ymm1, %ymm1
      137  ; KNL-NEXT: vpsraw $15, %ymm1, %ymm1
      159  ; KNL-NEXT: vpsraw $15, %ymm3, %ymm3
      164  ; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
      188  ; KNL-NEXT: vpsraw $15, %ymm3, %ymm3
      193  ; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
      [all …]
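Across this entry and the pr17764.ll, setcc-lowering.ll, vector-pcmp.ll, and viabs.ll hits, the immediate is always 15: shifting a 16-bit lane right arithmetically by 15 replicates its sign bit across the lane, turning a "boolean in the top bit" into an all-zeros or all-ones mask. A one-line illustration (my naming, not from the tests):

    #include <immintrin.h>

    /* Illustration: vpsraw $15 as a mask generator for 16-bit lanes. */
    static inline __m128i msb_to_mask_epi16(__m128i v)
    {
        return _mm_srai_epi16(v, 15);  /* 0x0000 if bit 15 is clear, 0xFFFF if set */
    }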
|
D | avx2-shift.ll |
      123  ; CHECK: vpsraw
      252  ; CHECK: vpsraw
|
D | lower-vec-shift-2.ll | 132 ; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
|
D | vector-pcmp.ll |
      182  ; AVX1-NEXT: vpsraw $15, %xmm0, %xmm1
      184  ; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
|
D | viabs.ll |
      242  ; AVX1-NEXT: vpsraw $15, %xmm1, %xmm2
      244  ; AVX1-NEXT: vpsraw $15, %xmm0, %xmm3
|
D | avx512bw-intrinsics.ll |
      2582  ; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm2 {%k1}
      2583  ; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm3 {%k1} {z}
      2584  ; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm0
      2592  ; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm2 {%k1}
      2593  ; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm3 {%k1} {z}
      2594  ; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm0
      2612  ; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm1 {%k1}
      2613  ; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm2 {%k1} {z}
      2614  ; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm0
      2622  ; AVX512F-32-NEXT: vpsraw $3, %zmm0, %zmm1 {%k1}
      [all …]
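These intrinsics tests exercise the three AVX-512BW flavours visible in the mnemonics: merge-masked ({%k1}), zero-masked ({%k1} {z}), and unmasked. A hedged sketch of the same three forms through the C intrinsics, not the test's own IR:

    #include <immintrin.h>

    /* Sketch: the masked, zero-masked, and plain vpsraw forms checked above. */
    static __m512i vpsraw_forms(__m512i src, __mmask32 k, __m512i x, __m128i count)
    {
        __m512i merged = _mm512_mask_sra_epi16(src, k, x, count);  /* vpsraw ... {%k1}     */
        __m512i zeroed = _mm512_maskz_sra_epi16(k, x, count);      /* vpsraw ... {%k1} {z} */
        __m512i plain  = _mm512_sra_epi16(x, count);               /* vpsraw, no mask      */
        return _mm512_add_epi16(merged, _mm512_add_epi16(zeroed, plain));
    }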
|
D | vector-idiv-sdiv-128.ll |
      168  ; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
      474  ; AVX-NEXT: vpsraw $1, %xmm1, %xmm1
|
/external/llvm/test/MC/X86/ |
D | x86-64-avx512bw.s |
      1861  vpsraw %xmm21, %zmm27, %zmm28
      1865  vpsraw %xmm21, %zmm27, %zmm28 {%k4}
      1869  vpsraw %xmm21, %zmm27, %zmm28 {%k4} {z}
      1873  vpsraw (%rcx), %zmm27, %zmm28
      1877  vpsraw 291(%rax,%r14,8), %zmm27, %zmm28
      1881  vpsraw 2032(%rdx), %zmm27, %zmm28
      1885  vpsraw 2048(%rdx), %zmm27, %zmm28
      1889  vpsraw -2048(%rdx), %zmm27, %zmm28
      1893  vpsraw -2064(%rdx), %zmm27, %zmm28
      1973  vpsraw $171, %zmm29, %zmm28
      [all …]
|
D | x86-64-avx512bw_vl.s |
      2005  vpsraw %xmm28, %xmm28, %xmm17
      2009  vpsraw %xmm28, %xmm28, %xmm17 {%k1}
      2013  vpsraw %xmm28, %xmm28, %xmm17 {%k1} {z}
      2017  vpsraw (%rcx), %xmm28, %xmm17
      2021  vpsraw 291(%rax,%r14,8), %xmm28, %xmm17
      2025  vpsraw 2032(%rdx), %xmm28, %xmm17
      2029  vpsraw 2048(%rdx), %xmm28, %xmm17
      2033  vpsraw -2048(%rdx), %xmm28, %xmm17
      2037  vpsraw -2064(%rdx), %xmm28, %xmm17
      2041  vpsraw %xmm19, %ymm26, %ymm18
      [all …]
|
D | x86-32-avx.s |
      1196  vpsraw %xmm1, %xmm2, %xmm3
      1200  vpsraw (%eax), %xmm2, %xmm3
      1256  vpsraw $10, %xmm2, %xmm3
|
D | x86_64-avx-encoding.s |
      1909  vpsraw %xmm11, %xmm12, %xmm13
      1913  vpsraw (%rax), %xmm12, %xmm13
      1969  vpsraw $10, %xmm12, %xmm13
|
/external/libjpeg-turbo/simd/x86_64/ |
D | jdcolext-avx2.asm |
      129  vpsraw ymm4, ymm4, 1 ; ymm4=(CbE * -FIX(0.22800))
      130  vpsraw ymm5, ymm5, 1 ; ymm5=(CbO * -FIX(0.22800))
      133  vpsraw ymm0, ymm0, 1 ; ymm0=(CrE * FIX(0.40200))
      134  vpsraw ymm1, ymm1, 1 ; ymm1=(CrO * FIX(0.40200))
|
/external/libjpeg-turbo/simd/i386/ |
D | jdcolext-avx2.asm |
      141  vpsraw ymm4, ymm4, 1 ; ymm4=(CbE * -FIX(0.22800))
      142  vpsraw ymm5, ymm5, 1 ; ymm5=(CbO * -FIX(0.22800))
      145  vpsraw ymm0, ymm0, 1 ; ymm0=(CrE * FIX(0.40200))
      146  vpsraw ymm1, ymm1, 1 ; ymm1=(CrO * FIX(0.40200))
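In both the x86_64 and i386 variants of jdcolext-avx2.asm, vpsraw by 1 is a signed halving of each 16-bit lane: the inline comments label the post-shift values CbE * -FIX(0.22800) and friends, so the fixed-point products before the shift sit at twice that scale. A trivial intrinsics equivalent, for illustration only:

    #include <immintrin.h>

    /* Illustration: vpsraw ymm, ymm, 1 == signed divide-by-2 of 16-bit lanes. */
    static inline __m256i halve_epi16(__m256i v)
    {
        return _mm256_srai_epi16(v, 1);
    }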
|
/external/capstone/suite/MC/X86/ |
D | x86-32-avx.s.cs |
      300  0xc5,0xe9,0xe1,0xd9 = vpsraw %xmm1, %xmm2, %xmm3
      301  0xc5,0xe9,0xe1,0x18 = vpsraw (%eax), %xmm2, %xmm3
      315  0xc5,0xe1,0x71,0xe2,0x0a = vpsraw $0xa, %xmm2, %xmm3
|
D | x86_64-avx-encoding.s.cs |
      478  0xc4,0x41,0x19,0xe1,0xeb = vpsraw %xmm11, %xmm12, %xmm13
      479  0xc5,0x19,0xe1,0x28 = vpsraw (%rax), %xmm12, %xmm13
      493  0xc4,0xc1,0x11,0x71,0xe4,0x0a = vpsraw $10, %xmm12, %xmm13
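The capstone fixtures pair each encoding with the AT&T text it must disassemble to. A small, self-contained way to reproduce one of these checks by hand, using the first x86-32 encoding listed above; this is an illustrative harness, not the suite's own driver:

    #include <stdio.h>
    #include <capstone/capstone.h>

    int main(void)
    {
        const uint8_t code[] = { 0xc5, 0xe9, 0xe1, 0xd9 };  /* expected: vpsraw %xmm1, %xmm2, %xmm3 */
        csh handle;
        cs_insn *insn;

        if (cs_open(CS_ARCH_X86, CS_MODE_32, &handle) != CS_ERR_OK)
            return 1;
        cs_option(handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT);  /* fixtures use AT&T syntax */

        size_t n = cs_disasm(handle, code, sizeof(code), 0x1000, 0, &insn);
        for (size_t i = 0; i < n; i++)
            printf("%s %s\n", insn[i].mnemonic, insn[i].op_str);

        cs_free(insn, n);
        cs_close(&handle);
        return 0;
    }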
|