/external/llvm/test/CodeGen/AMDGPU/ |
D | operand-folding.ll |
    59 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
    60 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
    61 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
    62 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
    81 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0x64, v{{[0-9]+}}
    92 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
    93 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
    94 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
    95 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
|
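The operand-folding.ll checks above exercise immediate folding into the VOP2 form: 5 fits the GCN inline-constant range (-16..64) and is encoded directly in the instruction, while 0x64 (100) falls outside that range and has to be carried as an extra 32-bit literal dword. A minimal sketch of the kind of test case behind such a check (the RUN line, function name, and IR body are illustrative assumptions, not copied from operand-folding.ll):

    ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
    declare i32 @llvm.amdgcn.workitem.id.x()

    ; CHECK-LABEL: {{^}}xor_inline_imm:
    ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
    define amdgpu_kernel void @xor_inline_imm(i32 addrspace(1)* %out) {
      %tid = call i32 @llvm.amdgcn.workitem.id.x()   ; divergent value, forces a VALU xor
      %val = xor i32 %tid, 5
      store i32 %val, i32 addrspace(1)* %out
      ret void
    }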
D | xor.ll |
    10 ; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
    11 ; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
    27 ; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
    28 ; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
    29 ; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
    30 ; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
    63 ; SI: v_xor_b32_e32 [[XOR:v[0-9]+]], [[A]], [[B]]
    75 ; SI: v_xor_b32_e32
    111 ; SI: v_xor_b32_e32
    112 ; SI: v_xor_b32_e32
|
D | bfi_int.ll | 42 ; SI: v_xor_b32_e32 [[DST:v[0-9]+]], {{s[0-9]+, v[0-9]+}}
|
D | sint_to_fp.i64.ll | 27 ; GCN: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | operand-folding.ll |
    59 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
    60 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
    61 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
    62 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
    81 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0x64, v{{[0-9]+}}
    92 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
    93 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
    94 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
    95 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
|
D | xor.ll |
    10 ; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
    11 ; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
    27 ; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
    28 ; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
    29 ; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
    30 ; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
    63 ; SI: v_xor_b32_e32 [[XOR:v[0-9]+]], [[B]], [[A]]
    75 ; SI: v_xor_b32_e32
    111 ; SI: v_xor_b32_e32
    112 ; SI: v_xor_b32_e32
    [all …]
|
D | move-to-valu-worklist.ll |
    10 ; GCN: v_xor_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
    11 ; GCN-NEXT: v_xor_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
|
D | select-fabs-fneg-extract-legacy.ll |
    12 ; GCN: v_xor_b32_e32 [[NEG_SELECT:v[0-9]+]], 0x80000000, [[SELECT]]
    30 ; GCN: v_xor_b32_e32 [[NEG_SELECT:v[0-9]+]], 0x80000000, [[SELECT]]
|
D | fneg.f16.ll |
    18 ; GCN: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x8000, [[VAL]]
    82 ; GCN: v_xor_b32_e32 v{{[0-9]+}}, 0x80008000, [[VAL]]
    106 ; CI: v_xor_b32_e32 [[FNEG:v[0-9]+]], 0x80008000, [[VAL]]
    153 ; GCN: v_xor_b32_e32 [[NEG:v[0-9]+]], 0x80008000, [[VAL]]
|
D | bfi_int.ll |
    43 ; GCN: v_xor_b32_e32 [[DST:v[0-9]+]], {{s[0-9]+, v[0-9]+}}
    105 ; GCN-DAG: v_xor_b32_e32 v1, v1, v3
    106 ; GCN-DAG: v_xor_b32_e32 v0, v0, v2
|
D | fneg-combines.ll |
    13 ; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
    35 ; GCN-DAG: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], 0x80000000, [[ADD]]
    58 ; GCN-SAFE: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], 0x80000000, [[ADD]]
    86 ; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000,
    110 ; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
    134 ; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
    159 ; GCN-SAFE: v_xor_b32_e32 [[NEG_A:v[0-9]+]], [[A]], [[SIGNBIT]]
    161 ; GCN-SAFE: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], [[ADD]], [[SIGNBIT]]
    163 ; GCN-NSZ-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
    189 ; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
    [all …]
|
D | sint_to_fp.i64.ll |
    27 ; GCN: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
    62 ; GCN: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
|
D | enable-no-signed-zeros-fp-math.ll | 11 ; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
|
D | rsq.ll |
    82 ; SI-UNSAFE: v_xor_b32_e32 [[NEG_RSQ:v[0-9]+]], 0x80000000, [[RSQ]]
    113 ; SI-UNSAFE: v_xor_b32_e32 [[NEG_RSQ:v[0-9]+]], 0x80000000, [[RSQ]]
|
D | fsub.ll |
    71 ; SI: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
    113 ; SI: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
|
D | extend-bit-ops-i16.ll | 34 ; GCN: v_xor_b32_e32 [[VAL16:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
|
D | anyext.ll | 23 ; GFX89: v_xor_b32_e32 [[XOR:v[0-9]+]], -1, [[ADD]]
|
D | fneg.ll | 64 ; GCN: v_xor_b32_e32 [[RES:v[0-9]+]], [[NEG_VALUE]], [[SIGNBIT]]
|
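Most of the 0x80000000 operands in the entries above are the f32 sign-bit mask: IEEE negation only flips bit 31, so an fneg that cannot be folded into a source modifier of the consuming instruction is lowered to a v_xor_b32 with that mask (0x8000 and 0x80008000 in fneg.f16.ll are the f16 and packed-v2f16 equivalents). A minimal sketch of the pattern, with an assumed RUN line and IR body not taken from any of the listed tests:

    ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
    declare i32 @llvm.amdgcn.workitem.id.x()

    ; GCN-LABEL: {{^}}v_fneg_f32:
    ; GCN: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
    define amdgpu_kernel void @v_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
      %tid = call i32 @llvm.amdgcn.workitem.id.x()
      %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
      %x = load float, float addrspace(1)* %gep        ; divergent load, value lands in a VGPR
      %neg = fsub float -0.000000e+00, %x              ; canonical fneg pattern
      store float %neg, float addrspace(1)* %out
      ret void
    }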
D | select-fabs-fneg-extract.ll |
    292 ; GCN-DAG: v_xor_b32_e32 [[NEG_X:v[0-9]+]], 0x80000000, [[X]]
    342 ; GCN: v_xor_b32_e32 [[X_NEG:v[0-9]+]], 0x80000000, [[X]]
    563 ; GCN-DAG: v_xor_b32_e32 [[X_NEG:v[0-9]+]], 0x80000000, [[X]]
    586 ; GCN-DAG: v_xor_b32_e32 [[Y_NEG:v[0-9]+]], 0x80000000, [[Y]]
    819 ; GCN: v_xor_b32_e32 [[NEG_SELECT:v[0-9]+]], 0x80000000, [[SELECT]]
|
D | fpext.f16.ll | 133 ; GCN-DAG: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x8000, [[A]]
|
D | insert_vector_elt.ll | 211 ; VI: v_xor_b32_e32 [[NOT_MASK:v[0-9]+]], -1, [[MASK]]
|
D | fcanonicalize-elimination.ll | 241 ; GCN: v_xor_b32_e32 [[V:v[0-9]+]], 0x80000000, v{{[0-9]+}}
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AMDGPU/ |
D | vop2.s | 243 v_xor_b32_e32 v1, v2, v3
|
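The MC test assembles the mnemonic and checks both the printed form and the encoding bytes emitted by -show-encoding. A hedged sketch of how such a case is typically laid out (the RUN line and check prefix are assumptions, not copied from vop2.s; the VI encoding bytes are the ones shown in vop2_vi.txt below):

    // RUN: llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck -check-prefix=VI %s
    v_xor_b32_e32 v1, v2, v3
    // VI: v_xor_b32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x2a]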
/external/llvm/test/MC/Disassembler/AMDGPU/ |
D | vop2_vi.txt | 72 # VI: v_xor_b32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x2a]
|
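vop2_vi.txt runs the disassembler in the opposite direction: the test input is raw instruction words and FileCheck verifies the printed mnemonic. Read little-endian, the bytes [0x02,0x07,0x02,0x2a] form the dword 0x2a020702, whose VOP2 fields decode as OP=0x15 (v_xor_b32), VDST=1, VSRC1=3, and SRC0=0x102 (VGPR operand encodings start at 256, so v2), i.e. v_xor_b32_e32 v1, v2, v3. A hedged sketch of the usual shape of such a test (the RUN invocation is an assumption, not copied from vop2_vi.txt):

    # RUN: llvm-mc -arch=amdgcn -mcpu=tonga -disassemble -show-encoding < %s | FileCheck -check-prefix=VI %s
    # VI: v_xor_b32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x2a]
    0x02 0x07 0x02 0x2a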
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/Disassembler/AMDGPU/ |
D | vop2_vi.txt | 72 # VI: v_xor_b32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x2a]
|