/external/llvm/test/CodeGen/AMDGPU/
wqm.ll
    40: ;CHECK: s_and_b64 exec, exec, [[ORIG]]
    62: ;CHECK: s_and_b64 exec, exec, [[ORIG]]
    122: ;CHECK-NEXT: s_and_b64 exec, exec, [[ORIG]]
    123: ;CHECK-NEXT: s_and_b64 [[SAVED]], exec, [[SAVED]]
    158: ;CHECK: s_and_b64 exec, exec, [[ORIG]]
    162: ;CHECK: s_and_b64 exec, exec, [[ORIG]]
    208: ;CHECK: s_and_b64 exec, exec, [[ORIG]]
    284: ;CHECK: s_and_b64 exec, exec, [[ORIG]]
    323: ; CHECK: s_and_b64 exec, exec, [[ORIG]]
    346: ; CHECK: s_and_b64 exec, exec, [[ORIG]]
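The wqm.ll checks above all assert that exec is re-ANDed with a saved copy of the original mask when the shader drops out of whole quad mode (WQM). A minimal C++ model of the mask arithmetic involved, assuming the usual 4-lane quad grouping; this is illustration only, not LLVM code, and the lane-mask value is made up:

    #include <cassert>
    #include <cstdint>

    // Whole quad mode: any quad (group of 4 lanes) that has at least one
    // live lane gets all 4 of its lanes enabled.
    static uint64_t to_wqm(uint64_t exec) {
        uint64_t wqm = 0;
        for (int q = 0; q < 16; ++q)
            if ((exec >> (4 * q)) & 0xf)
                wqm |= 0xfull << (4 * q);
        return wqm;
    }

    int main() {
        uint64_t orig = 0xa5;          // made-up set of originally live lanes
        uint64_t exec = to_wqm(orig);  // lanes enabled while in WQM

        // Leaving WQM: "s_and_b64 exec, exec, orig" recovers exactly the
        // original lanes, because orig is a subset of its WQM extension.
        exec &= orig;
        assert(exec == orig);
        return 0;
    }

Because the original mask is always a subset of its WQM extension, a single s_and_b64 against the saved mask is enough to return to exact mode, which is the pattern these checks pin down.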
smrd-vccz-bug.ll
    8: ; GCN: s_and_b64 vcc, exec, [[MASK]]
    32: ; GCN: s_and_b64 vcc, exec, vcc
uniform-cfg.ll
    35: ; SI-DAG: s_and_b64 vcc, exec, [[COND]]
    92: ; SI-DAG: s_and_b64 vcc, exec, [[COND]]
    123: ; SI: s_and_b64 vcc, exec, [[COND]]
    148: ; SI: s_and_b64 vcc, exec, [[COND]]
    257: ; SI: s_and_b64 vcc, exec, [[MASK]]
    288: ; SI: s_and_b64 vcc, exec, vcc
and.ll
    164: ; SI: s_and_b64
    193: ; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}
    369: ; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1.0
    384: ; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1.0
    399: ; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0.5
    414: ; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -0.5
    455: ; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 4.0
    470: ; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -4.0
i1-copy-implicit-def.ll
    7: ; SI-NEXT: s_and_b64 vcc, exec
partially-dead-super-register-immediate.ll
    4: ; s_and_b64. This is split into 2 x v_and_i32, part of the immediate
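The partially-dead-super-register-immediate.ll comment (truncated in the listing) describes an s_and_b64 being split into two 32-bit ANDs, one per half of the value and of the immediate, so that a dead half can be dropped. A plain C++ sketch of that split, with made-up values:

    #include <cassert>
    #include <cstdint>

    int main() {
        uint64_t src = 0x123456789abcdef0ULL;
        uint64_t imm = 0x00000000ffff0000ULL;   // made-up 64-bit immediate

        // The full 64-bit AND...
        uint64_t full = src & imm;

        // ...is equivalent to two independent 32-bit ANDs, one per half.
        uint32_t lo = (uint32_t)src & (uint32_t)imm;
        uint32_t hi = (uint32_t)(src >> 32) & (uint32_t)(imm >> 32);
        assert(full == (((uint64_t)hi << 32) | lo));

        // If only one half of the result is live, the other half's AND
        // (and its half of the immediate) is dead and can be removed.
        return 0;
    }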
uniform-loop-inside-nonuniform.ll
    10: ; CHECK: s_and_b64 vcc, exec, vcc
valu-i1.ll
    83: ; SI-DAG: s_and_b64 vcc, exec, vcc
    131: ; SI: s_and_b64 [[ORNEG1:s\[[0-9]+:[0-9]+\]]], [[NEG1_CHECK_1]], [[NEG1_CHECK_0]]
fceil64.ll
    19: ; SI-DAG: s_and_b64
cf-loop-on-constant.ll
    100: ; GCN: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, exec, vcc
ftrunc.f64.ll
    31: ; SI-DAG: s_and_b64
skip-if-dead.ll
    217: ; CHECK-NEXT: s_and_b64 vcc, exec, vcc
llvm.amdgcn.div.fmas.ll
    114: ; SI: s_and_b64 vcc, [[CMP0]], [[CMP1]]
setcc.ll
    396: ; SI: s_and_b64 s[2:3], [[A]], [[B]]
salu-to-valu.ll
    439: ; GCN: s_and_b64 vcc, exec, vcc
/external/mesa3d/src/amd/compiler/tests/
test_optimizer.cpp
    194: writeout(3, bld.sop2(aco_opcode::s_and_b64, bld.def(bld.lm), bld.def(s1, scc),
    200: writeout(4, bld.sop2(aco_opcode::s_and_b64, bld.def(bld.lm), bld.def(s1, scc),
    206: writeout(5, bld.sop2(aco_opcode::s_and_b64, bld.def(bld.lm), bld.def(s1, scc),
/external/llvm/test/MC/AMDGPU/
sop2.s
    47: s_and_b64 s[2:3], s[4:5], s[6:7]
/external/llvm/test/MC/Disassembler/AMDGPU/
sop2_vi.txt
    6: # VI: s_and_b64 s[2:3], s[4:5], s[6:7] ; encoding: [0x04,0x06,0x82,0x86]
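The encoding bytes in the sop2_vi.txt check above can be reproduced from the SOP2 field layout (ENCODING 0b10 in bits [31:30], opcode in [29:23], sdst in [22:16], ssrc1 in [15:8], ssrc0 in [7:0]) together with the VI opcode 0x0d, which is presumably the second value of the sop2<0x0f, 0x0d> pair in the SIInstructions.td entry further down. A small self-contained C++ check of that packing; the field layout comes from the GCN ISA documentation, not from this listing:

    #include <cstdint>
    #include <cstdio>

    // Pack one SOP2 word: ENCODING=0b10, OP[29:23], SDST[22:16],
    // SSRC1[15:8], SSRC0[7:0].
    static uint32_t sop2(uint32_t op, uint32_t sdst, uint32_t ssrc1, uint32_t ssrc0) {
        return (0b10u << 30) | (op << 23) | (sdst << 16) | (ssrc1 << 8) | ssrc0;
    }

    int main() {
        // s_and_b64 s[2:3], s[4:5], s[6:7] with the VI opcode 0x0d.
        uint32_t word = sop2(0x0d, /*sdst=*/2, /*ssrc1=*/6, /*ssrc0=*/4);

        // Print little-endian bytes; expected 04 06 82 86 as in sop2_vi.txt.
        for (int i = 0; i < 4; ++i)
            printf("%02x ", (unsigned)((word >> (8 * i)) & 0xff));
        printf("\n");
        return 0;
    }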
/external/mesa3d/src/amd/compiler/
aco_optimizer.cpp
    1419: case aco_opcode::s_and_b64:  (in label_instruction())
    2134: case aco_opcode::s_and_b64:  (in combine_salu_not_bitwise())
    2158: case aco_opcode::s_and_b64:  (in combine_salu_not_bitwise())
    2206: case aco_opcode::s_and_b64:  (in combine_salu_n2())
    2890: instr->opcode == aco_opcode::s_and_b64 || instr->opcode == aco_opcode::s_or_b64) {  (in combine_instruction())
    2916: case aco_opcode::s_and_b64:  (in to_uniform_bool_instr())
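The aco_optimizer.cpp matches above show s_and_b64 being handled in several SALU peepholes. Judging from the function names alone (the listing does not show the bodies, so this is an assumption), combine_salu_not_bitwise folds an s_not into an adjacent bitwise op and combine_salu_n2 forms the *n2 variants. The bitwise identities such rewrites rely on are easy to check in plain C++:

    #include <cassert>
    #include <cstdint>

    int main() {
        // Two arbitrary 64-bit lane masks.
        uint64_t a = 0x0123456789abcdefULL;
        uint64_t b = 0xfedcba9876543210ULL;

        // De Morgan, both directions: a NOT can be folded through an AND/OR,
        // e.g. s_not(s_and(a, b)) can become a single nand-style op and
        // s_and(s_not(a), s_not(b)) a single nor-style op.
        assert(~(a & b) == (~a | ~b));
        assert((~a & ~b) == ~(a | b));

        // s_andn2_b64 computes a & ~b directly, so an s_not feeding one
        // operand of an s_and_b64 can be absorbed into one instruction.
        uint64_t andn2 = a & ~b;
        (void)andn2;
        return 0;
    }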
aco_instruction_selection.cpp
    1393: emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b64, dst, true);  (in visit_alu_instr())
    2046: …Temp cond = bld.sop2(aco_opcode::s_and_b64, bld.hint_vcc(bld.def(s2)), bld.def(s1, scc), tmp0, tmp…  (in visit_alu_instr())
    3142: …aligned_offset = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), Operand((uint64_t)…  (in emit_load())
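The emit_load() match above builds aligned_offset by ANDing the offset with a 64-bit constant whose value is elided in the listing. Rounding an offset down to a power-of-two boundary by masking its low bits looks like this in plain C++; the 4-byte alignment here is an assumption for illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint64_t align = 4;             // assumed alignment
        const uint64_t mask  = ~(align - 1);  // 0xfffffffffffffffc

        uint64_t offset = 0x1237;
        uint64_t aligned_offset = offset & mask;  // round down to the boundary

        assert(aligned_offset == 0x1234);
        assert(aligned_offset % align == 0);
        return 0;
    }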
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/
SOPInstructions.td
    459: def S_AND_B64 : SOP2_64 <"s_and_b64",
/external/llvm/lib/Target/AMDGPU/
SIInstructions.td
    243: defm S_AND_B64 : SOP2_64 <sop2<0x0f, 0x0d>, "s_and_b64",
/external/mesa3d/docs/relnotes/
20.0.0.rst
    725: - aco: use s_and_b64 exec to reduce uniform booleans to one bit
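The release note above refers to reducing a uniform boolean, held as a lane mask in an SGPR pair, to a single bit: ANDing it with exec writes the masked value and also sets scc to "result is non-zero", and for a boolean that is uniform across the active lanes that scc bit is the boolean's value. A small C++ model of the reduction; the scc rule is standard SOP2 behaviour, while the exec value and setup are made up:

    #include <cassert>
    #include <cstdint>

    // s_and_b64: dst = a & b, and scc = (dst != 0).
    static uint64_t s_and_b64(uint64_t a, uint64_t b, bool &scc) {
        uint64_t dst = a & b;
        scc = (dst != 0);
        return dst;
    }

    int main() {
        uint64_t exec = 0x00000000ffffffffULL;  // made-up set of active lanes

        // A uniform boolean stored as a lane mask is either set for all
        // active lanes (true) or for none of them (false).
        uint64_t bool_true  = exec;
        uint64_t bool_false = 0;

        bool scc = false;
        s_and_b64(bool_true, exec, scc);
        assert(scc == true);    // the one-bit form of "true"

        s_and_b64(bool_false, exec, scc);
        assert(scc == false);   // the one-bit form of "false"
        return 0;
    }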