/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/GlobalISel/
D | inst-select-amdgcn.exp.mir
    12  %0:vgpr(s32) = COPY $vgpr0
    19  …ic(@llvm.amdgcn.exp), %1:sgpr(s32), %2:sgpr(s32), %0:vgpr(s32), %0:vgpr(s32), %0:vgpr(s32), %0:vgp…
    22  …ic(@llvm.amdgcn.exp), %1:sgpr(s32), %2:sgpr(s32), %0:vgpr(s32), %0:vgpr(s32), %0:vgpr(s32), %0:vgp…
    24  %5:vgpr(<2 x s16>) = G_BITCAST %0(s32)
    28  …trinsic(@llvm.amdgcn.exp.compr), %1:sgpr(s32), %2:sgpr(s32), %5:vgpr(<2 x s16>), %5:vgpr(<2 x s16>…
    32  …trinsic(@llvm.amdgcn.exp.compr), %1:sgpr(s32), %2:sgpr(s32), %5:vgpr(<2 x s16>), %5:vgpr(<2 x s16>…
D | regbankselect-insert-vector-elt.mir
    32  ; CHECK: [[COPY:%[0-9]+]]:vgpr(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    35  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    36  ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    37  …; CHECK: [[IVEC:%[0-9]+]]:vgpr(<4 x s32>) = G_INSERT_VECTOR_ELT [[COPY]], [[COPY2]](s32), [[COPY3]…
    55  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
    57  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(<4 x s32>) = COPY [[COPY]](<4 x s32>)
    58  ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    59  …; CHECK: [[IVEC:%[0-9]+]]:vgpr(<4 x s32>) = G_INSERT_VECTOR_ELT [[COPY2]], [[COPY1]](s32), [[COPY3]…
    79  ; CHECK: [[COPY3:%[0-9]+]]:vgpr(<4 x s32>) = COPY [[COPY]](<4 x s32>)
    80  ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    [all …]
D | inst-select-implicit-def.mir
    17  %0:vgpr(s64) = COPY $vgpr3_vgpr4
    18  %1:vgpr(s32) = G_IMPLICIT_DEF
    34  %0:vgpr(s64) = COPY $vgpr3_vgpr4
    35  %1:vgpr(s64) = G_IMPLICIT_DEF
    47  %0:vgpr(p0) = G_IMPLICIT_DEF
    48  %1:vgpr(s32) = G_CONSTANT 4
    64  %0:vgpr(p1) = G_IMPLICIT_DEF
    65  %1:vgpr(s32) = G_CONSTANT 4
    80  %0:vgpr(p3) = G_IMPLICIT_DEF
    81  %1:vgpr(s32) = G_CONSTANT 4
    [all …]
D | inst-select-minnum.mir
    20  %1:vgpr(s32) = COPY $vgpr0
    21  %2:vgpr(s32) = COPY $vgpr1
    22  %3:vgpr(s64) = COPY $vgpr3_vgpr4
    28  %11:vgpr(s64) = COPY $vgpr10_vgpr11
    29  %12:vgpr(s64) = COPY $vgpr12_vgpr13
    33  %4:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.minnum.f32), %1, %0
    37  %5:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.minnum.f32), %0, %1
    41  %6:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.minnum.f32), %1, %2
    51  %14:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.minnum.f64), %10, %11
    55  %15:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.minnum.f64), %11, %10
    [all …]
D | inst-select-maxnum.mir
    20  %1:vgpr(s32) = COPY $vgpr0
    21  %2:vgpr(s32) = COPY $vgpr1
    22  %3:vgpr(s64) = COPY $vgpr3_vgpr4
    28  %11:vgpr(s64) = COPY $vgpr10_vgpr11
    29  %12:vgpr(s64) = COPY $vgpr12_vgpr13
    33  %4:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.maxnum.f32), %1, %0
    37  %5:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.maxnum.f32), %0, %1
    41  %6:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.maxnum.f32), %1, %2
    51  %14:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.maxnum.f64), %10, %11
    55  %15:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.maxnum.f64), %11, %10
    [all …]
D | inst-select-amdgcn.cvt.pkrtz.mir
    20  %1:vgpr(s32) = COPY $vgpr0
    22  %2:vgpr(s32) = COPY $vgpr1
    23  %3:vgpr(s64) = COPY $vgpr3_vgpr4
    27  %4:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %1, %0
    31  %5:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %0, %1
    35  %6:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %1, %2
    37  %7:vgpr(s32) = G_BITCAST %4
    38  %8:vgpr(s32) = G_BITCAST %5
    39  %9:vgpr(s32) = G_BITCAST %6
D | regbankselect-fadd.mir
    15  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    16  ; CHECK: [[FADD:%[0-9]+]]:vgpr(s32) = G_FADD [[COPY]], [[COPY2]]
    31  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    32  ; CHECK: [[FADD:%[0-9]+]]:vgpr(s32) = G_FADD [[COPY]], [[COPY1]]
    46  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    48  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    49  ; CHECK: [[FADD:%[0-9]+]]:vgpr(s32) = G_FADD [[COPY]], [[COPY2]]
    63  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    64  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    65  ; CHECK: [[FADD:%[0-9]+]]:vgpr(s32) = G_FADD [[COPY]], [[COPY1]]
D | regbankselect-fmul.mir
    15  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    16  ; CHECK: [[FMUL:%[0-9]+]]:vgpr(s32) = G_FMUL [[COPY]], [[COPY2]]
    31  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    32  ; CHECK: [[FMUL:%[0-9]+]]:vgpr(s32) = G_FMUL [[COPY]], [[COPY1]]
    46  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    48  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    49  ; CHECK: [[FMUL:%[0-9]+]]:vgpr(s32) = G_FMUL [[COPY]], [[COPY2]]
    63  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    64  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    65  ; CHECK: [[FMUL:%[0-9]+]]:vgpr(s32) = G_FMUL [[COPY]], [[COPY1]]
D | regbankselect-minnum.mir
    15  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    16  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.minnum), [[COPY]](s32), [[COPY2]]…
    30  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    31  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.minnum), [[COPY]](s32), [[COPY1]]…
    45  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    46  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    47  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.minnum), [[COPY1]](s32), [[COPY2]…
    60  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    61  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    62  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.minnum), [[COPY]](s32), [[COPY1]]…
D | regbankselect-maxnum.mir
    15  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    16  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.maxnum), [[COPY]](s32), [[COPY2]]…
    30  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    31  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.maxnum), [[COPY]](s32), [[COPY1]]…
    45  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    46  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    47  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.maxnum), [[COPY1]](s32), [[COPY2]…
    60  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    61  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    62  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.maxnum), [[COPY]](s32), [[COPY1]]…
D | regbankselect-amdgcn.cvt.pkrtz.mir
    15  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    16  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), [[COPY]](s32),…
    30  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    31  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), [[COPY]](s32),…
    45  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    46  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    47  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), [[COPY1]](s32)…
    60  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    61  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    62  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), [[COPY]](s32),…
D | regbankselect-mul.mir
    29  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    30  ; CHECK: [[MUL:%[0-9]+]]:vgpr(s32) = G_MUL [[COPY]], [[COPY1]]
    44  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    46  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    47  ; CHECK: [[MUL:%[0-9]+]]:vgpr(s32) = G_MUL [[COPY]], [[COPY2]]
    61  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    62  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    63  ; CHECK: [[MUL:%[0-9]+]]:vgpr(s32) = G_MUL [[COPY]], [[COPY1]]
D | regbankselect-add.mir
    29  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    30  ; CHECK: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY]], [[COPY1]]
    44  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    46  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    47  ; CHECK: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY]], [[COPY2]]
    61  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    62  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    63  ; CHECK: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY]], [[COPY1]]
D | regbankselect-sub.mir
    29  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    30  ; CHECK: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY]], [[COPY1]]
    44  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    46  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    47  ; CHECK: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY]], [[COPY2]]
    61  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    62  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    63  ; CHECK: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY]], [[COPY1]]
D | inst-select-ashr.mir
    22  %2:vgpr(s32) = COPY $vgpr0
    23  %3:vgpr(s64) = COPY $vgpr3_vgpr4
    52  %11:vgpr(s32) = G_ASHR %2, %10
    57  %12:vgpr(s32) = G_ASHR %10, %11
    62  %13:vgpr(s32) = G_ASHR %12, %2
    67  %14:vgpr(s32) = G_ASHR %4, %13
    71  %15:vgpr(s32) = G_ASHR %14, %4
    76  %16:vgpr(s32) = G_ASHR %5, %15
    80  %17:vgpr(s32) = G_ASHR %16, %5
D | regbankselect-and.mir
    30  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    31  ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY]], [[COPY1]]
    45  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    47  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    48  ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY]], [[COPY2]]
    62  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    63  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    64  ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY]], [[COPY1]]
D | regbankselect-or.mir
    30  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    31  ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[COPY]], [[COPY1]]
    45  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    47  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    48  ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[COPY]], [[COPY2]]
    62  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    63  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    64  ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[COPY]], [[COPY1]]
D | regbankselect-xor.mir
    30  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    31  ; CHECK: [[XOR:%[0-9]+]]:vgpr(s32) = G_XOR [[COPY]], [[COPY1]]
    45  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    47  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    48  ; CHECK: [[XOR:%[0-9]+]]:vgpr(s32) = G_XOR [[COPY]], [[COPY2]]
    62  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    63  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    64  ; CHECK: [[XOR:%[0-9]+]]:vgpr(s32) = G_XOR [[COPY]], [[COPY1]]
D | regbankselect-shl.mir
    30  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    31  ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY]], [[COPY1]]
    45  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    47  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    48  ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY]], [[COPY2]]
    62  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    63  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    64  ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY]], [[COPY1]]
D | inst-select-fadd.mir
    17  %1:vgpr(s32) = COPY $vgpr0
    18  %2:vgpr(s32) = COPY $vgpr1
    19  %3:vgpr(s64) = COPY $vgpr3_vgpr4
    23  %4:vgpr(s32) = G_FADD %1, %0
    27  %5:vgpr(s32) = G_FADD %0, %1
    31  %6:vgpr(s32) = G_FADD %1, %2
D | inst-select-fmul.mir
    17  %1:vgpr(s32) = COPY $vgpr0
    18  %2:vgpr(s32) = COPY $vgpr1
    19  %3:vgpr(s64) = COPY $vgpr3_vgpr4
    23  %4:vgpr(s32) = G_FMUL %1, %0
    27  %5:vgpr(s32) = G_FMUL %0, %1
    31  %6:vgpr(s32) = G_FMUL %1, %2
D | inst-select-constant.mir
    17  %0:vgpr(s64) = COPY $vgpr0_vgpr1
    18  %1:vgpr(s64) = COPY $vgpr2_vgpr3
    37  %6:vgpr(s32) = G_CONSTANT i32 1
    42  %7:vgpr(s64) = G_CONSTANT i64 4294967296
    45  %8:vgpr(s32) = G_FCONSTANT float 1.0
    50  %9:vgpr(s64) = G_FCONSTANT double 1.0
D | inst-select-or.mir
    21  %2:vgpr(s32) = COPY $vgpr0
    22  %3:vgpr(s64) = COPY $vgpr3_vgpr4
    32  %7:vgpr(s32) = G_OR %2, %6
    36  %8:vgpr(s32) = G_OR %6, %7
    40  %9:vgpr(s32) = G_OR %8, %2
D | regbankselect-amdgcn-exp.mir
    36  ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    37  ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    38  ; CHECK: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
    39  ; CHECK: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY [[COPY3]](s32)
    61  ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    62  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    63  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
    64  ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr3
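Taken together, the regbankselect-*.mir excerpts above all check the same behaviour of GlobalISel's RegBankSelect pass on AMDGPU: when an operand of a VALU operation still lives in the sgpr bank, a COPY into a vgpr is inserted so the operation itself sees only vgpr operands, while the inst-select-*.mir excerpts feed the instruction selector MIR that is already fully assigned to the vgpr bank. A minimal sketch of that copy-insertion pattern, written as plain MIR rather than the FileCheck form used in the tests; the initial sgpr COPY of $sgpr0 is an assumption, since the truncated excerpts only show the vgpr side:

    %0:sgpr(s32) = COPY $sgpr0      ; scalar input (assumed, not visible in the excerpts)
    %1:vgpr(s32) = COPY $vgpr0      ; vector input
    %2:vgpr(s32) = COPY %0(s32)     ; RegBankSelect inserts this cross-bank copy
    %3:vgpr(s32) = G_FADD %1, %2    ; the VALU operation now has all-vgpr operands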
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
D | illegal-sgpr-to-vgpr-copy.ll
     8  %vgpr = call i32 asm sideeffect "; def $0", "=${v1}"()
     9  call void asm sideeffect "; use $0", "${s9}"(i32 %vgpr)
    16  %vgpr = call <2 x i32> asm sideeffect "; def $0", "=${v[0:1]}"()
    17  call void asm sideeffect "; use $0", "${s[10:11]}"(<2 x i32> %vgpr)
    24  %vgpr = call <4 x i32> asm sideeffect "; def $0", "=${v[0:3]}"()
    25  call void asm sideeffect "; use $0", "${s[8:11]}"(<4 x i32> %vgpr)
    32  %vgpr = call <8 x i32> asm sideeffect "; def $0", "=${v[0:7]}"()
    33  call void asm sideeffect "; use $0", "${s[8:15]}"(<8 x i32> %vgpr)
    40  %vgpr = call <16 x i32> asm sideeffect "; def $0", "=${v[0:15]}"()
    41  call void asm sideeffect "; use $0", "${s[16:31]}"(<16 x i32> %vgpr)
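Each pair of lines in illegal-sgpr-to-vgpr-copy.ll defines a value through an inline-asm output constraint that pins it to VGPRs and then hands it to an inline-asm input constraint that demands SGPRs; judging by the file name, the point is to force the VGPR-to-SGPR copies the AMDGPU backend treats as illegal. A sketch of the first case wrapped in a function; the function name and the amdgpu_kernel calling convention are assumptions here, not taken from the excerpt:

    define amdgpu_kernel void @def_v1_use_s9() {
      ; the asm output constraint pins the defined value to v1
      %vgpr = call i32 asm sideeffect "; def $0", "=${v1}"()
      ; the asm input constraint then demands it in s9, forcing a VGPR-to-SGPR copy
      call void asm sideeffect "; use $0", "${s9}"(i32 %vgpr)
      ret void
    }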