/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Mips/llvm-ir/ |
D | cvt.ll |
    10 ; MIPS32: cvt.d.s {{.*}} # <MCInst #{{[0-9]+}} CVT_D32_S
    11 ; MIPS32FP64: cvt.d.s {{.*}} # <MCInst #{{[0-9]+}} CVT_D64_S
    12 ; MM: cvt.d.s {{.*}} # <MCInst #{{[0-9]+}} CVT_D32_S_MM
    13 ; MMFP64: cvt.d.s {{.*}} # <MCInst #{{[0-9]+}} CVT_D64_S_MM
    14 ; MMR6: cvt.d.s {{.*}} # <MCInst #{{[0-9]+}} CVT_D64_S_MM
    20 ; MIPS32: cvt.d.w {{.*}} # <MCInst #{{[0-9]+}} CVT_D32_W
    21 ; MIPS32FP64: cvt.d.w {{.*}} # <MCInst #{{[0-9]+}} CVT_D64_W
    22 ; MM: cvt.d.w {{.*}} # <MCInst #{{[0-9]+}} CVT_D32_W_MM
    23 ; MMFP64: cvt.d.w {{.*}} # <MCInst #{{[0-9]+}} CVT_D64_W_MM
    24 ; MMR6: cvt.d.w {{.*}} # <MCInst #{{[0-9]+}} CVT_D64_W_MM
    [all …]
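A minimal sketch of the IR these checks likely cover (hypothetical function names; the test's RUN lines and remaining prefixes are not in the match excerpt): fpext float-to-double is what cvt.d.s selects, and sitofp i32-to-double is what cvt.d.w selects on MIPS FPUs.

    ; Sketch only: fpext float -> double, selected as cvt.d.s on MIPS.
    define double @sketch_fpext(float %a) {
      %conv = fpext float %a to double
      ret double %conv
    }

    ; Sketch only: sitofp i32 -> double, selected as cvt.d.w.
    define double @sketch_sitofp(i32 %a) {
      %conv = sitofp i32 %a to double
      ret double %conv
    }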
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/NVPTX/ |
D | convert-fp.ll |
    5 ; CHECK: cvt.rzi.u16.f32 %rs{{[0-9]+}}, %f{{[0-9]+}};
    11 ; CHECK: cvt.rzi.u16.f64 %rs{{[0-9]+}}, %fd{{[0-9]+}};
    17 ; CHECK: cvt.rzi.u32.f32 %r{{[0-9]+}}, %f{{[0-9]+}};
    23 ; CHECK: cvt.rzi.u32.f64 %r{{[0-9]+}}, %fd{{[0-9]+}};
    29 ; CHECK: cvt.rzi.u64.f32 %rd{{[0-9]+}}, %f{{[0-9]+}};
    35 ; CHECK: cvt.rzi.u64.f64 %rd{{[0-9]+}}, %fd{{[0-9]+}};
    42 ; CHECK: cvt.rn.f32.u16 %f{{[0-9]+}}, %rs{{[0-9]+}};
    48 ; CHECK: cvt.rn.f32.u32 %f{{[0-9]+}}, %r{{[0-9]+}};
    54 ; CHECK: cvt.rn.f32.u64 %f{{[0-9]+}}, %rd{{[0-9]+}};
    60 ; CHECK: cvt.rn.f64.u16 %fd{{[0-9]+}}, %rs{{[0-9]+}};
    [all …]
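The cvt.rzi checks are float-to-unsigned-int conversions, which PTX rounds toward zero (rzi), while the cvt.rn checks cover the round-to-nearest inverse direction. A sketch of one case, with a hypothetical function name:

    ; Sketch: fptoui float -> i16 should produce the cvt.rzi.u16.f32 above.
    define i16 @sketch_fptoui(float %x) {
      %a = fptoui float %x to i16
      ret i16 %a
    }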
|
D | f16x2-instructions.ll |
    67 ; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
    68 ; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
    69 ; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
    70 ; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
    73 ; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
    74 ; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
    93 ; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
    94 ; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
    97 ; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
    98 ; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
    [all …]
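The CHECK-NOF16 prefixes describe the fallback on targets without native f16 arithmetic: each half lane is widened with cvt.f32.f16, the math runs in f32, and results are narrowed with cvt.rn.f16.f32. A sketch of IR that takes this path (hypothetical name):

    ; Sketch: v2f16 add; without native f16 support each lane is
    ; widened to f32 and narrowed back, as the checks above expect.
    define <2 x half> @sketch_fadd_v2f16(<2 x half> %a, <2 x half> %b) {
      %r = fadd <2 x half> %a, %b
      ret <2 x half> %r
    }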
|
D | fp16.ll |
    9 ; CHECK: cvt.f32.f16
    12 %cvt = call float @llvm.convert.from.fp16.f32(i16 %val) nounwind readnone
    13 store float %cvt, float addrspace(1)* %out, align 4
    19 ; CHECK: cvt.f64.f16
    22 %cvt = call double @llvm.convert.from.fp16.f64(i16 %val) nounwind readnone
    23 store double %cvt, double addrspace(1)* %out, align 4
    29 ; CHECK: cvt.rn.f16.f32
    32 %cvt = call i16 @llvm.convert.to.fp16.f32(float %val) nounwind readnone
    33 store i16 %cvt, i16 addrspace(1)* %out, align 4
    39 ; CHECK: cvt.rn.f16.f64
    [all …]
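The fragments reassemble into a complete test body; this sketch fills in a hypothetical function name and signature, while the call, store, and alignment are verbatim from the match:

    declare float @llvm.convert.from.fp16.f32(i16)

    ; Sketch: fp16 -> f32 conversion, emitted as cvt.f32.f16 on NVPTX.
    define void @sketch_fp16_to_f32(float addrspace(1)* %out, i16 %val) {
      %cvt = call float @llvm.convert.from.fp16.f32(i16 %val) nounwind readnone
      store float %cvt, float addrspace(1)* %out, align 4
      ret void
    }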
|
D | addrspacecast.ll |
    8 ; ALL-NOT: cvt.u64.u32
    19 ; PTRCONV: cvt.u64.u32
    20 ; NOPTRCONV-NOT: cvt.u64.u32
    31 ; PTRCONV: cvt.u64.u32
    32 ; NOPTRCONV-NOT: cvt.u64.u32
    43 ; PTRCONV: cvt.u64.u32
    44 ; NOPTRCONV-NOT: cvt.u64.u32
    55 ; ALL-NOT: cvt.u64.u32
    67 ; PTRCONV: cvt.u32.u64
    68 ; NOPTRCONV-NOT: cvt.u32.u64
    [all …]
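The PTRCONV/NOPTRCONV prefixes split on whether an addrspacecast needs a pointer-width conversion: when a non-generic space uses 32-bit pointers, casting into the 64-bit generic space emits cvt.u64.u32, otherwise no cvt appears. A sketch, under the assumption that addrspace(3) is NVPTX's shared space (function name hypothetical):

    ; Sketch: shared -> generic cast; with 32-bit shared pointers the
    ; backend widens the pointer, the cvt.u64.u32 checked above.
    define void @sketch_cast(i32 addrspace(3)* %p) {
      %g = addrspacecast i32 addrspace(3)* %p to i32*
      store i32 0, i32* %g
      ret void
    }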
|
D | f16-instructions.ll |
    29 ; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
    30 ; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
    32 ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
    44 ; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
    45 ; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
    47 ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
    60 ; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
    62 ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
    74 ; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
    76 ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
    [all …]
|
D | sext-in-reg.ll |
    7 ; CHECK: cvt.s64.s8
    8 ; CHECK: cvt.s64.s8
    26 ; CHECK: cvt.s64.s32
    27 ; CHECK: cvt.s64.s32
    44 ; CHECK: cvt.s64.s16
    45 ; CHECK: cvt.s64.s16
    62 ; CHECK: cvt.s32.s8
    63 ; CHECK: cvt.s32.s8
    80 ; CHECK: cvt.s32.s16
    81 ; CHECK: cvt.s32.s16
    [all …]
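Sign-extension-in-register collapses to a single signed integer cvt per operand. A sketch of the canonical trunc-then-sext idiom (hypothetical name):

    ; Sketch: trunc + sext in one register folds to cvt.s64.s8 on NVPTX.
    define i64 @sketch_sext_in_reg(i64 %a) {
      %t = trunc i64 %a to i8
      %s = sext i8 %t to i64
      ret i64 %s
    }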
|
/external/llvm/test/CodeGen/NVPTX/ |
D | convert-fp.ll |
    6 ; CHECK: cvt.rzi.u16.f32 %rs{{[0-9]+}}, %f{{[0-9]+}};
    13 ; CHECK: cvt.rzi.u16.f64 %rs{{[0-9]+}}, %fd{{[0-9]+}};
    20 ; CHECK: cvt.rzi.u32.f32 %r{{[0-9]+}}, %f{{[0-9]+}};
    27 ; CHECK: cvt.rzi.u32.f64 %r{{[0-9]+}}, %fd{{[0-9]+}};
    35 ; CHECK: cvt.rzi.u64.f32 %rd{{[0-9]+}}, %f{{[0-9]+}};
    42 ; CHECK: cvt.rzi.u64.f64 %rd{{[0-9]+}}, %fd{{[0-9]+}};
    49 ; CHECK: cvt.rn.f32.u16 %f{{[0-9]+}}, %rs{{[0-9]+}};
    56 ; CHECK: cvt.rn.f32.u32 %f{{[0-9]+}}, %r{{[0-9]+}};
    63 ; CHECK: cvt.rn.f32.u64 %f{{[0-9]+}}, %rd{{[0-9]+}};
    70 ; CHECK: cvt.rn.f32.f64 %f{{[0-9]+}}, %fd{{[0-9]+}};
    [all …]
|
D | fp16.ll |
    9 ; CHECK: cvt.f32.f16
    12 %cvt = call float @llvm.convert.from.fp16.f32(i16 %val) nounwind readnone
    13 store float %cvt, float addrspace(1)* %out, align 4
    19 ; CHECK: cvt.f64.f16
    22 %cvt = call double @llvm.convert.from.fp16.f64(i16 %val) nounwind readnone
    23 store double %cvt, double addrspace(1)* %out, align 4
    29 ; CHECK: cvt.rn.f16.f32
    32 %cvt = call i16 @llvm.convert.to.fp16.f32(float %val) nounwind readnone
    33 store i16 %cvt, i16 addrspace(1)* %out, align 4
    39 ; CHECK: cvt.rn.f16.f64
    [all …]
|
D | sext-in-reg.ll |
    7 ; CHECK: cvt.s64.s8
    8 ; CHECK: cvt.s64.s8
    26 ; CHECK: cvt.s64.s32
    27 ; CHECK: cvt.s64.s32
    44 ; CHECK: cvt.s64.s16
    45 ; CHECK: cvt.s64.s16
    62 ; CHECK: cvt.s32.s8
    63 ; CHECK: cvt.s32.s8
    80 ; CHECK: cvt.s32.s16
    81 ; CHECK: cvt.s32.s16
    [all …]
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | cvt_f32_ubyte.ll |
    12 %cvt = uitofp i8 %load to float
    13 store float %cvt, float addrspace(1)* %out, align 4
    24 %cvt = uitofp <2 x i8> %load to <2 x float>
    25 store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16
    38 %cvt = uitofp <3 x i8> %load to <3 x float>
    39 store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16
    54 %cvt = uitofp <4 x i8> %load to <4 x float>
    55 store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
    78 %cvt = uitofp <4 x i8> %load to <4 x float>
    79 store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
    [all …]
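The scalar case reassembles into a self-contained function (hypothetical name; the load, convert, and store are verbatim from the match). The file name suggests these selections target the v_cvt_f32_ubyte* instructions:

    ; Sketch: unsigned byte -> float.
    define void @sketch_u8_to_f32(float addrspace(1)* %out, i8 addrspace(1)* %in) {
      %load = load i8, i8 addrspace(1)* %in
      %cvt = uitofp i8 %load to float
      store float %cvt, float addrspace(1)* %out, align 4
      ret void
    }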
|
D | trunc-store-f64-to-f16.ll |
    8 %cvt = fptrunc double %val to half
    9 store half %cvt, half addrspace(1)* %out
    17 %cvt = fptrunc <2 x double> %val to <2 x half>
    18 store <2 x half> %cvt, <2 x half> addrspace(1)* %out
    26 %cvt = fptrunc <3 x double> %val to <3 x half>
    27 store <3 x half> %cvt, <3 x half> addrspace(1)* %out
    35 %cvt = fptrunc <4 x double> %val to <4 x half>
    36 store <4 x half> %cvt, <4 x half> addrspace(1)* %out
    44 %cvt = fptrunc <8 x double> %val to <8 x half>
    45 store <8 x half> %cvt, <8 x half> addrspace(1)* %out
    [all …]
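The scalar fragment as a complete function (hypothetical name; the body is verbatim from the match):

    ; Sketch: f64 -> f16 truncating store; AMDGPU lowers this through f32,
    ; as there is no single f64 -> f16 conversion instruction.
    define void @sketch_trunc_store(half addrspace(1)* %out, double %val) {
      %cvt = fptrunc double %val to half
      store half %cvt, half addrspace(1)* %out
      ret void
    }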
|
D | cvt_flr_i32_f32.ll |
    15 %cvt = fptosi float %floor to i32
    16 store i32 %cvt, i32 addrspace(1)* %out
    28 %cvt = fptosi float %floor to i32
    29 store i32 %cvt, i32 addrspace(1)* %out
    41 %cvt = fptosi float %floor to i32
    42 store i32 %cvt, i32 addrspace(1)* %out
    54 %cvt = fptosi float %floor to i32
    55 store i32 %cvt, i32 addrspace(1)* %out
    68 %cvt = fptosi float %floor to i32
    69 store i32 %cvt, i32 addrspace(1)* %out
    [all …]
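Each repeated pair is the tail of a floor-then-convert body; the full pattern, which AMDGPU can match to v_cvt_flr_i32_f32, looks like this sketch (hypothetical name):

    declare float @llvm.floor.f32(float)

    ; Sketch: fptosi(floor(x)) is the single-instruction cvt_flr pattern.
    define void @sketch_cvt_flr(i32 addrspace(1)* %out, float %x) {
      %floor = call float @llvm.floor.f32(float %x)
      %cvt = fptosi float %floor to i32
      store i32 %cvt, i32 addrspace(1)* %out
      ret void
    }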
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | trunc-store-f64-to-f16.ll |
    7 %cvt = fptrunc double %val to half
    8 store half %cvt, half addrspace(1)* %out
    16 %cvt = fptrunc <2 x double> %val to <2 x half>
    17 store <2 x half> %cvt, <2 x half> addrspace(1)* %out
    25 %cvt = fptrunc <3 x double> %val to <3 x half>
    26 store <3 x half> %cvt, <3 x half> addrspace(1)* %out
    34 %cvt = fptrunc <4 x double> %val to <4 x half>
    35 store <4 x half> %cvt, <4 x half> addrspace(1)* %out
    43 %cvt = fptrunc <8 x double> %val to <8 x half>
    44 store <8 x half> %cvt, <8 x half> addrspace(1)* %out
    [all …]
|
D | llvm.amdgcn.cvt.pkrtz.ll |
    11 %result = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %y)
    20 %result = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %x)
    29 %result = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float undef, float undef)
    47 %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float %b)
    48 store <2 x half> %cvt, <2 x half> addrspace(1)* %out.gep
    61 %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float 1.0)
    62 store <2 x half> %cvt, <2 x half> addrspace(1)* %out.gep
    76 %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float 1.0, float %a)
    77 store <2 x half> %cvt, <2 x half> addrspace(1)* %out.gep
    94 %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %neg.a, float %b)
    [all …]
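The intrinsic packs two f32 values into a <2 x half> with round-toward-zero (v_cvt_pkrtz_f16_f32). A self-contained sketch built from the fragments (hypothetical function name):

    declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float)

    ; Sketch: pack two floats into one f16 pair.
    define <2 x half> @sketch_pkrtz(float %x, float %y) {
      %result = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %y)
      ret <2 x half> %result
    }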
|
D | cvt_f32_ubyte.ll |
    17 %cvt = uitofp i8 %load to float
    18 store float %cvt, float addrspace(1)* %out, align 4
    31 %cvt = uitofp <2 x i8> %load to <2 x float>
    32 store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16
    47 %cvt = uitofp <3 x i8> %load to <3 x float>
    48 store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16
    65 %cvt = uitofp <4 x i8> %load to <4 x float>
    66 store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
    91 %cvt = uitofp <4 x i8> %load to <4 x float>
    92 store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
    [all …]
|
D | cvt_flr_i32_f32.ll |
    15 %cvt = fptosi float %floor to i32
    16 store i32 %cvt, i32 addrspace(1)* %out
    28 %cvt = fptosi float %floor to i32
    29 store i32 %cvt, i32 addrspace(1)* %out
    41 %cvt = fptosi float %floor to i32
    42 store i32 %cvt, i32 addrspace(1)* %out
    54 %cvt = fptosi float %floor to i32
    55 store i32 %cvt, i32 addrspace(1)* %out
    68 %cvt = fptosi float %floor to i32
    69 store i32 %cvt, i32 addrspace(1)* %out
    [all …]
|
D | llvm.amdgcn.cvt.pknorm.u16.ll |
    12 %result = call <2 x i16> @llvm.amdgcn.cvt.pknorm.u16(float %x, float %y)
    22 %result = call <2 x i16> @llvm.amdgcn.cvt.pknorm.u16(float %x, float %x)
    41 %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.u16(float %a, float %b)
    42 %r = bitcast <2 x i16> %cvt to i32
    56 %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.u16(float %a, float 1.0)
    57 %r = bitcast <2 x i16> %cvt to i32
    72 %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.u16(float 1.0, float %a)
    73 %r = bitcast <2 x i16> %cvt to i32
    91 %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.u16(float %neg.a, float %b)
    92 %r = bitcast <2 x i16> %cvt to i32
    [all …]
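The intrinsic packs two floats into unsigned-normalized i16s, and the tests bitcast the pair to i32 for the store or return; the pknorm.i16 entry below has the same shape with the signed-normalized variant. Sketch (hypothetical name):

    declare <2 x i16> @llvm.amdgcn.cvt.pknorm.u16(float, float)

    ; Sketch: pack + bitcast, mirroring the matched lines.
    define i32 @sketch_pknorm_u16(float %a, float %b) {
      %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.u16(float %a, float %b)
      %r = bitcast <2 x i16> %cvt to i32
      ret i32 %r
    }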
|
D | llvm.amdgcn.cvt.pknorm.i16.ll |
    12 %result = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %x, float %y)
    22 %result = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %x, float %x)
    41 %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %a, float %b)
    42 %r = bitcast <2 x i16> %cvt to i32
    56 %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %a, float 1.0)
    57 %r = bitcast <2 x i16> %cvt to i32
    72 %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float 1.0, float %a)
    73 %r = bitcast <2 x i16> %cvt to i32
    91 %cvt = call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %neg.a, float %b)
    92 %r = bitcast <2 x i16> %cvt to i32
    [all …]
|
D | mad-mix-lo.ll |
    14 %cvt.result = fptrunc float %result to half
    15 ret half %cvt.result
    27 %cvt.result = fptrunc float %result to half
    28 ret half %cvt.result
    41 %cvt.result = fptrunc float %result to half
    42 ret half %cvt.result
    55 %cvt.result = fptrunc float %result to half
    56 %max = call half @llvm.maxnum.f16(half %cvt.result, half 0.0)
    74 %cvt.result = fptrunc float %clamp to half
    75 ret half %cvt.result
    [all …]
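The fptrunc tails close mixed-precision bodies: f16 inputs extended to f32, a fused multiply-add in f32, and the result truncated back to f16 (the v_mad_mixlo_f16 pattern). A sketch with llvm.fmuladd standing in for whatever FMA form the real test uses (hypothetical name):

    declare float @llvm.fmuladd.f32(float, float, float)

    ; Sketch: mixed-precision mad, truncated to half at the end.
    define half @sketch_mad_mix(half %a, half %b, half %c) {
      %a.f = fpext half %a to float
      %b.f = fpext half %b to float
      %c.f = fpext half %c to float
      %result = call float @llvm.fmuladd.f32(float %a.f, float %b.f, float %c.f)
      %cvt.result = fptrunc float %result to half
      ret half %cvt.result
    }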
|
D | v_cvt_pk_u8_f32.ll |
    4 declare i32 @llvm.amdgcn.cvt.pk.u8.f32(float, i32, i32) #0
    9 %result = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 0, i32 %reg) #0
    17 %result = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 1, i32 %reg) #0
    25 %result = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 2, i32 %reg) #0
    33 %result = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 3, i32 %reg) #0
    44 %result0 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 0, i32 %reg) #0
    45 %result1 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 1, i32 %result0) #0
    46 %result2 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 2, i32 %result1) #0
    47 %result3 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 3, i32 %result2) #0
    55 %result = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 %idx, i32 %reg) #0
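Lines 44-47 of the test chain the intrinsic so each call packs one byte lane into the i32 produced by the previous call. A two-lane sketch (hypothetical name; the declaration is verbatim from line 4, minus the attribute group):

    declare i32 @llvm.amdgcn.cvt.pk.u8.f32(float, i32, i32)

    ; Sketch: pack %src into byte 0, then byte 1, of the running i32.
    define i32 @sketch_pk_u8_chain(float %src, i32 %reg) {
      %r0 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 0, i32 %reg)
      %r1 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 1, i32 %r0)
      ret i32 %r1
    }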
|
D | cvt_rpi_i32_f32.ll |
    15 %cvt = fptosi float %floor to i32
    16 store i32 %cvt, i32 addrspace(1)* %out
    28 %cvt = fptosi float %floor to i32
    29 store i32 %cvt, i32 addrspace(1)* %out
    44 %cvt = fptosi float %floor to i32
    45 store i32 %cvt, i32 addrspace(1)* %out
    63 %cvt = fptosi float %floor to i32
    64 store i32 %cvt, i32 addrspace(1)* %out
    77 %cvt = fptoui float %floor to i32
    78 store i32 %cvt, i32 addrspace(1)* %out
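Same fptosi-of-floor tail as the flr test, but with 0.5 added first: floor(x + 0.5) is what v_cvt_rpi_i32_f32 computes. The final fptoui match is presumably a negative test of the signedness requirement. Sketch (hypothetical name):

    declare float @llvm.floor.f32(float)

    ; Sketch: fptosi(floor(x + 0.5)), the cvt_rpi pattern.
    define void @sketch_cvt_rpi(i32 addrspace(1)* %out, float %x) {
      %add = fadd float %x, 0.5
      %floor = call float @llvm.floor.f32(float %add)
      %cvt = fptosi float %floor to i32
      store i32 %cvt, i32 addrspace(1)* %out
      ret void
    }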
|
/external/mesa3d/src/gallium/drivers/nouveau/codegen/lib/ |
D | gk104.asm |
    19 long cvt u32 $r1 neg u32 $r1
    34 long cvt u32 $r2 neg u32 $r1
    55 long cvt s32 $r0 abs s32 $r0
    56 long cvt s32 $r1 abs s32 $r1
    61 cvt u32 $r1 neg u32 $r1
    76 long cvt u32 $r2 neg u32 $r1
    85 long $p3 cvt s32 $r0 neg s32 $r0
    87 $p2 cvt s32 $r1 neg s32 $r1
    108 cvt rn f32 $r3 u16 1 $r1 label
    109 cvt rn f32 $r2 u16 0 $r1 label
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/Mips/micromips/ |
D | valid-fp64.s |
    10 cvt.d.s $f0, $f2 # CHECK: cvt.d.s $f0, $f2 # encoding: [0x54,0x02,0x13,0x7b]
    12 cvt.d.w $f0, $f2 # CHECK: cvt.d.w $f0, $f2 # encoding: [0x54,0x02,0x33,0x7b]
    14 cvt.s.d $f0, $f2 # CHECK: cvt.s.d $f0, $f2 # encoding: [0x54,0x02,0x1b,0x7b]
    16 cvt.w.d $f0, $f2 # CHECK: cvt.w.d $f0, $f2 # encoding: [0x54,0x02,0x49,0x3b]
    18 cvt.l.s $f4, $f2 # CHECK: cvt.l.s $f4, $f2 # encoding: [0x54,0x82,0x01,0x3b]
    20 cvt.l.d $f4, $f2 # CHECK: cvt.l.d $f4, $f2 # encoding: [0x54,0x82,0x41,0x3b]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/PTX/ |
D | cvt.ll |
    71 ; CHECK: cvt.u16.u32 %ret{{[0-9]+}}, %r{{[0-9]+}};
    78 ; CHECK: cvt.u16.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}};
    85 ; CHECK: cvt.rzi.u16.f32 %ret{{[0-9]+}}, %f{{[0-9]+}};
    92 ; CHECK: cvt.rzi.u16.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}};
    108 ; CHECK: cvt.u32.u16 %ret{{[0-9]+}}, %rh{{[0-9]+}};
    115 ; CHECK: cvt.u32.u64 %ret{{[0-9]+}}, %rd{{[0-9]+}};
    122 ; CHECK: cvt.rzi.u32.f32 %ret{{[0-9]+}}, %f{{[0-9]+}};
    129 ; CHECK: cvt.rzi.u32.f64 %ret{{[0-9]+}}, %fd{{[0-9]+}};
    145 ; CHECK: cvt.u64.u16 %ret{{[0-9]+}}, %rh{{[0-9]+}};
    152 ; CHECK: cvt.u64.u32 %ret{{[0-9]+}}, %r{{[0-9]+}};
    [all …]
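Alongside the rounding float conversions, the plain integer checks come from simple truncations: a trunc between unsigned widths selects the corresponding integer cvt. Sketch (hypothetical name):

    ; Sketch: i32 -> i16 truncation, matching the cvt.u16.u32 check.
    define i16 @sketch_trunc_u32_u16(i32 %x) {
      %a = trunc i32 %x to i16
      ret i16 %a
    }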
|