/external/llvm/test/Transforms/EarlyCSE/ |
D | invariant-loads.ll |
      7  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
      8  ; CHECK: call void @clobber_and_use(i32 %val0)
      9  ; CHECK: call void @clobber_and_use(i32 %val0)
     10  ; CHECK: call void @clobber_and_use(i32 %val0)
     13  %val0 = load i32, i32* %ptr, !invariant.load !{}
     14  call void @clobber_and_use(i32 %val0)
     28  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
     29  ; CHECK: call void @clobber_and_use(i32 %val0)
     30  ; CHECK: call void @clobber_and_use(i32 %val0)
     32  %val0 = load i32, i32* %ptr, !invariant.load !{}
    [all …]
|
/external/llvm/test/CodeGen/NVPTX/ |
D | mulwide.ll |
      9  %val0 = sext i16 %a to i32
     11  %val2 = mul i32 %val0, %val1
     20  %val0 = zext i16 %a to i32
     22  %val2 = mul i32 %val0, %val1
     31  %val0 = sext i8 %a to i32
     33  %val2 = mul i32 %val0, %val1
     42  %val0 = zext i8 %a to i32
     44  %val2 = mul i32 %val0, %val1
     53  %val0 = sext i32 %a to i64
     55  %val2 = mul i64 %val0, %val1
    [all …]
|
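The matches above all have the same shape: extend both narrow operands (sext or zext) and multiply in the wider type, the pattern the NVPTX backend folds into PTX's widening multiplies (mul.wide.s16, mul.wide.u16, and so on). A minimal scalar sketch of the same semantics in C++, with illustrative function names:

    #include <cstdint>

    // Signed widening multiply: sign-extend both 16-bit operands so the
    // full 32-bit product is kept (mirrors the sext + mul i32 pair).
    int32_t mulwide_s16(int16_t a, int16_t b) {
        return static_cast<int32_t>(a) * static_cast<int32_t>(b);
    }

    // Unsigned variant: zero-extend instead (mirrors zext + mul i32).
    uint32_t mulwide_u16(uint16_t a, uint16_t b) {
        return static_cast<uint32_t>(a) * static_cast<uint32_t>(b);
    }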
D | bfe.ll |
      9  %val0 = ashr i32 %a, 4
     10  %val1 = and i32 %val0, 15
     19  %val0 = ashr i32 %a, 3
     20  %val1 = and i32 %val0, 7
     29  %val0 = ashr i32 %a, 5
     30  %val1 = and i32 %val0, 7
|
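Each pair of matches above is an arithmetic shift right followed by a mask, i.e. an unsigned bit-field extract, which this test expects NVPTX to fold into a single BFE instruction. The and-mask discards the bits that ashr sign-extends, so an unsigned shift computes the same field. A scalar sketch in C++ (helper name is illustrative):

    #include <cstdint>

    // Extract `width` bits starting at bit `pos`, zero-extended.
    // (a >> 4) & 15 from the first test is bfe_u32(a, 4, 4).
    uint32_t bfe_u32(uint32_t a, unsigned pos, unsigned width) {
        return (a >> pos) & ((1u << width) - 1u);
    }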
/external/llvm-project/llvm/test/CodeGen/NVPTX/ |
D | mulwide.ll |
      9  %val0 = sext i16 %a to i32
     11  %val2 = mul i32 %val0, %val1
     20  %val0 = zext i16 %a to i32
     22  %val2 = mul i32 %val0, %val1
     31  %val0 = sext i8 %a to i32
     33  %val2 = mul i32 %val0, %val1
     42  %val0 = zext i8 %a to i32
     44  %val2 = mul i32 %val0, %val1
     53  %val0 = sext i32 %a to i64
     55  %val2 = mul i64 %val0, %val1
    [all …]
|
D | bfe.ll |
      9  %val0 = ashr i32 %a, 4
     10  %val1 = and i32 %val0, 15
     19  %val0 = ashr i32 %a, 3
     20  %val1 = and i32 %val0, 7
     29  %val0 = ashr i32 %a, 5
     30  %val1 = and i32 %val0, 7
|
/external/libaom/libaom/aom_dsp/mips/ |
D | intrapred_msa.c |
    158  uint32_t val0, val1;  in intra_predict_dc_4x4_msa() local
    164  val0 = LW(src_top);  in intra_predict_dc_4x4_msa()
    166  INSERT_W2_SB(val0, val1, src);  in intra_predict_dc_4x4_msa()
    172  val0 = __msa_copy_u_w((v4i32)store, 0);  in intra_predict_dc_4x4_msa()
    174  SW4(val0, val0, val0, val0, dst, dst_stride);  in intra_predict_dc_4x4_msa()
    179  uint32_t val0;  in intra_predict_dc_tl_4x4_msa() local
    184  val0 = LW(src);  in intra_predict_dc_tl_4x4_msa()
    185  data = (v16i8)__msa_insert_w((v4i32)data, 0, val0);  in intra_predict_dc_tl_4x4_msa()
    190  val0 = __msa_copy_u_w((v4i32)store, 0);  in intra_predict_dc_tl_4x4_msa()
    192  SW4(val0, val0, val0, val0, dst, dst_stride);  in intra_predict_dc_tl_4x4_msa()
    [all …]
|
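LW, INSERT_W2_SB, SW4 and friends above are MIPS MSA vector operations, but the underlying algorithm is plain DC intra prediction: one rounded average of the four above and four left neighbour pixels, broadcast over the 4x4 block (the _tl variant averages a single edge). A hedged scalar equivalent, assuming the usual (sum + 4) >> 3 rounding these codecs use:

    #include <cstdint>
    #include <cstring>

    // Scalar sketch of 4x4 DC prediction: one rounded average of the
    // 4 top and 4 left neighbours, replicated across the whole block.
    void dc_predict_4x4(const uint8_t *top, const uint8_t *left,
                        uint8_t *dst, int dst_stride) {
        unsigned sum = 4;  // rounding bias for the divide-by-8 below
        for (int i = 0; i < 4; ++i) sum += top[i] + left[i];
        const uint8_t dc = static_cast<uint8_t>(sum >> 3);
        for (int r = 0; r < 4; ++r)
            std::memset(dst + r * dst_stride, dc, 4);  // fill one row
    }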
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/ |
D | readonly_references_to_resources.mlir |
      8  …%val0 = "tf.VariableV2"() {_class = ["loc:@v"], container = "", device = "", shape = #tf.shape<96>…
      9  %val1 = "tf.Identity"(%val0) : (tensor<96x!tf.f32ref>) -> tensor<96xf32>
     20  …%val0 = "tf.VariableV2"() {container = "", device = "", shape = #tf.shape<96>, shared_name = ""} :…
     21  %val1 = "tf.Identity"(%val0) {_class = ["loc:@v"]} : (tensor<96x!tf.f32ref>) -> tensor<96xf32>
     38  …%val0 = "tf.VariableV2"() {_class = ["loc:@v"], container = "", device = "", shape = #tf.shape<96>…
     39  %val1 = "tf.Identity"(%val0) : (tensor<96x!tf.f32ref>) -> tensor<96xf32>
     40  %val2 = "tf.Identity"(%val0) : (tensor<96x!tf.f32ref>) -> tensor<96xf32>
     51  …%val0 = "tf.VariableV2"() {_class = ["loc:@v"], container = "", device = "", shape = #tf.shape<96>…
     61  …%val0 = "tf.VariableV2"() {_class = ["loc:@v"], container = "", device = "", shape = #tf.shape<96>…
     62  %val1 = "tf.CustomIdentity"(%val0) : (tensor<96x!tf.f32ref>) -> tensor<96xf32>
    [all …]
|
/external/igt-gpu-tools/tests/i915/ |
D | gem_caching.c |
    144  uint8_t val0 = i;  variable
    155  igt_assert_f(cpu_ptr[j] == val0,
    157  j, cpu_ptr[j], val0);
    171  uint8_t val0 = i, val1;  variable
    174  blt_bo_fill(staging_bo, scratch_bo, val0);
    179  val1 = val0 + 63;
    190  igt_assert_f(gtt_ptr[j] == val0,
    192  j, start, len, gtt_ptr[j], val0);
    200  igt_assert_f(gtt_ptr[j] == val0,
    202  j, start, len, gtt_ptr[j], val0);
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | amdgcn-ieee.ll |
     11  %val0 = load volatile float, float addrspace(1)* undef
     13  %min = call float @llvm.minnum.f32(float %val0, float %val1)
     26  %val0 = load volatile float, float addrspace(1)* undef
     28  %min = call float @llvm.minnum.f32(float %val0, float %val1)
     41  %val0 = load volatile float, float addrspace(1)* undef
     43  %min = call float @llvm.minnum.f32(float %val0, float %val1)
     56  %val0 = load volatile float, float addrspace(1)* undef
     58  %min = call float @llvm.minnum.f32(float %val0, float %val1)
     71  %val0 = load volatile float, float addrspace(1)* undef
     73  %min = call float @llvm.minnum.f32(float %val0, float %val1)
    [all …]
|
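Every block in amdgcn-ieee.ll loads two volatile floats and takes llvm.minnum.f32; the test is about how the AMDGPU IEEE mode bit affects the instruction selected for that call. llvm.minnum follows IEEE-754 minNum semantics, i.e. libm's fmin: if exactly one operand is a NaN, the other operand is returned. Scalar reference in C++:

    #include <cmath>

    // llvm.minnum.f32 matches libm fmin: a quiet NaN input loses,
    // so minnum(NaN, x) == x rather than NaN.
    float minnum(float a, float b) { return std::fmin(a, b); }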
D | vector_shuffle.packed.ll |
     11  %val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
     13  …%shuffle = shufflevector <4 x half> %val0, <4 x half> %val1, <4 x i32> <i32 2, i32 3, i32 undef, i…
     27  %val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
     29  …%shuffle = shufflevector <4 x half> %val0, <4 x half> %val1, <4 x i32> <i32 2, i32 3, i32 4, i32 u…
     40  %val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
     42  …%shuffle = shufflevector <4 x half> %val0, <4 x half> %val1, <4 x i32> <i32 undef, i32 1, i32 unde…
     54  %val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
     56  …%shuffle = shufflevector <4 x half> %val0, <4 x half> %val1, <4 x i32> <i32 undef, i32 3, i32 unde…
     67  %val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
     69  …%shuffle = shufflevector <4 x half> %val0, <4 x half> %val1, <4 x i32> <i32 undef, i32 3, i32 unde…
    [all …]
|
D | ds_read2_superreg.ll |
     18  %val0 = load <2 x float>, <2 x float> addrspace(3)* %arrayidx0, align 4
     20  store <2 x float> %val0, <2 x float> addrspace(1)* %out.gep
     32  %val0 = load <2 x float>, <2 x float> addrspace(3)* %arrayidx0
     34  store <2 x float> %val0, <2 x float> addrspace(1)* %out.gep
     49  %val0 = load <4 x float>, <4 x float> addrspace(3)* %arrayidx0, align 4
     50  %elt0 = extractelement <4 x float> %val0, i32 0
     51  %elt1 = extractelement <4 x float> %val0, i32 1
     52  %elt2 = extractelement <4 x float> %val0, i32 2
     53  %elt3 = extractelement <4 x float> %val0, i32 3
     74  %val0 = load <3 x float>, <3 x float> addrspace(3)* %arrayidx0, align 4
    [all …]
|
D | hip.extern.shared.array.ll |
     18  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     20  store float %val0, float addrspace(3)* %arrayidx1, align 4
     36  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     45  %val = phi float [ %val0, %if ], [ %val1, %else ]
     58  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     60  store float %val0, float addrspace(3)* %arrayidx1, align 4
     73  %val0 = load i8, i8 addrspace(3)* %arrayidx0, align 4
     74  %val1 = uitofp i8 %val0 to float
     90  %val0 = load i8, i8 addrspace(3)* %arrayidx0, align 4
     91  %val1 = uitofp i8 %val0 to float
    [all …]
|
D | cgp-bitfield-extract.ll |
     15  ; OPT-NEXT: %val0 = and i32 %0, 255
     44  %val0 = and i32 %shr, 255
     54  %phi = phi i32 [ %val0, %bb0 ], [ %val1, %bb1 ]
     65  ; OPT-NEXT: %val0 = and i32 %0, 255
     84  %val0 = and i32 %shr, 255
     94  %phi = phi i32 [ %val0, %bb0 ], [ %val1, %bb1 ]
    106  ; OPT-NEXT: %val0 = and i16 %0, 255
    141  %val0 = and i16 %shr, 255
    151  %phi = phi i16 [ %val0, %bb0 ], [ %val1, %bb1 ]
    166  ; OPT-NEXT: %val0 = and i64 %0, 255
    [all …]
|
D | annotate-kernel-features.ll |
     31  %val0 = call i32 @llvm.r600.read.tgid.y()
     32  store volatile i32 %val0, i32 addrspace(1)* %ptr
     40  %val0 = call i32 @llvm.r600.read.tgid.x()
     42  store volatile i32 %val0, i32 addrspace(1)* %ptr
     56  %val0 = call i32 @llvm.r600.read.tgid.x()
     58  store volatile i32 %val0, i32 addrspace(1)* %ptr
     65  %val0 = call i32 @llvm.r600.read.tgid.y()
     67  store volatile i32 %val0, i32 addrspace(1)* %ptr
     74  %val0 = call i32 @llvm.r600.read.tgid.x()
     77  store volatile i32 %val0, i32 addrspace(1)* %ptr
    [all …]
|
/external/deqp/data/gles31/shaders/es31/ |
D | linkage_tessellation_uniform_types.test |
      9  uniform float val0 = -1.25;
     27  tc_out[gl_InvocationID] = val0;
     61  uniform vec2 val0 = [ vec2(-1.25, 1.25) ];
     79  tc_out[gl_InvocationID] = val0;
    113  uniform vec3 val0 = [ vec3(-1.25, 1.25, -9.5) ];
    131  tc_out[gl_InvocationID] = val0;
    165  uniform vec4 val0 = [ vec4(-1.25, 1.25, -9.5, -12.2) ];
    183  tc_out[gl_InvocationID] = val0;
    217  uniform mat2 val0 = [ mat2(-1.25, 1.25, -9.5, -12.2) ];
    235  tc_out[gl_InvocationID] = val0;
    [all …]
|
/external/deqp/data/gles31/shaders/es32/ |
D | linkage_tessellation_uniform_types.test |
     26  uniform float val0 = -1.25;
     44  tc_out[gl_InvocationID] = val0;
     77  uniform vec2 val0 = [ vec2(-1.25, 1.25) ];
     95  tc_out[gl_InvocationID] = val0;
    128  uniform vec3 val0 = [ vec3(-1.25, 1.25, -9.5) ];
    146  tc_out[gl_InvocationID] = val0;
    179  uniform vec4 val0 = [ vec4(-1.25, 1.25, -9.5, -12.2) ];
    197  tc_out[gl_InvocationID] = val0;
    230  uniform mat2 val0 = [ mat2(-1.25, 1.25, -9.5, -12.2) ];
    248  tc_out[gl_InvocationID] = val0;
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | intrapred_msa.c |
    156  uint32_t val0, val1;  in intra_predict_dc_4x4_msa() local
    162  val0 = LW(src_top);  in intra_predict_dc_4x4_msa()
    164  INSERT_W2_SB(val0, val1, src);  in intra_predict_dc_4x4_msa()
    170  val0 = __msa_copy_u_w((v4i32)store, 0);  in intra_predict_dc_4x4_msa()
    172  SW4(val0, val0, val0, val0, dst, dst_stride);  in intra_predict_dc_4x4_msa()
    177  uint32_t val0;  in intra_predict_dc_tl_4x4_msa() local
    182  val0 = LW(src);  in intra_predict_dc_tl_4x4_msa()
    183  data = (v16i8)__msa_insert_w((v4i32)data, 0, val0);  in intra_predict_dc_tl_4x4_msa()
    188  val0 = __msa_copy_u_w((v4i32)store, 0);  in intra_predict_dc_tl_4x4_msa()
    190  SW4(val0, val0, val0, val0, dst, dst_stride);  in intra_predict_dc_tl_4x4_msa()
    [all …]
|
/external/tensorflow/tensorflow/lite/toco/graph_transformations/ |
D | resolve_constant_binary.cc |
    133  const auto val0 = input0_data[Offset(input0_shape, input0_indices)];  in EvaluateBinaryOperatorOnConstantInputs() local
    138  outval = val0 + val1;  in EvaluateBinaryOperatorOnConstantInputs()
    140  outval = val0 * val1;  in EvaluateBinaryOperatorOnConstantInputs()
    142  outval = val0 - val1;  in EvaluateBinaryOperatorOnConstantInputs()
    144  outval = val0 / val1;  in EvaluateBinaryOperatorOnConstantInputs()
    146  outval = std::floor(val0 / val1);  in EvaluateBinaryOperatorOnConstantInputs()
    148  outval = val0 - (std::floor(val0 / val1) * val1);  in EvaluateBinaryOperatorOnConstantInputs()
    150  outval = std::min(val0, val1);  in EvaluateBinaryOperatorOnConstantInputs()
    152  outval = std::max(val0, val1);  in EvaluateBinaryOperatorOnConstantInputs()
    154  outval = val0 < val1;  in EvaluateBinaryOperatorOnConstantInputs()
    [all …]
|
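The assignments above constant-fold elementwise binary operators in toco. The only non-obvious case is FLOOR_MOD: the remainder of a floored division, which takes the sign of the divisor, unlike C's fmod. A small sketch:

    #include <cmath>

    // FLOOR_MOD as folded above: remainder of a floored division, so
    // the result takes the sign of the divisor, unlike std::fmod.
    // Example: floor_mod(-3.0f, 2.0f) == 1.0f; std::fmod(-3, 2) == -1.
    float floor_mod(float val0, float val1) {
        return val0 - std::floor(val0 / val1) * val1;
    }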
/external/llvm/test/CodeGen/AMDGPU/ |
D | ds_read2_superreg.ll |
     18  %val0 = load <2 x float>, <2 x float> addrspace(3)* %arrayidx0, align 4
     20  store <2 x float> %val0, <2 x float> addrspace(1)* %out.gep
     32  %val0 = load <2 x float>, <2 x float> addrspace(3)* %arrayidx0
     34  store <2 x float> %val0, <2 x float> addrspace(1)* %out.gep
     49  %val0 = load <4 x float>, <4 x float> addrspace(3)* %arrayidx0, align 4
     50  %elt0 = extractelement <4 x float> %val0, i32 0
     51  %elt1 = extractelement <4 x float> %val0, i32 1
     52  %elt2 = extractelement <4 x float> %val0, i32 2
     53  %elt3 = extractelement <4 x float> %val0, i32 3
     74  %val0 = load <3 x float>, <3 x float> addrspace(3)* %arrayidx0, align 4
    [all …]
|
D | cgp-bitfield-extract.ll |
     15  ; OPT-NEXT: %val0 = and i32 %0, 255
     45  %val0 = and i32 %shr, 255
     55  %phi = phi i32 [ %val0, %bb0 ], [ %val1, %bb1 ]
     66  ; OPT-NEXT: %val0 = and i32 %0, 255
     85  %val0 = and i32 %shr, 255
     95  %phi = phi i32 [ %val0, %bb0 ], [ %val1, %bb1 ]
    107  ; OPT-NEXT: %val0 = and i16 %0, 255
    137  %val0 = and i16 %shr, 255
    147  %phi = phi i16 [ %val0, %bb0 ], [ %val1, %bb1 ]
    162  ; OPT-NEXT: %val0 = and i64 %0, 255
    [all …]
|
D | annotate-kernel-features.ll |
     31  %val0 = call i32 @llvm.r600.read.tgid.y()
     32  store volatile i32 %val0, i32 addrspace(1)* %ptr
     40  %val0 = call i32 @llvm.r600.read.tgid.x()
     42  store volatile i32 %val0, i32 addrspace(1)* %ptr
     56  %val0 = call i32 @llvm.r600.read.tgid.x()
     58  store volatile i32 %val0, i32 addrspace(1)* %ptr
     65  %val0 = call i32 @llvm.r600.read.tgid.y()
     67  store volatile i32 %val0, i32 addrspace(1)* %ptr
     74  %val0 = call i32 @llvm.r600.read.tgid.x()
     77  store volatile i32 %val0, i32 addrspace(1)* %ptr
    [all …]
|
D | ds_read2st64.ll |
     16  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     20  %sum = fadd float %val0, %val1
     36  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     40  %sum = fadd float %val0, %val1
     56  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     60  %sum = fadd float %val0, %val1
     76  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     80  %sum = fadd float %val0, %val1
     92  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     96  %sum = fadd float %val0, %val1
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | hip.extern.shared.array.ll |
     18  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     20  store float %val0, float addrspace(3)* %arrayidx1, align 4
     38  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     47  %val = phi float [ %val0, %if ], [ %val1, %else ]
     60  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
     62  store float %val0, float addrspace(3)* %arrayidx1, align 4
     75  %val0 = load i8, i8 addrspace(3)* %arrayidx0, align 4
     76  %val1 = uitofp i8 %val0 to float
     92  %val0 = load i8, i8 addrspace(3)* %arrayidx0, align 4
     93  %val1 = uitofp i8 %val0 to float
    [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | logical-imm.ll |
      9  %val0 = and i32 %in32, 2863311530
     10  store volatile i32 %val0, i32* @var32
     31  %val0 = or i32 %in32, 2863311530
     32  store volatile i32 %val0, i32* @var32
     53  %val0 = xor i32 %in32, 2863311530
     54  store volatile i32 %val0, i32* @var32
     74  %val0 = add i32 %in32, 2863311530
     75  store i32 %val0, i32* @var32
|
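The constant in every match above, 2863311530, is 0xAAAAAAAA: an alternating bit pattern of the repeating kind that AArch64 can encode directly in the immediate field of its logical instructions (and/orr/eor). The trailing add has no such arithmetic-immediate encoding, which is presumably the contrast the test covers. For reference:

    #include <cstdint>

    // The test constant is the alternating-bit pattern 0xAAAAAAAA,
    // the repeating kind encodable as an AArch64 logical immediate.
    static_assert(2863311530u == 0xAAAAAAAAu, "alternating bits");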
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | logical-imm.ll |
      9  %val0 = and i32 %in32, 2863311530
     10  store volatile i32 %val0, i32* @var32
     31  %val0 = or i32 %in32, 2863311530
     32  store volatile i32 %val0, i32* @var32
     53  %val0 = xor i32 %in32, 2863311530
     54  store volatile i32 %val0, i32* @var32
     74  %val0 = add i32 %in32, 2863311530
     75  store i32 %val0, i32* @var32
|