/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
D | masked_intrinsics.ll | 5 …@llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2 x double> %passthru) 8 define <2 x double> @load_zeromask(<2 x double>* %ptr, <2 x double> %passthru) { 9 …ked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 1, <2 x i1> zeroinitializer, <2 x double> %passthru) 13 ; CHECK-NEXT: ret <2 x double> %passthru 16 define <2 x double> @load_onemask(<2 x double>* %ptr, <2 x double> %passthru) { 17 …masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1, i1 1>, <2 x double> %passthru) 25 define <2 x double> @load_undefmask(<2 x double>* %ptr, <2 x double> %passthru) { 26 …ed.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1, i1 undef>, <2 x double> %passthru) 51 define <2 x double> @gather_zeromask(<2 x double*> %ptrs, <2 x double> %passthru) { 52 ….gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 5, <2 x i1> zeroinitializer, <2 x double> %passthru) [all …]
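These InstCombine tests check that a masked load or gather with a constant-foldable mask collapses: an all-zero mask never touches memory, so the call folds to its passthru operand, while an all-ones mask becomes an ordinary load. A minimal sketch of the zero-mask case, assuming typed-pointer IR as in LLVM 7 (the function name and alignment below are illustrative, not from the test file):

; With an all-zero mask no lane is loaded, so -instcombine is expected
; to replace the call with the passthru operand.
declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)

define <2 x double> @fold_to_passthru(<2 x double>* %ptr, <2 x double> %passthru) {
  %v = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 8, <2 x i1> zeroinitializer, <2 x double> %passthru)
  ; expected result after the fold: ret <2 x double> %passthru
  ret <2 x double> %v
}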
|
D | debuginfo.ll | 16 define i8* @passthru(i8* %a, i32 %b, i64 %c) !dbg !1 { 36 ; CHECK-LABEL: define i8* @passthru(i8* %a, i32 %b, i64 %c) 87 !1 = distinct !DISubprogram(name: "passthru", line: 79, isLocal: true, isDefinition: true, virtualI…
|
D | 2011-09-03-Trampoline.ll | 96 !4 = distinct !DISubprogram(name: "passthru", scope: !1, file: !1, line: 79, type: !5, isLocal: tru…
|
/external/llvm/test/Transforms/InstCombine/ |
D | masked_intrinsics.ll | 5 …double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2 x double> %passthru) 8 define <2 x double> @load_zeromask(<2 x double>* %ptr, <2 x double> %passthru) { 9 …ked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 1, <2 x i1> zeroinitializer, <2 x double> %passthru) 13 ; CHECK-NEXT: ret <2 x double> %passthru 16 define <2 x double> @load_onemask(<2 x double>* %ptr, <2 x double> %passthru) { 17 …masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1, i1 1>, <2 x double> %passthru) 25 define <2 x double> @load_undefmask(<2 x double>* %ptr, <2 x double> %passthru) { 26 …ed.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1, i1 undef>, <2 x double> %passthru) 51 define <2 x double> @gather_zeromask(<2 x double*> %ptrs, <2 x double> %passthru) { 52 …m.masked.gather.v2f64(<2 x double*> %ptrs, i32 5, <2 x i1> zeroinitializer, <2 x double> %passthru) [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Verifier/ |
D | scatter_gather.ll | 5 define <16 x float> @gather2(<16 x float*> %ptrs, <16 x i1>* %mask, <16 x float> %passthru) { 6 …masked.gather.v16f32.v16p0f32(<16 x float*> %ptrs, i32 4, <16 x i1>* %mask, <16 x float> %passthru) 13 define <8 x float> @gather3(<8 x float*> %ptrs, <16 x i1> %mask, <8 x float> %passthru) { 14 …llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <16 x i1> %mask, <8 x float> %passthru) 21 define <8 x float>* @gather4(<8 x float*> %ptrs, <8 x i1> %mask, <8 x float> %passthru) { 22 …lvm.masked.gather.p0v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %passthru) 29 define <8 x float> @gather5(<8 x float*>* %ptrs, <8 x i1> %mask, <8 x float> %passthru) { 30 …vm.masked.gather.v8f32.p0v8p0f32(<8 x float*>* %ptrs, i32 4, <8 x i1> %mask, <8 x float> %passthru) 37 define <8 x float> @gather6(<8 x float> %ptrs, <8 x i1> %mask, <8 x float> %passthru) { 38 …t> @llvm.masked.gather.v8f32.v8f32(<8 x float> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %passthru) [all …]
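These Verifier tests feed deliberately malformed masked.gather calls (a pointer to a mask instead of a mask, mismatched vector widths, a non-vector pointer operand) and expect diagnostics. For contrast, a well-formed call sketched from the LangRef constraints rather than copied from the file:

; mask, passthru, the vector-of-pointers operand and the result must all
; have the same number of elements.
declare <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*>, i32, <8 x i1>, <8 x float>)

define <8 x float> @gather_ok(<8 x float*> %ptrs, <8 x i1> %mask, <8 x float> %passthru) {
  %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %passthru)
  ret <8 x float> %v
}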
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | vector-shuffle-masked.ll | 4 define <4 x i32> @mask_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask… 14 %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru 31 define <4 x i32> @mask_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask… 41 %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru 58 define <2 x i64> @mask_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, <2 x i64> %passthru, i8 %mask) { 68 %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> %passthru 85 define <4 x i64> @mask_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, <4 x i64> %passthru, i8 %mask… 95 %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle, <4 x i64> %passthru 112 define <4 x i64> @mask_shuffle_v4i64_1230(<4 x i64> %a, <4 x i64> %passthru, i8 %mask) { 122 %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle, <4 x i64> %passthru [all …]
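These CodeGen tests express an AVX-512 masked shuffle as an unmasked shufflevector followed by a select between the shuffle result and %passthru, with the scalar i8/i16 mask bitcast to a vector of i1 and narrowed to the element count. A reduced sketch of that idiom (not copied from the test file):

; Shuffle, then merge with %passthru under the low 4 bits of %mask.
define <4 x i32> @mask_shuffle_sketch(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask) {
  %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru
  ret <4 x i32> %res
}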
|
D | avx512-rndscale.ll | 149 define <2 x double> @floor_v2f64_mask(<2 x double> %p, <2 x double> %passthru, <2 x i64> %cmp) { 158 %s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru 162 define <4 x float> @floor_v4f32_mask(<4 x float> %p, <4 x float> %passthru, <4 x i32> %cmp) { 171 %s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru 175 define <4 x double> @floor_v4f64_mask(<4 x double> %p, <4 x double> %passthru, <4 x i64> %cmp) { 184 %s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru 188 define <8 x float> @floor_v8f32_mask(<8 x float> %p, <8 x float> %passthru, <8 x i32> %cmp) { 197 %s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru 201 define <8 x double> @floor_v8f64_mask(<8 x double> %p, <8 x double> %passthru, <8 x i64> %cmp) { 210 %s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru [all …]
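Same merge-masking idea, but wrapped around a generic rounding intrinsic: floor the input, derive the i1 mask from a compare on %cmp, then select between the rounded value and %passthru. A minimal sketch under that reading (the compare used to build the mask is an assumption; the truncated matches only show the select):

declare <2 x double> @llvm.floor.v2f64(<2 x double>)

define <2 x double> @floor_mask_sketch(<2 x double> %p, <2 x double> %passthru, <2 x i64> %cmp) {
  %c = icmp eq <2 x i64> %cmp, zeroinitializer      ; assumed mask construction
  %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  %s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
  ret <2 x double> %s
}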
|
D | avx512-gfni-intrinsics.ll | 6 define <16 x i8> @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru,… 29 %4 = select <16 x i1> %1, <16 x i8> %2, <16 x i8> %passthru 36 define <32 x i8> @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru,… 59 %4 = select <32 x i1> %1, <32 x i8> %2, <32 x i8> %passthru 66 define <64 x i8> @test_vgf2p8affineinvqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru,… 89 %4 = select <64 x i1> %1, <64 x i8> %2, <64 x i8> %passthru 96 define <16 x i8> @test_vgf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i1… 119 %4 = select <16 x i1> %1, <16 x i8> %2, <16 x i8> %passthru 126 define <32 x i8> @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i3… 149 %4 = select <32 x i1> %1, <32 x i8> %2, <32 x i8> %passthru [all …]
|
D | stack-folding-int-avx512.ll | 19 define <16 x i32> @stack_fold_valignd_mask(<16 x i32> %a, <16 x i32> %b, <16 x i32>* %passthru, i16… 25 %4 = load <16 x i32>, <16 x i32>* %passthru 48 define <8 x i64> @stack_fold_valignq_mask(<8 x i64> %a, <8 x i64> %b, <8 x i64>* %passthru, i8 %mas… 54 %4 = load <8 x i64>, <8 x i64>* %passthru 82 define <64 x i8> @stack_fold_pavgb_mask(<64 x i8>* %passthru, <64 x i8> %a0, <64 x i8> %a1, i64 %ma… 86 %2 = load <64 x i8>, <64 x i8>* %passthru 126 define <32 x i16> @stack_fold_pavgw_mask(<32 x i16>* %passthru, <32 x i16> %a0, <32 x i16> %a1, i32… 130 %2 = load <32 x i16>, <32 x i16>* %passthru 228 define <64 x i8> @stack_fold_pabsb_mask(<64 x i8> %passthru, <64 x i8> %a0, i64 %mask) { 236 %6 = select <64 x i1> %5, <64 x i8> %4, <64 x i8> %passthru [all …]
|
D | vector-shuffle-512-v16.ll | 471 …2_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01(<16 x i32> %a, <16 x i32> %passthru, i16 %mask) { 487 %res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> %passthru 491 …6_07_08_09_10_11_12_13_14_15_16_17(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) { 507 %res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> %passthru 609 …6_07_16_17_18_19_20_21_22_23(<16 x float> %a, <16 x float> %b, <16 x float> %passthru, i16 %mask) { 625 %res = select <16 x i1> %mask.cast, <16 x float> %shuffle, <16 x float> %passthru 629 …8_19_08_09_10_11_12_13_14_15(<16 x float> %a, <16 x float> %b, <16 x float> %passthru, i16 %mask) { 645 %res = select <16 x i1> %mask.cast, <16 x float> %shuffle, <16 x float> %passthru 649 …4_05_06_07_16_17_18_19_20_21_22_23(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) { 665 %res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> %passthru [all …]
|
D | avx512-cvt.ll | 1991 define <2 x i64> @test_2f64toub(<2 x double> %a, <2 x i64> %passthru) { 2032 %select = select <2 x i1> %mask, <2 x i64> %passthru, <2 x i64> zeroinitializer 2036 define <4 x i64> @test_4f64toub(<4 x double> %a, <4 x i64> %passthru) { 2073 %select = select <4 x i1> %mask, <4 x i64> %passthru, <4 x i64> zeroinitializer 2077 define <8 x i64> @test_8f64toub(<8 x double> %a, <8 x i64> %passthru) { 2110 %select = select <8 x i1> %mask, <8 x i64> %passthru, <8 x i64> zeroinitializer 2114 define <2 x i64> @test_2f32toub(<2 x float> %a, <2 x i64> %passthru) { 2153 %select = select <2 x i1> %mask, <2 x i64> %passthru, <2 x i64> zeroinitializer 2157 define <4 x i64> @test_4f32toub(<4 x float> %a, <4 x i64> %passthru) { 2194 %select = select <4 x i1> %mask, <4 x i64> %passthru, <4 x i64> zeroinitializer [all …]
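Here the select is the zero-masking form: inactive lanes become zero and active lanes take %passthru. The mask itself presumably comes from a float-to-i1 conversion, which matches the test names (f64toub, f32tosb) but is not visible in the truncated matches. A hypothetical sketch under that assumption:

define <2 x i64> @f64_to_mask_sketch(<2 x double> %a, <2 x i64> %passthru) {
  %mask = fptoui <2 x double> %a to <2 x i1>        ; assumed mask source
  %select = select <2 x i1> %mask, <2 x i64> %passthru, <2 x i64> zeroinitializer
  ret <2 x i64> %select
}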
|
D | stack-folding-fp-avx512.ll | 421 …> @stack_fold_shuff64x2_mask(<8 x double> %a, <8 x double> %b, i8 %mask, <8 x double>* %passthru) { 428 %4 = load <8 x double>, <8 x double>* %passthru 433 … @stack_fold_shuff64x2_maskz(<8 x double> %a, <8 x double> %b, i8 %mask, <8 x double>* %passthru) { 443 … @stack_fold_shuff32x4_mask(<16 x float> %a, <16 x float> %b, i16 %mask, <16 x float>* %passthru) { 450 %4 = load <16 x float>, <16 x float>* %passthru 603 define <8 x double> @stack_fold_insertf64x4_mask(<8 x double> %passthru, <4 x double> %a0, <4 x dou… 609 %4 = select <8 x i1> %3, <8 x double> %2, <8 x double> %passthru 702 define <8 x double> @stack_fold_permpd_mask(<8 x double>* %passthru, <8 x double> %a0, i8 %mask) { 709 %4 = load <8 x double>, <8 x double>* %passthru 754 define <8 x double> @stack_fold_permilpd_zmm_mask(<8 x double>* %passthru, <8 x double> %a0, i8 %ma… [all …]
|
D | required-vector-width.ll | 585 define <16 x i16> @test_16f32toub_256(<16 x float>* %ptr, <16 x i16> %passthru) "required-vector-wi… 599 %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer 603 define <16 x i16> @test_16f32toub_512(<16 x float>* %ptr, <16 x i16> %passthru) "required-vector-wi… 613 %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer 617 define <16 x i16> @test_16f32tosb_256(<16 x float>* %ptr, <16 x i16> %passthru) "required-vector-wi… 631 %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer 635 define <16 x i16> @test_16f32tosb_512(<16 x float>* %ptr, <16 x i16> %passthru) "required-vector-wi… 644 %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer
|
D | stack-folding-int-avx512vl.ll | 19 define <8 x i32> @stack_fold_valignd_ymm_mask(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %passthru, i8 … 25 %4 = load <8 x i32>, <8 x i32>* %passthru 516 define <32 x i8> @stack_fold_palignr_mask(<32 x i8> %a0, <32 x i8> %a1, <32 x i8>* %passthru, i32 %… 522 %4 = load <32 x i8>, <32 x i8>* %passthru 810 define <8 x i16> @stack_fold_pmaddubsw_mask(<8 x i16>* %passthru, <16 x i8> %a0, <16 x i8> %a1, i8 … 817 %4 = load <8 x i16>, <8 x i16>* %passthru 841 define <16 x i16> @stack_fold_pmaddubsw_ymm_mask(<16 x i16>* %passthru, <32 x i8> %a0, <32 x i8> %a… 848 %4 = load <16 x i16>, <16 x i16>* %passthru 872 define <4 x i32> @stack_fold_pmaddwd_mask(<4 x i32>* %passthru, <8 x i16> %a0, <8 x i16> %a1, i8 %m… 880 %5 = load <4 x i32>, <4 x i32>* %passthru [all …]
|
D | avx512-intrinsics.ll | 5 define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask… 12 …double> @llvm.x86.avx512.mask.compress.pd.512(<8 x double> %data, <8 x double> %passthru, i8 %mask) 36 define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mas… 43 …float> @llvm.x86.avx512.mask.compress.ps.512(<16 x float> %data, <16 x float> %passthru, i16 %mask) 67 define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) { 74 …%res = call <8 x i64> @llvm.x86.avx512.mask.compress.q.512(<8 x i64> %data, <8 x i64> %passthru, i… 98 define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) { 105 … <16 x i32> @llvm.x86.avx512.mask.compress.d.512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) 137 define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) { 144 …x double> @llvm.x86.avx512.mask.expand.pd.512(<8 x double> %data, <8 x double> %passthru, i8 %mask) [all …]
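In these tests the passthru is an explicit operand of the target-specific compress/expand intrinsics rather than a separate select: lanes cleared in %mask keep the corresponding passthru element. A minimal call sketch reusing the declaration visible in the matches:

declare <8 x double> @llvm.x86.avx512.mask.compress.pd.512(<8 x double>, <8 x double>, i8)

define <8 x double> @compress_sketch(<8 x double> %data, <8 x double> %passthru, i8 %mask) {
  %res = call <8 x double> @llvm.x86.avx512.mask.compress.pd.512(<8 x double> %data, <8 x double> %passthru, i8 %mask)
  ret <8 x double> %res
}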
|
D | avx512vbmi2vl-intrinsics.ll | 13 define <8 x i16> @test_mask_expand_w_128(<8 x i16> %data, <8 x i16> %passthru, i8 %mask) { 28 …%res = call <8 x i16> @llvm.x86.avx512.mask.expand.w.128(<8 x i16> %data, <8 x i16> %passthru, i8 … 59 define <16 x i8> @test_mask_expand_b_128(<16 x i8> %data, <16 x i8> %passthru, i16 %mask) { 73 …%res = call <16 x i8> @llvm.x86.avx512.mask.expand.b.128(<16 x i8> %data, <16 x i8> %passthru, i16… 95 define <8 x i16> @test_mask_compress_w_128(<8 x i16> %data, <8 x i16> %passthru, i8 %mask) { 110 …%res = call <8 x i16> @llvm.x86.avx512.mask.compress.w.128(<8 x i16> %data, <8 x i16> %passthru, i… 141 define <16 x i8> @test_mask_compress_b_128(<16 x i8> %data, <16 x i8> %passthru, i16 %mask) { 155 …%res = call <16 x i8> @llvm.x86.avx512.mask.compress.b.128(<16 x i8> %data, <16 x i8> %passthru, i… 193 define <16 x i16> @test_mask_expand_w_256(<16 x i16> %data, <16 x i16> %passthru, i16 %mask) { 207 …%res = call <16 x i16> @llvm.x86.avx512.mask.expand.w.256(<16 x i16> %data, <16 x i16> %passthru, … [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/X86/ |
D | x86-avx512.ll | 1890 define <8 x i32> @identity_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) { 1898 %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %passthru 1911 define <8 x i32> @zero_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) { 1920 %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %passthru 1933 define <8 x i32> @shuffle_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) { 1942 %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %passthru 1955 define <8 x i32> @undef_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) { 1964 %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %passthru 1978 define <8 x float> @identity_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float> %passthru, i8 %m… 1986 %3 = select <8 x i1> %2, <8 x float> %1, <8 x float> %passthru [all …]
|
/external/mesa3d/src/gallium/auxiliary/gallivm/ |
D | lp_bld_gather.c | 334 LLVMValueRef passthru = LLVMGetUndef(src_vec_type); in lp_build_gather_avx2() local 336 LLVMValueRef args[] = { src_ptr, alignment, mask, passthru }; in lp_build_gather_avx2() 370 LLVMValueRef passthru = LLVMGetUndef(src_vec_type); in lp_build_gather_avx2() local 375 LLVMValueRef args[] = { passthru, base_ptr, offsets, mask, scale }; in lp_build_gather_avx2()
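lp_build_gather_avx2() fills the passthru slot with an undef vector, since gallivm never reads the inactive lanes; the two argument lists match the operand order of the masked load/gather intrinsics (pointer(s), alignment, mask, passthru) and of the x86 AVX2 gather intrinsics (passthru/src first). Roughly the IR the first path would build, sketched here rather than taken from Mesa (which intrinsic and types the builder actually picks depend on the runtime configuration):

; passthru is undef because inactive lanes are never consumed.
declare <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*>, i32, <8 x i1>, <8 x float>)

define <8 x float> @gallivm_gather_sketch(<8 x float*> %ptrs, <8 x i1> %mask) {
  %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %mask, <8 x float> undef)
  ret <8 x float> %v
}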
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Assembler/ |
D | auto_upgrade_intrinsics.ll | 72 define <2 x double> @tests.masked.load(<2 x double>* %ptr, <2 x i1> %mask, <2 x double> %passthru) … 75 … double> @llvm.masked.load.v2f64(<2 x double>* %ptr, i32 1, <2 x i1> %mask, <2 x double> %passthru) 90 define <2 x double> @tests.masked.gather(<2 x double*> %ptr, <2 x i1> %mask, <2 x double> %passthru… 93 …ouble> @llvm.masked.gather.v2f64(<2 x double*> %ptr, i32 1, <2 x i1> %mask, <2 x double> %passthru)
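These Assembler tests exercise intrinsic auto-upgrade: the calls use the older, shorter manglings (llvm.masked.load.v2f64, llvm.masked.gather.v2f64) and the reader is expected to rewrite them to the names that also encode the pointer type, leaving the mask and passthru operands unchanged. A compact sketch of input in the old spelling (the test's CHECK lines are omitted):

; The reader auto-upgrades this call to
; @llvm.masked.load.v2f64.p0v2f64 with the same operands.
declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)

define <2 x double> @upgrade_sketch(<2 x double>* %ptr, <2 x i1> %mask, <2 x double> %passthru) {
  %v = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %ptr, i32 1, <2 x i1> %mask, <2 x double> %passthru)
  ret <2 x double> %v
}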
|
/external/deqp/external/vulkancts/modules/vulkan/spirv_assembly/ |
D | vktSpvAsmGraphicsShaderTestUtil.cpp | 1192 map<string, string> passthru = passthruFragments(); in addShaderCodeCustomVertex() local 1195 …dst.spirvAsmSources.add("frag", spirVAsmBuildOptions) << makeFragmentShaderAssembly(passthru) << S… in addShaderCodeCustomVertex() 1229 map<string, string> passthru = passthruFragments(); in addShaderCodeCustomTessControl() local 1231 …dst.spirvAsmSources.add("vert", spirVAsmBuildOptions) << makeVertexShaderAssembly(passthru) << Sp… in addShaderCodeCustomTessControl() 1233 …dst.spirvAsmSources.add("tesse", spirVAsmBuildOptions) << makeTessEvalShaderAssembly(passthru) << … in addShaderCodeCustomTessControl() 1234 …dst.spirvAsmSources.add("frag", spirVAsmBuildOptions) << makeFragmentShaderAssembly(passthru) << … in addShaderCodeCustomTessControl() 1268 map<string, string> passthru = passthruFragments(); in addShaderCodeCustomTessEval() local 1269 …dst.spirvAsmSources.add("vert", spirVAsmBuildOptions) << makeVertexShaderAssembly(passthru) << Sp… in addShaderCodeCustomTessEval() 1270 …dst.spirvAsmSources.add("tessc", spirVAsmBuildOptions) << makeTessControlShaderAssembly(passthru) … in addShaderCodeCustomTessEval() 1272 …dst.spirvAsmSources.add("frag", spirVAsmBuildOptions) << makeFragmentShaderAssembly(passthru) << … in addShaderCodeCustomTessEval() [all …]
|
/external/kernel-headers/original/uapi/sound/ |
D | hdsp.h | 66 unsigned char passthru; member
|
D | hdspm.h | 73 unsigned int passthru; member
|
/external/llvm/test/Assembler/ |
D | auto_upgrade_intrinsics.ll | 63 define <2 x double> @tests.masked.load(<2 x double>* %ptr, <2 x i1> %mask, <2 x double> %passthru) … 66 … double> @llvm.masked.load.v2f64(<2 x double>* %ptr, i32 1, <2 x i1> %mask, <2 x double> %passthru)
|
/external/u-boot/include/usb/ |
D | ulpi.h | 71 int passthru, int complement);
|
/external/libnl/lib/route/link/ |
D | macvlan.c | 320 __ADD(MACVLAN_MODE_PASSTHRU, passthru)
|