/external/llvm/test/CodeGen/X86/ |
D | 2009-11-13-VirtRegRewriterBug.ll |
     9  %mask133.masked.masked.masked.masked.masked.masked = or i640 undef, undef ; <i640> [#uses=1]
    31  %mask271.masked.masked.masked.masked.masked.masked.masked = or i256 0, undef ; <i256> [#uses=2]
    32  …%mask266.masked.masked.masked.masked.masked.masked = or i256 %mask271.masked.masked.masked.masked.…
    33  %mask241.masked = or i256 undef, undef ; <i256> [#uses=1]
    53  …%tmp211 = lshr i256 %mask271.masked.masked.masked.masked.masked.masked.masked, 112 ; <i256> [#uses…
    55  %tmp208 = lshr i256 %mask266.masked.masked.masked.masked.masked.masked, 128 ; <i256> [#uses=1]
    60  %tmp193 = lshr i256 %mask241.masked, 208 ; <i256> [#uses=1]
    97  %tmp101 = lshr i640 %mask133.masked.masked.masked.masked.masked.masked, 256 ; <i640> [#uses=1]
|
D | avx512bw-mov.ll |
   110  …%res = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> u…
   113  declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
   125  …%res = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> z…
   128  declare <32 x i8> @llvm.masked.load.v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)
   140  …%res = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> un…
   143  declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
   155  …%res = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i1…
   158  declare <16 x i16> @llvm.masked.load.v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i16>)
   170  call void @llvm.masked.store.v16i8(<16 x i8> %val, <16 x i8>* %addr, i32 4, <16 x i1>%mask)
   173  declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
   [all …]
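
For orientation, a minimal self-contained use of these intrinsics looks like the sketch below (typed-pointer IR syntax as used throughout these tests; the function name @sketch_masked_roundtrip and its arguments are ours, not taken from any file above). The i32 operand is the alignment, and masked-off load lanes take their values from the pass-through vector.

    ; Sketch (hypothetical): masked load then masked store of <8 x i32>, alignment 4.
    declare <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>)
    declare void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>)

    define <8 x i32> @sketch_masked_roundtrip(<8 x i32>* %p, <8 x i1> %mask, <8 x i32> %passthru) {
      ; Lanes whose mask bit is true load from %p; false lanes yield %passthru.
      %v = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %p, i32 4, <8 x i1> %mask, <8 x i32> %passthru)
      ; Only lanes whose mask bit is true are written back.
      call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> %v, <8 x i32>* %p, i32 4, <8 x i1> %mask)
      ret <8 x i32> %v
    }

Note that the avx512bw-mov.ll calls above use the older unsuffixed names (@llvm.masked.load.v16i8), while newer tests in this listing carry the pointer-type suffix (.p0v16i8) added when the intrinsics became overloaded on the pointer type; the auto_upgrade_intrinsics.ll entry below tests that rename.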
|
D | pr28515.ll |
    10  …%wide.masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* bitcast (i32* getele…
    14  declare <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>) #0
|
D | peephole-na-phys-copy-folding.ll |
    28  %masked = and i8 %loaded_M, 8
    29  %M_is_true = icmp ne i8 %masked, 0
    54  %masked = and i8 %loaded_M, 8
    55  %M_is_true = icmp ne i8 %masked, 0
    80  %masked = and i8 %loaded_M, 8
    81  %M_is_true = icmp ne i8 %masked, 0
   106  %masked = and i8 %loaded_M, 8
   107  %M_is_true = icmp ne i8 %masked, 0
|
D | avx512-bugfix-26264.ll |
    21  …%res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1>…
    42  …%res = call <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32 4, <32 x i1> %mask…
    46  declare <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32, <32 x i1> %mask, <32 x…
    47  declare <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32, <32 x i1> %mask,…
|
D | masked_gather_scatter.ll |
    48  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i…
    52  declare <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*>, i32, <16 x i1>, <16 x i32>)
    53  declare <16 x float> @llvm.masked.gather.v16f32(<16 x float*>, i32, <16 x i1>, <16 x float>)
    54  declare <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> , i32, <8 x i1> , <8 x i32> )
    97  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> %i…
   130  …%res = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %gep.random, i32 4, <16 x i1> %imask…
   172  …%gt1 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %gep.random, i32 4, <16 x i1> %imask…
   173  …%gt2 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %gep.random, i32 4, <16 x i1> %imask…
   224  …call void @llvm.masked.scatter.v16i32(<16 x i32>%val, <16 x i32*> %gep.random, i32 4, <16 x i1> %i…
   225  …call void @llvm.masked.scatter.v16i32(<16 x i32>%val, <16 x i32*> %gep.random, i32 4, <16 x i1> %i…
   [all …]
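
Unlike llvm.masked.load, the gather form takes a full vector of pointers, so each lane can read from an unrelated address. A minimal sketch (the function name @sketch_gather is ours; the signature is copied from the declarations above):

    ; Sketch (hypothetical): gather 16 floats from 16 independent addresses under a mask.
    declare <16 x float> @llvm.masked.gather.v16f32(<16 x float*>, i32, <16 x i1>, <16 x float>)

    define <16 x float> @sketch_gather(<16 x float*> %ptrs, <16 x i1> %mask) {
      ; Enabled lanes load through their own pointer; disabled lanes are undef
      ; here because undef is passed as the pass-through operand.
      %g = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %ptrs, i32 4, <16 x i1> %mask, <16 x float> undef)
      ret <16 x float> %g
    }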
|
D | narrow-shl-load.ll |
    19  %shl15.masked = and i64 %shl15, 4294967294
    20  %and17 = or i64 %shl15.masked, %conv11
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | 2009-11-13-VirtRegRewriterBug.ll |
     9  %mask133.masked.masked.masked.masked.masked.masked = or i640 undef, undef ; <i640> [#uses=1]
    31  %mask271.masked.masked.masked.masked.masked.masked.masked = or i256 0, undef ; <i256> [#uses=2]
    32  …%mask266.masked.masked.masked.masked.masked.masked = or i256 %mask271.masked.masked.masked.masked.…
    33  %mask241.masked = or i256 undef, undef ; <i256> [#uses=1]
    53  …%tmp211 = lshr i256 %mask271.masked.masked.masked.masked.masked.masked.masked, 112 ; <i256> [#uses…
    55  %tmp208 = lshr i256 %mask266.masked.masked.masked.masked.masked.masked, 128 ; <i256> [#uses=1]
    60  %tmp193 = lshr i256 %mask241.masked, 208 ; <i256> [#uses=1]
    97  %tmp101 = lshr i640 %mask133.masked.masked.masked.masked.masked.masked, 256 ; <i640> [#uses=1]
|
/external/llvm/test/Analysis/CostModel/X86/ |
D | masked-intrinsic-cost.ll |
     7  ; AVX2: Found an estimated cost of 4 {{.*}}.masked
    10  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %addr, i32 4, <2 x i1>%mask…
    15  ; AVX2: Found an estimated cost of 4 {{.*}}.masked
    18  …%res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr, i32 4, <4 x i1>%mask, <4 x…
    23  ; AVX2: Found an estimated cost of 4 {{.*}}.masked
    26  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1>%mask)
    31  ; AVX2: Found an estimated cost of 4 {{.*}}.masked
    34  …%res = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* %addr, i32 4, <8 x i1>%mask, …
    39  ; AVX2: Found an estimated cost of 5 {{.*}}.masked
    42  …call void @llvm.masked.store.v2f32.p0v2f32(<2 x float>%val, <2 x float>* %addr, i32 4, <2 x i1>%ma…
   [all …]
|
D | vector_gep.ll |
     6  declare <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
    15  …%res = call <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %arrayidx, i32 4, <4 x i1> <i1 true, i1…
|
/external/llvm/test/Transforms/InstCombine/ |
D | masked_intrinsics.ll |
     3  declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x…
     4  declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1>…
     5  declare <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2 x doubl…
     6  declare void @llvm.masked.scatter.v2f64(<2 x double> %val, <2 x double*> %ptrs, i32, <2 x i1> %mask)
     9  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 1, <2 x i1> zeroi…
    17  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
    26  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
    35  …call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 3, <2 x i1> …
    43  …call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 4, <2 x i1> …
    52  …%res = call <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32 5, <2 x i1> zeroinitia…
   [all …]
|
D | x86-masked-memops.ll |
    56  ; CHECK-NEXT: %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %castvec, i32 1, …
    68  ; CHECK-NEXT: %1 = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %castvec, i32 1…
    80  ; CHECK-NEXT: %1 = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* %castvec, i32 1, …
    90  ; CHECK-NEXT: %1 = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %castvec, i32 1…
   102  ; CHECK-NEXT: %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %castvec, i32 1, <4 x…
   112  ; CHECK-NEXT: %1 = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %castvec, i32 1, <2 x…
   122  ; CHECK-NEXT: %1 = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %castvec, i32 1, <8 x…
   132  ; CHECK-NEXT: %1 = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %castvec, i32 1, <4 x…
   190  ; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %v, <4 x float>* %castvec, i3…
   202  ; CHECK-NEXT: call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %v, <2 x double>* %castvec, …
   [all …]
|
D | icmp-logical.ll |
   131  %masked = and i32 %in, 1
   132  %tst2 = icmp eq i32 %masked, 0
   145  %masked = and i32 %in, 1
   146  %tst1 = icmp eq i32 %masked, 0
|
/external/llvm/test/Assembler/ |
D | auto_upgrade_intrinsics.ll |
    61  declare <2 x double> @llvm.masked.load.v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x double>…
    63  define <2 x double> @tests.masked.load(<2 x double>* %ptr, <2 x i1> %mask, <2 x double> %passthru) …
    64  ; CHECK-LABEL: @tests.masked.load(
    65  ; CHECK: @llvm.masked.load.v2f64.p0v2f64
    66  …%res = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %ptr, i32 1, <2 x i1> %mask, <2 x d…
    70  declare void @llvm.masked.store.v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1> %mask)
    72  define void @tests.masked.store(<2 x double>* %ptr, <2 x i1> %mask, <2 x double> %val) {
    73  ; CHECK-LABEL: @tests.masked.store(
    74  ; CHECK: @llvm.masked.store.v2f64.p0v2f64
    75  call void @llvm.masked.store.v2f64(<2 x double> %val, <2 x double>* %ptr, i32 3, <2 x i1> %mask)
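
What the CHECK lines in this file verify, reduced to a sketch (an assumption distilled from the snippets above, not the full test): the bitcode auto-upgrade rewrites calls to the old intrinsic name, mangled only by the data type, into the form whose name also mangles the pointer type.

    ; Old name, overloaded only on the value type:
    declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
    ; Upgraded name, additionally mangled with the pointer type
    ; (p0v2f64 = address-space-0 pointer to <2 x double>):
    declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)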
|
/external/rappor/client/java/com/google/android/rappor/ |
D | Encoder.java |
   519  BitSet masked = new BitSet();  in computePermanentRandomizedResponse() local
   520  masked.or(bits);  in computePermanentRandomizedResponse()
   521  masked.andNot(inputMask);  in computePermanentRandomizedResponse()
   522  checkArgument(masked.isEmpty(), "Input bits had bits set past Encoder's numBits limit.");  in computePermanentRandomizedResponse()
   581  BitSet masked = new BitSet();  in computeInstantaneousRandomizedResponse() local
   582  masked.or(bits);  in computeInstantaneousRandomizedResponse()
   583  masked.andNot(inputMask);  in computeInstantaneousRandomizedResponse()
   584  checkArgument(masked.isEmpty(), "Input bits had bits set past Encoder's numBits limit.");  in computeInstantaneousRandomizedResponse()
|
/external/tensorflow/tensorflow/contrib/seq2seq/python/kernel_tests/ |
D | beam_search_decoder_test.py |
    85  masked = beam_search_decoder._mask_probs(probs, eos_token,
    90  masked = sess.run(masked)
    92  self.assertAllEqual(probs[0][0], masked[0][0])
    93  self.assertAllEqual(probs[0][2], masked[0][2])
    94  self.assertAllEqual(probs[1][0], masked[1][0])
    96  self.assertEqual(masked[0][1][0], 0)
    97  self.assertEqual(masked[1][1][0], 0)
    98  self.assertEqual(masked[1][2][0], 0)
   101  self.assertAllClose(masked[0][1][i], np.finfo('float32').min)
   102  self.assertAllClose(masked[1][1][i], np.finfo('float32').min)
   [all …]
|
/external/llvm/test/CodeGen/PowerPC/ |
D | rlwimi-dyn-and.ll |
    17  %shl161.masked = and i32 %shl161, %const_mat
    18  %conv174 = or i32 %shl170, %shl161.masked
    37  %shl161.masked = and i32 %shl161, 32768
    38  %conv174 = or i32 %shl170, %shl161.masked
|
D | rlwinm2.ll |
    25  %tmp2.masked = and i32 %tmp2, 96 ; <i32> [#uses=1]
    26  %tmp5 = or i32 %tmp1, %tmp2.masked ; <i32> [#uses=1]
|
/external/llvm/test/Transforms/LoopVectorize/X86/ |
D | masked_load_store.ll |
    21  ;AVX: call <8 x i32> @llvm.masked.load.v8i32.p0v8i32
    23  ;AVX: call void @llvm.masked.store.v8i32.p0v8i32
    28  ;AVX512: call <16 x i32> @llvm.masked.load.v16i32.p0v16i32
    30  ;AVX512: call void @llvm.masked.store.v16i32.p0v16i32
    96  ;AVX: call <8 x i32> @llvm.masked.load.v8i32.p1v8i32
    98  ;AVX: call void @llvm.masked.store.v8i32.p1v8i32
   103  ;AVX512: call <16 x i32> @llvm.masked.load.v16i32.p1v16i32
   105  ;AVX512: call void @llvm.masked.store.v16i32.p1v16i32
   180  ;AVX: call <8 x float> @llvm.masked.load.v8f32.p0v8f32
   182  ;AVX: call void @llvm.masked.store.v8f32.p0v8f32
   [all …]
|
D | gather_scatter.ll |
     3  ;AVX1-NOT: llvm.masked
    20  ;AVX512: llvm.masked.load.v16i32
    21  ;AVX512: llvm.masked.gather.v16f32
    22  ;AVX512: llvm.masked.store.v16f32
    99  ;AVX512: llvm.masked.gather.v16f32
   100  ;AVX512: llvm.masked.store.v16f32
   174  ;AVX512: llvm.masked.gather.v16f32
   177  ;AVX512: llvm.masked.scatter.v16f32
   236  declare void @llvm.masked.scatter.v16f32(<16 x float>, <16 x float*>, i32, <16 x i1>)
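
The scatter intrinsic checked here is the store-side counterpart of the gather above; a minimal sketch using the declaration from line 236 (the function name @sketch_scatter is ours):

    ; Sketch (hypothetical): scatter 16 floats to 16 independent addresses under a mask.
    declare void @llvm.masked.scatter.v16f32(<16 x float>, <16 x float*>, i32, <16 x i1>)

    define void @sketch_scatter(<16 x float> %val, <16 x float*> %ptrs, <16 x i1> %mask) {
      ; Each enabled lane stores its element through its own pointer;
      ; disabled lanes write nothing.
      call void @llvm.masked.scatter.v16f32(<16 x float> %val, <16 x float*> %ptrs, i32 4, <16 x i1> %mask)
      ret void
    }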
|
/external/swiftshader/src/Shader/ |
D | PixelRoutine.cpp |
   1585  Int masked = value;  in writeColor() local
   1587  masked &= *Pointer<Int>(constants + OFFSET(Constants,mask565Q[~bgraWriteMask & 0x7][0]));  in writeColor()
   1588  c01 |= masked;  in writeColor()
   1603  Int masked = value;  in writeColor() local
   1605  masked &= *Pointer<Int>(constants + OFFSET(Constants,mask565Q[~bgraWriteMask & 0x7][0]));  in writeColor()
   1606  c23 |= masked;  in writeColor()
   1662  Short4 masked = value;  in writeColor() local
   1664  masked &= *Pointer<Short4>(constants + OFFSET(Constants,invMaskB4Q[bgraWriteMask][0]));  in writeColor()
   1665  c01 |= masked;  in writeColor()
   1680  Short4 masked = value;  in writeColor() local
   [all …]
|
/external/llvm/test/Transforms/ConstantHoisting/PowerPC/ |
D | masks.ll |
    25  %shl161.masked = and i32 %shl161, 32768
    26  %conv174 = or i32 %shl170, %shl161.masked
    55  %shl161.masked = and i32 %shl161, 32773
    56  %conv174 = or i32 %shl170, %shl161.masked
|
/external/llvm/test/Transforms/FunctionAttrs/ |
D | readattrs.ll |
    70  ; CHECK: declare void @llvm.masked.scatter
    71  declare void @llvm.masked.scatter.v4i32(<4 x i32>%val, <4 x i32*>, i32, <4 x i1>)
    77  …call void @llvm.masked.scatter.v4i32(<4 x i32>%val, <4 x i32*> %ptrs, i32 4, <4 x i1><i1 true, i1 …
    81  ; CHECK: declare <4 x i32> @llvm.masked.gather
    82  declare <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
    86  …%res = call <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %ptrs, i32 4, <4 x i1><i1 true, i1 fals…
|
/external/llvm/test/Transforms/InstSimplify/ |
D | call.ll |
   212  …%masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* bitcast (i32* getelementp…
   213  ret <8 x i32> %masked.load
   219  …%masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %V, i32 4, <8 x i1> undef…
   220  ret <8 x i32> %masked.load
   225  declare <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>)
|
/external/swiftshader/third_party/LLVM/test/CodeGen/PowerPC/ |
D | rlwinm2.ll |
    25  %tmp2.masked = and i32 %tmp2, 96 ; <i32> [#uses=1]
    26  %tmp5 = or i32 %tmp1, %tmp2.masked ; <i32> [#uses=1]
|