| /external/python/cpython3/Lib/test/decimaltestdata/ |
| D | reduce.decTest |
      2    -- reduce.decTest -- remove trailing zeros --
      30   redx001 reduce '1'     -> '1'
      31   redx002 reduce '-1'    -> '-1'
      32   redx003 reduce '1.00'  -> '1'
      33   redx004 reduce '-1.00' -> '-1'
      34   redx005 reduce '0'     -> '0'
      35   redx006 reduce '0.00'  -> '0'
      36   redx007 reduce '00.0'  -> '0'
      37   redx008 reduce '00.00' -> '0'
      38   redx009 reduce '00'    -> '0'
      [all …]
|
| D | ddReduce.decTest |
      29   ddred001 reduce '1'     -> '1'
      30   ddred002 reduce '-1'    -> '-1'
      31   ddred003 reduce '1.00'  -> '1'
      32   ddred004 reduce '-1.00' -> '-1'
      33   ddred005 reduce '0'     -> '0'
      34   ddred006 reduce '0.00'  -> '0'
      35   ddred007 reduce '00.0'  -> '0'
      36   ddred008 reduce '00.00' -> '0'
      37   ddred009 reduce '00'    -> '0'
      38   ddred010 reduce '0E+1'  -> '0'
      [all …]
|
| D | dqReduce.decTest |
      30   dqred001 reduce '1'     -> '1'
      31   dqred002 reduce '-1'    -> '-1'
      32   dqred003 reduce '1.00'  -> '1'
      33   dqred004 reduce '-1.00' -> '-1'
      34   dqred005 reduce '0'     -> '0'
      35   dqred006 reduce '0.00'  -> '0'
      36   dqred007 reduce '00.0'  -> '0'
      37   dqred008 reduce '00.00' -> '0'
      38   dqred009 reduce '00'    -> '0'
      39   dqred010 reduce '0E+1'  -> '0'
      [all …]
|
| /external/python/cpython2/Lib/test/decimaltestdata/ |
| D | reduce.decTest |
      2    -- reduce.decTest -- remove trailing zeros --
      30   redx001 reduce '1'     -> '1'
      31   redx002 reduce '-1'    -> '-1'
      32   redx003 reduce '1.00'  -> '1'
      33   redx004 reduce '-1.00' -> '-1'
      34   redx005 reduce '0'     -> '0'
      35   redx006 reduce '0.00'  -> '0'
      36   redx007 reduce '00.0'  -> '0'
      37   redx008 reduce '00.00' -> '0'
      38   redx009 reduce '00'    -> '0'
      [all …]
|
| D | ddReduce.decTest |
      29   ddred001 reduce '1'     -> '1'
      30   ddred002 reduce '-1'    -> '-1'
      31   ddred003 reduce '1.00'  -> '1'
      32   ddred004 reduce '-1.00' -> '-1'
      33   ddred005 reduce '0'     -> '0'
      34   ddred006 reduce '0.00'  -> '0'
      35   ddred007 reduce '00.0'  -> '0'
      36   ddred008 reduce '00.00' -> '0'
      37   ddred009 reduce '00'    -> '0'
      38   ddred010 reduce '0E+1'  -> '0'
      [all …]
|
| D | dqReduce.decTest |
      30   dqred001 reduce '1'     -> '1'
      31   dqred002 reduce '-1'    -> '-1'
      32   dqred003 reduce '1.00'  -> '1'
      33   dqred004 reduce '-1.00' -> '-1'
      34   dqred005 reduce '0'     -> '0'
      35   dqred006 reduce '0.00'  -> '0'
      36   dqred007 reduce '00.0'  -> '0'
      37   dqred008 reduce '00.00' -> '0'
      38   dqred009 reduce '00'    -> '0'
      39   dqred010 reduce '0E+1'  -> '0'
      [all …]
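
These decTest vectors exercise the General Decimal Arithmetic `reduce` operation (strip trailing zeros), which CPython's `decimal` module exposes as `Decimal.normalize()` (and `Context.normalize()`). A minimal sketch of the same cases in plain Python, using the values listed above:

```python
# Sketch of the decTest "reduce" cases via Python's decimal module, where the
# spec's reduce operation is exposed as Decimal.normalize().
from decimal import Decimal

cases = [
    ("1", "1"),
    ("-1", "-1"),
    ("1.00", "1"),
    ("-1.00", "-1"),
    ("0", "0"),
    ("0.00", "0"),
    ("00.0", "0"),
    ("00.00", "0"),
    ("00", "0"),
    ("0E+1", "0"),
]

for operand, expected in cases:
    result = Decimal(operand).normalize()
    # normalize() removes trailing zeros (and canonicalizes zeros), matching
    # the '->' column of the redx/ddred/dqred test vectors.
    assert str(result) == expected, (operand, str(result), expected)
```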
|
| /external/google-fruit/extras/scripts/ |
| D | parser.out |
      128   $end            reduce using rule 3 (optional_balanced_string -> .)
      129   COMMA           reduce using rule 3 (optional_balanced_string -> .)
      130   GREATER_THAN    reduce using rule 3 (optional_balanced_string -> .)
      131   RBRACKET        reduce using rule 3 (optional_balanced_string -> .)
      132   RPAREN          reduce using rule 3 (optional_balanced_string -> .)
      133   RBRACE          reduce using rule 3 (optional_balanced_string -> .)
      164   $end            reduce using rule 3 (optional_balanced_string -> .)
      165   COMMA           reduce using rule 3 (optional_balanced_string -> .)
      166   GREATER_THAN    reduce using rule 3 (optional_balanced_string -> .)
      167   RBRACKET        reduce using rule 3 (optional_balanced_string -> .)
      [all …]
|
| /external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
| D | reduction_splitter.cc |
      30   Status HandleReduce(HloInstruction *reduce) override {              in HandleReduce() argument
      31     VLOG(4) << "Input: " << reduce->ToString();                       in HandleReduce()
      35     if (IsReductionFromOrToContiguousDimensions(*reduce)) {           in HandleReduce()
      38     if (reduce->dimensions().size() < 2) {                            in HandleReduce()
      41     if (!reduce->shape().IsArray()) {                                 in HandleReduce()
      46     HloInstruction *operand = reduce->mutable_operand(0);             in HandleReduce()
      52     for (int64_t i = 0; i < reduce->dimensions().size(); ++i) {       in HandleReduce()
      53       for (int64_t j = i + 1; j < reduce->dimensions().size(); ++j) { in HandleReduce()
      54         CHECK(abs(reduce->dimensions(i) - reduce->dimensions(j)) > 1) in HandleReduce()
      59     // The reduce op has non-contiguous dimensions. Look for the dimension with  in HandleReduce()
      [all …]
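
ReductionSplitter appears to handle a reduce whose reduced dimensions are non-contiguous by splitting it into two consecutive reductions. The rewrite is value-preserving because reducing over a set of dimensions can always be done one dimension at a time; a NumPy illustration of that identity (not the XLA pass itself):

```python
# Illustration: splitting a reduction over non-contiguous dimensions into
# two consecutive reductions gives the same result.
import numpy as np

x = np.arange(2 * 3 * 4, dtype=np.float32).reshape(2, 3, 4)

# Reduce over the non-contiguous dimensions {0, 2} in one step...
combined = x.sum(axis=(0, 2))

# ...or split it: reduce dimension 2 first, then dimension 0 of the result.
split = x.sum(axis=2).sum(axis=0)

assert np.allclose(combined, split)   # both have shape (3,)
```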
|
| D | gpu_fusible_test.cc |
      42    ROOT reduce = f32[2,2]{1,0} reduce(exp, c0), dimensions={2}, to_apply=scalar_add              in TEST_F()
      67    ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add     in TEST_F()
      100   ROOT reduce = f32[1]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add        in TEST_F()
      122   ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={0,2,3}, to_apply=scalar_add        in TEST_F()
      142   ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={1,2,3}, to_apply=scalar_add        in TEST_F()
      172   ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add     in TEST_F()
      203   ROOT reduce = f32[1024]{0} reduce(loop_fusion, c0.2), dimensions={0,2,3}, to_apply=scalar_add in TEST_F()
      219   ROOT reduce = f32[512]{0} reduce(p1, c0), dimensions={0,2,3}, to_apply=scalar_add             in TEST_F()
      223   const HloInstruction* reduce =                                                                in TEST_F() local
      225   ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);                                              in TEST_F()
      [all …]
|
| D | reduction_layout_normalizer.cc |
      39    auto reduce = Cast<HloReduceInstruction>(hlo);                               in HandleReduce() local
      40    VLOG(5) << "Input: " << reduce->ToString();                                  in HandleReduce()
      48    const Shape &first_instruction_shape = reduce->inputs()[0]->shape();         in HandleReduce()
      50    for (HloInstruction *operand : reduce->inputs()) {                           in HandleReduce()
      56      reduce->parent()->AddInstruction(HloInstruction::CreateUnary(              in HandleReduce()
      73      reduce->shape().IsTuple() ? reduce->shape().tuple_shapes(operand_idx)      in HandleReduce()
      74                                : reduce->shape();                               in HandleReduce()
      89      absl::c_count_if(reduce->dimensions(), [&](int64_t dim) {                  in HandleReduce()
      106     if (absl::c_linear_search(reduce->dimensions(), logical_dim)) {            in HandleReduce()
      126     if (new_operand_shape == operand_shape && reduce->inputs().size() == 1) {  in HandleReduce()
      [all …]
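
The layout normalizer rewrites reduce operands toward a canonical layout, so the logical dimension indices being reduced have to be remapped through that relayout. A NumPy illustration of the remapping idea, with the relayout modeled as a transpose (illustration only, not the pass's API):

```python
# Illustration: when an operand is re-laid-out (modeled here as a transpose),
# the logical dimension being reduced must be remapped accordingly.
import numpy as np

x = np.arange(2 * 3 * 4, dtype=np.float32).reshape(2, 3, 4)
perm = (2, 0, 1)              # stand-in for a layout change
y = np.transpose(x, perm)     # shape (4, 2, 3)

# Reducing logical dimension 2 of x equals reducing dimension 0 of y,
# because perm[0] == 2.
assert np.allclose(x.sum(axis=2), y.sum(axis=0))
```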
|
| D | multi_output_fusion_test.cc |
      61    // Fusion with reduce instruction root and a sibling reduce instruction                              in TEST_F()
      68    …ROOT reduce.1 = f32[512]{0} reduce(mul, const.1), dimensions={0,2,3}, to_apply=scalar_add_computat… in TEST_F()
      76    … reduce.2 = f32[512]{0} reduce(p1, const.2), dimensions={0,2,3}, to_apply=scalar_add_computation    in TEST_F()
      77    ROOT root = (f32[512]{0}, f32[512]{0}) tuple(fusion, reduce.2)                                       in TEST_F()
      86    op::Tuple(op::Reduce(), op::Reduce()));                                                              in TEST_F()
      95    ROOT reduce.1 = f32[] reduce(mul, const.1), dimensions={0}, to_apply=scalar_add_computation          in TEST_F()
      102   ROOT reduce.2 = f32[] reduce(r1, const.2), dimensions={1,0}, to_apply=scalar_mul_computation         in TEST_F()
      122   ROOT reduce.1 = f32[] reduce(mul, const.1), dimensions={0,1}, to_apply=scalar_add_computation        in TEST_F()
      128   … ROOT reduce.2 = f32[10]{0} reduce(p1.2, const.2), dimensions={0}, to_apply=scalar_mul_computation  in TEST_F()
      144   // Two sibling fusions with reduce instruction roots sharing the same input                          in TEST_F()
      [all …]
|
| /external/tensorflow/tensorflow/compiler/xla/service/ |
| D | reduce_decomposer.cc |
      37    auto reduce = Cast<HloReduceInstruction>(hlo);                            in HandleReduce() local
      40    for (HloInstruction* input : reduce->inputs()) {                          in HandleReduce()
      41      auto first_input = reduce->inputs()[0];                                 in HandleReduce()
      59      MakeReduceHlo(new_inputs, reduce->init_values(), reduce->dimensions(),  in HandleReduce()
      60                    reduce->called_computations()[0]));                       in HandleReduce()
      61    TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, new_reduce));               in HandleReduce()
      74    auto reduce = Cast<HloReduceInstruction>(hlo);                            in HandleReduce() local
      75    auto shape = reduce->shape();                                             in HandleReduce()
      76    if (custom_layout_allowed_ && custom_layout_allowed_(reduce)) {           in HandleReduce()
      80    std::vector<Shape> expected_shapes(reduce->input_count());                in HandleReduce()
      [all …]
|
| D | ar_crs_combiner_test.cc |
      415   %all-reduce.ar.1 = bf16[]          in TEST_F()
      416     all-reduce(%p),                  in TEST_F()
      422     convert(%all-reduce.ar.1),       in TEST_F()
      424   %all-reduce.1 = f32[]              in TEST_F()
      425     all-reduce(%convert.1),          in TEST_F()
      430   %all-reduce.ar.2 = bf16[]          in TEST_F()
      431     all-reduce(%constant.bf16),      in TEST_F()
      437     convert(%all-reduce.ar.2),       in TEST_F()
      439   %all-reduce.2 = f32[]              in TEST_F()
      440     all-reduce(%convert.2),          in TEST_F()
      [all …]
|
| D | all_reduce_simplifier_test.cc |
      73    all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=sum                               in TEST_F()
      74    all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max                             in TEST_F()
      75    all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={}, to_apply=min                             in TEST_F()
      76    all-reduce.3 = f32[] all-reduce(p2), replica_groups={}, to_apply=sum.1                               in TEST_F()
      77    …= (f32[8,16], f32[8,16], f32[8,16], f32[]) tuple(all-reduce, all-reduce.1, all-reduce.2, all-reduc… in TEST_F()
      112   all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max                               in TEST_F()
      113   ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce), replica_groups={}, to_apply=sum                in TEST_F()
      151   all-reduce = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum            in TEST_F()
      152   all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=max          in TEST_F()
      153   all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=min          in TEST_F()
      [all …]
|
| D | all_reduce_reassociate_test.cc |
      62    ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum                in TEST_F()
      63    ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum                in TEST_F()
      87    ar0 = f32[8] all-reduce(p0), channel_id=1, replica_groups={}, to_apply=sum  in TEST_F()
      88    ar1 = f32[8] all-reduce(p1), channel_id=1, replica_groups={}, to_apply=sum  in TEST_F()
      116   ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum                in TEST_F()
      117   ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum                in TEST_F()
      118   ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum                in TEST_F()
      119   ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum                in TEST_F()
      151   ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum                in TEST_F()
      152   ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum                in TEST_F()
      [all …]
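
The reassociation pass appears to fold `add(all-reduce(p0), all-reduce(p1))` into a single `all-reduce(add(p0, p1))` when both all-reduces use the same summing computation and groups. The enabling identity is plain linearity of the sum; a NumPy sketch with the replica dimension made explicit (the axis-0 sum stands in for the cross-replica all-reduce):

```python
# Sketch of why reassociating two summing all-reduces is legal.
# Axis 0 models the replicas participating in the all-reduce.
import numpy as np

replicas, n = 4, 8
a = np.random.rand(replicas, n).astype(np.float32)   # per-replica p0
b = np.random.rand(replicas, n).astype(np.float32)   # per-replica p1

two_all_reduces = a.sum(axis=0) + b.sum(axis=0)   # add(all-reduce(p0), all-reduce(p1))
one_all_reduce = (a + b).sum(axis=0)              # all-reduce(add(p0, p1))

assert np.allclose(two_all_reduces, one_all_reduce)
```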
|
| /external/angle/third_party/vulkan-deps/glslang/src/Test/baseResults/ |
| D | spv.shaderBallotAMD.comp.out |
      96    29: 6(int)        GroupSMin 28 Reduce 27
      102   36: 7(int)        GroupUMin 28 Reduce 35
      104   38: 7(int)        GroupUMin 28 Reduce 37
      111   46: 9(float)      GroupFMin 28 Reduce 45
      113   48: 9(float)      GroupFMin 28 Reduce 47
      115   50: 9(float)      GroupFMin 28 Reduce 49
      122   58:11(float64_t)  GroupFMin 28 Reduce 57
      124   60:11(float64_t)  GroupFMin 28 Reduce 59
      126   62:11(float64_t)  GroupFMin 28 Reduce 61
      128   64:11(float64_t)  GroupFMin 28 Reduce 63
      [all …]
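
In these SPIR-V disassemblies, `Reduce` is the group-operation operand of `GroupSMin`/`GroupUMin`/`GroupFMin`: every invocation receives the single combined value, in contrast to the `InclusiveScan`/`ExclusiveScan` group operations that yield prefix results. A rough Python analogy over one subgroup's values (analogy only, not generated from the shader):

```python
# Analogy only: 'Reduce' vs a scan group operation on one subgroup's values.
from functools import reduce
from itertools import accumulate

values = [7, 3, 9, 1]                      # one value per invocation in the subgroup

reduce_result = reduce(min, values)         # Reduce: every invocation would get 1
inclusive = list(accumulate(values, min))   # InclusiveScan: running minima

assert reduce_result == 1
assert inclusive == [7, 3, 3, 1]
```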
|
| /external/deqp-deps/glslang/Test/baseResults/ |
| D | spv.shaderBallotAMD.comp.out |
      96    29: 6(int)        GroupSMin 28 Reduce 27
      102   36: 7(int)        GroupUMin 28 Reduce 35
      104   38: 7(int)        GroupUMin 28 Reduce 37
      111   46: 9(float)      GroupFMin 28 Reduce 45
      113   48: 9(float)      GroupFMin 28 Reduce 47
      115   50: 9(float)      GroupFMin 28 Reduce 49
      122   58:11(float64_t)  GroupFMin 28 Reduce 57
      124   60:11(float64_t)  GroupFMin 28 Reduce 59
      126   62:11(float64_t)  GroupFMin 28 Reduce 61
      128   64:11(float64_t)  GroupFMin 28 Reduce 63
      [all …]
|
| /external/tensorflow/tensorflow/lite/kernels/ |
| D | reduce.cc |
      15    #include "tensorflow/lite/kernels/internal/reference/reduce.h"
      31    #include "tensorflow/lite/kernels/internal/optimized/reduce.h"
      44    namespace reduce {                                                              namespace
      201   // mean or temp prod when calculating reduce prod.                              in InitializeTemporaries()
      649   // Apply reduce operation using the 'reducer' function on all of 'input_data'.
      650   // and reduce all to single element.
      691   // Reduce all data from different workers.                                      in ReduceAllDims()
      698   // The underlying logic for Reduce Sum/Prod/Max/Min/Any
      863   // Rescaling 8bit reduce sum.                                                   in EvalSum()
      1001  }  // namespace reduce
      [all …]
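
The matched comments sketch how the TFLite kernel reduces an entire input to one element, with per-worker partial results combined at the end. A plain-Python model of that two-level pattern (the helper below is made up for illustration; the real logic is the C++ in reduce.cc):

```python
# Model of a reduce-all-dimensions kernel that splits work across workers
# and then reduces the per-worker partial results. Helper names are made up.
from functools import reduce

def reduce_all_dims(input_data, reducer, init, num_workers=4):
    chunk = max(1, len(input_data) // num_workers)
    # Each "worker" reduces its own slice of the flattened input...
    partials = [
        reduce(reducer, input_data[i:i + chunk], init)
        for i in range(0, len(input_data), chunk)
    ]
    # ...and the partial results are reduced down to a single element.
    return reduce(reducer, partials, init)

data = list(range(1, 11))
assert reduce_all_dims(data, lambda a, b: a + b, 0) == sum(data)   # Sum
assert reduce_all_dims(data, lambda a, b: a * b, 1) == 3628800     # Prod (10!)
assert reduce_all_dims(data, max, data[0]) == 10                   # Max
```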
|
| /external/tensorflow/tensorflow/lite/delegates/gpu/common/tasks/ |
| D | reduce.h |
      31   class Reduce : public GPUOperation {
      33     Reduce() = default;
      34     Reduce(const std::map<Axis, int>& axis_to_reduce, OperationType op_type,
      45     Reduce(Reduce&& operation);
      46     Reduce& operator=(Reduce&& operation);
      47     Reduce(const Reduce&) = delete;
      48     Reduce& operator=(const Reduce&) = delete;
      60   Reduce CreateReduce(const std::set<Axis>& axis_to_reduce, const BHWC& src_shape,
      64   Reduce CreateReduce(const std::set<Axis>& axis_to_reduce,
|
| /external/python/cpython3/Modules/_decimal/libmpdec/literature/ |
| D | umodarith.lisp |
      250   ;; dw-reduce is correct
      253   (defun dw-reduce (hi lo m base)
      258   (defthmd dw-reduce-correct
      263     (equal (dw-reduce hi lo m base)
      272   (defthmd dw-reduce-aux1
      285   (defthm dw-reduce-aux2
      292   (defthmd dw-reduce-second-quotient-fits-in-single-word
      302     ("Subgoal 1.2" :use ((:instance dw-reduce-aux1
      311     (let* ((r (dw-reduce hi lo m base))
      330     :hints (("Goal" :in-theory (disable dw-reduce)
      [all …]
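
`dw-reduce` takes a double-word value, given as high and low words in radix `base`, and reduces it modulo a single-word `m`; the surrounding ACL2 theorems (`dw-reduce-correct`, `dw-reduce-second-quotient-fits-in-single-word`) establish that the word-by-word evaluation matches the mathematical definition and that intermediate quotients stay within a single word. A plain-Python model of the correctness property (the actual libmpdec code operates on fixed-width machine words, not Python integers):

```python
# Plain-Python model of the dw-reduce property proved in umodarith.lisp.
def dw_reduce_spec(hi, lo, m, base):
    """Mathematical definition: (hi*base + lo) mod m."""
    return (hi * base + lo) % m

def dw_reduce_wordwise(hi, lo, m, base):
    """Word-by-word variant: reduce hi first, then fold in lo.
    The ACL2 lemmas are concerned with steps like this staying single-word."""
    r1 = hi % m
    return (r1 * base + lo) % m

base, m = 2**64, 2**64 - 59          # example word radix and modulus
hi, lo = 12345678901234567890, 9876543210987654321
assert dw_reduce_wordwise(hi, lo, m, base) == dw_reduce_spec(hi, lo, m, base)
```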
|
| /external/jsilver/src/com/google/clearsilver/jsilver/syntax/parser/ |
| D | Parser.java |
      30    private final static int REDUCE = 1;                 field in Parser
      166       case REDUCE:                                     in parse()
      169         case 0: /* reduce ANone1Grammar */             in parse()
      175         case 1: /* reduce AOne1Grammar */              in parse()
      181         case 2: /* reduce AMany1Grammar */             in parse()
      187         case 3: /* reduce ADataCommand */              in parse()
      193         case 4: /* reduce AAcommentcommand1Command */  in parse()
      199         case 5: /* reduce AAcommentcommand2Command */  in parse()
      205         case 6: /* reduce AVarCommand */               in parse()
      211         case 7: /* reduce ALvarCommand */              in parse()
      [all …]
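
The generated SableCC parser is a table-driven LR machine: each `case n: /* reduce ... */` pops the right-hand side of grammar rule n off the stack, builds the corresponding node, and pushes the left-hand side using the goto table. A stripped-down sketch of that reduce step with a made-up rule and goto table (not JSilver's generated code):

```python
# Sketch of an LR parser's REDUCE action. Stack entries are (state, node).
# Each rule maps to (lhs_symbol, rhs_length); the table below is hypothetical.
RULES = {
    0: ("Grammar", 0),   # think "reduce ANone1Grammar": empty right-hand side
    3: ("Command", 1),   # think "reduce ADataCommand": one symbol on the right
}

def reduce_action(stack, rule_no, goto_table):
    lhs, rhs_len = RULES[rule_no]
    # Pop the right-hand-side symbols (with their states) off the stack.
    rhs_nodes = [stack.pop()[1] for _ in range(rhs_len)][::-1]
    node = (lhs, rhs_nodes)            # stand-in for the AST node built here
    state_below = stack[-1][0]         # state uncovered at the new stack top
    # Push the left-hand side together with the state from the goto table.
    stack.append((goto_table[(state_below, lhs)], node))
    return node

# Tiny demo: reduce one data token to a Command node.
goto = {(0, "Command"): 5}
stack = [(0, None), (2, ("TOKEN_DATA", "hello"))]
reduce_action(stack, 3, goto)
assert stack == [(0, None), (5, ("Command", [("TOKEN_DATA", "hello")]))]
```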
|
| /external/tensorflow/tensorflow/core/kernels/ |
| D | reduction_ops_test.cc |
      24    // Creates a Graph which "reduce"s a 3D float tensor of "num" elements
      27    static Graph* ToScalar(const string& reduce, int num_x, int num_y) {       in ToScalar() argument
      34      test::graph::Reduce(g, reduce, test::graph::Constant(g, data),           in ToScalar()
      39    static Graph* ColReduce(const string& reduce, int num_x, int num_y) {      in ColReduce() argument
      45      test::graph::Reduce(g, reduce, test::graph::Constant(g, data),           in ColReduce()
      50    static Graph* RowReduce(const string& reduce, int num_x, int num_y) {      in RowReduce() argument
      56      test::graph::Reduce(g, reduce, test::graph::Constant(g, data),           in RowReduce()
      61    static Graph* ThreeDYReduce(const string& reduce, int num_y, int num_z) {  in ThreeDYReduce() argument
      67      test::graph::Reduce(g, reduce, test::graph::Constant(g, data),           in ThreeDYReduce()
      72    static Graph* ThreeDXZReduce(const string& reduce, int num_y, int num_z) { in ThreeDXZReduce() argument
      [all …]
|
| /external/tensorflow/tensorflow/compiler/xla/mlir_hlo/tests/Dialect/mhlo/ |
| D | mhlo_reduce_pretty_print.mlir |
      11    // version of mhlo.reduce op.
      13    // The test case is eligible for pretty-printing reduce-op.
      16    // CHECK-NEXT: mhlo.reduce(%arg0 init: %arg1) applies mhlo.add across dimensions = [1] : (tenso…
      19    %0 = "mhlo.reduce"(%arg0, %arg1) ({
      28    // The test case is not eligible for pretty-printing reduce-op. The location of
      29    // reduce-op is different.
      32    // CHECK-NEXT: mhlo.reduce(%arg0 init: %arg1)
      41    %0 = "mhlo.reduce"(%arg0, %arg1) ({
      50    // The test case is not eligible for pretty-printing reduce-op. The location of
      57    %0 = "mhlo.reduce"(%arg0, %arg1) ({
      [all …]
|
| /external/tensorflow/tensorflow/compiler/xla/mlir_hlo/stablehlo/tests/ |
| D | print_reduce.mlir |
      11    // version of stablehlo.reduce op.
      13    // The test case is eligible for pretty-printing reduce-op.
      16    // CHECK-NEXT: stablehlo.reduce(%arg0 init: %arg1) applies stablehlo.add across dimensions = [1…
      19    %0 = "stablehlo.reduce"(%arg0, %arg1) ({
      28    // The test case is not eligible for pretty-printing reduce-op. The location of
      29    // reduce-op is different.
      32    // CHECK-NEXT: stablehlo.reduce(%arg0 init: %arg1)
      41    %0 = "stablehlo.reduce"(%arg0, %arg1) ({
      50    // The test case is not eligible for pretty-printing reduce-op. The location of
      57    %0 = "stablehlo.reduce"(%arg0, %arg1) ({
      [all …]
|
| /external/tensorflow/tensorflow/compiler/xla/service/gpu/tests/ |
| D | tree_reduction_rewriter_test.cc |
      59    ROOT out = f32[] reduce(input, zero), dimensions={0}, to_apply=add                                    in TEST_F()
      67    // CHECK: [[reduce_4:%[^ ]+]] = f32[224]{0} reduce([[bitcast_3]], [[zero_2]]), dimensions={1}, to_a… in TEST_F()
      68    // CHECK: ROOT [[out_1_6:%[^ ]+]] = f32[] reduce([[reduce_4]], [[zero_2]]), dimensions={0}, to_appl… in TEST_F()
      85    ROOT out = f32[2,4]{0,1} reduce(input, zero), dimensions={2}, to_apply=add                            in TEST_F()
      92    // CHECK: f32[2,4]{0,1} reduce(                                                                       in TEST_F()
      110   ROOT out = f32[] reduce(input, zero), dimensions={0}, to_apply=add                                    in TEST_F()
      119   // CHECK: [[reduce_3:%[^ ]+]] = f32[223]{0} reduce([[bitcast_1]], [[zero_2]]), dimensions={1}, to_a… in TEST_F()
      120   // CHECK: ROOT [[out_1_5:%[^ ]+]] = f32[] reduce([[reduce_3]], [[zero_2]]), dimensions={0}, to_appl… in TEST_F()
      137   ROOT out = f32[100,10] reduce(input, zero), dimensions={2}, to_apply=add                              in TEST_F()
      145   // CHECK: [[reduce_3:%[^ ]+]] = f32[100,10,300]{2,1,0} reduce([[bitcast_0]], [[zero_2]]), dimension… in TEST_F()
      [all …]
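
The tree-reduction rewriter turns one large reduction into a two-level tree; the CHECK lines above show the input being bitcast to a 2-D shape, reduced along one dimension (to `f32[224]` or `f32[223]`), and then reduced again to the final shape. A NumPy sketch of the equivalent reshape-then-reduce-twice computation (the 224 x 224 split is just an example size):

```python
# Sketch of the two-level (tree) reduction the rewriter produces:
# bitcast/reshape to 2-D, reduce the minor dimension, then reduce the rest.
import numpy as np

x = np.random.rand(224 * 224).astype(np.float32)   # example input size only

one_shot = x.sum()                                  # the original single reduce
partial = x.reshape(224, 224).sum(axis=1)           # first-level reduce -> shape (224,)
tree = partial.sum()                                # second-level reduce -> scalar

# Equal up to floating-point re-association; the rewrite keeps each
# individual reduction small enough for the GPU reduction emitter.
assert np.isclose(one_shot, tree, rtol=1e-4)
```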
|