/external/rust/crates/rayon/src/iter/ |
D | reduce.rs |
    4  pub(super) fn reduce<PI, R, ID, T>(pi: PI, identity: ID, reduce_op: R) -> T  in reduce()
    13  reduce_op: &reduce_op,  in reduce()
    20  reduce_op: &'r R,  field
    47  reduce_op: self.reduce_op,  in into_folder()
    77  (self.reduce_op)(left, right)  in reduce()
    82  reduce_op: &'r R,  field
    94  reduce_op: self.reduce_op,  in consume()
    95  item: (self.reduce_op)(self.item, item),  in consume()
    104  reduce_op: self.reduce_op,  in consume_iter()
    105  item: iter.into_iter().fold(self.item, self.reduce_op),  in consume_iter()
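These hits are rayon's internal reduce consumer: `identity` and `reduce_op` are threaded through `into_folder`/`consume`/`consume_iter`, and partial results from parallel splits are recombined with the same `reduce_op`. A sequential Python sketch of that semantics (illustrative only; the names and chunking are not rayon's):

```python
# Sketch of the reduce-with-identity semantics that rayon parallelizes.
# `identity` and `reduce_op` mirror the parameters in reduce.rs; the chunking
# here is sequential and purely illustrative.
def reduce_with_identity(items, identity, reduce_op, n_chunks=4):
    items = list(items)
    size = max(1, -(-len(items) // n_chunks))      # ceil division
    chunks = [items[i:i + size] for i in range(0, len(items), size)]
    partials = []
    for chunk in chunks:                           # each chunk is folded like a Folder
        acc = identity()
        for item in chunk:
            acc = reduce_op(acc, item)
        partials.append(acc)
    result = identity()                            # partial results are recombined
    for partial in partials:
        result = reduce_op(result, partial)
    return result

assert reduce_with_identity(range(1, 6), lambda: 0, lambda a, b: a + b) == 15
```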
|
D | try_reduce_with.rs |
    7  pub(super) fn try_reduce_with<PI, R, T>(pi: PI, reduce_op: R) -> Option<T>  in try_reduce_with()
    15  reduce_op: &reduce_op,  in try_reduce_with()
    22  reduce_op: &'r R,  field
    49  reduce_op: self.reduce_op,  in into_folder()
    80  let reduce_op = self.reduce_op;  in reduce() localVariable
    84  (Ok(a), Ok(b)) => Some(reduce_op(a, b)),  in reduce()
    92  reduce_op: &'r R,  field
    105  let reduce_op = self.reduce_op;  in consume() localVariable
    109  Ok(b) => reduce_op(a, b).into_result(),  in consume()
|
D | try_reduce.rs |
    7  pub(super) fn try_reduce<PI, R, ID, T>(pi: PI, identity: ID, reduce_op: R) -> T  in try_reduce()
    17  reduce_op: &reduce_op,  in try_reduce()
    25  reduce_op: &'r R,  field
    53  reduce_op: self.reduce_op,  in into_folder()
    86  (Ok(left), Ok(right)) => (self.reduce_op)(left, right),  in reduce()
    93  reduce_op: &'r R,  field
    106  let reduce_op = self.reduce_op;  in consume() localVariable
    109  Ok(right) => reduce_op(left, right).into_result(),  in consume()
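`try_reduce` (and `try_reduce_with` above) is the fallible variant: `reduce_op` yields a `Try`/`Result`, and the `(Ok(left), Ok(right)) => ...` arm shows that combination only proceeds while both sides are `Ok`. A rough Python sketch of that short-circuiting behaviour, modelling `Result` as `('ok', v)` / `('err', e)` tuples (names are illustrative, not rayon's):

```python
# Sketch of a fallible reduce: reduce_op returns ('ok', value) or ('err', error);
# the first error wins and no further combination happens.
def try_reduce(items, identity, reduce_op):
    acc = ('ok', identity())
    for item in items:
        if acc[0] == 'err':          # short-circuit on the first error
            return acc
        acc = reduce_op(acc[1], item)
    return acc

def checked_add(a, b):
    return ('err', 'overflow') if a + b > 100 else ('ok', a + b)

assert try_reduce([1, 2, 3], lambda: 0, checked_add) == ('ok', 6)
assert try_reduce([60, 60, 1], lambda: 0, checked_add) == ('err', 'overflow')
```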
|
/external/tensorflow/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/ |
D | lhlo_legalize_to_gpu.cc |
    53  ReduceOp reduce_op, ArrayRef<Value> args,  in matchAndRewrite() argument
    55  auto loc = reduce_op.getLoc();  in matchAndRewrite()
    58  for (auto result : reduce_op.out()) {  in matchAndRewrite()
    70  auto reducing_dimension = *reduce_op.dimensions().int_value_begin();  in matchAndRewrite()
    74  for (auto input : reduce_op.operands()) {  in matchAndRewrite()
    98  for (auto pair : llvm::zip(reduce_op.init_values(), reduce_op.out())) {  in matchAndRewrite()
    121  auto output = *reduce_op.out().begin();  in matchAndRewrite()
    134  auto input_buffer = *reduce_op.operands().begin();  in matchAndRewrite()
    138  Value input = *reduce_op.operand_begin();  in matchAndRewrite()
    152  mapping.map(reduce_op.body().getArgument(0), accumulator);  in matchAndRewrite()
    [all …]
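Both this pass and `lhlo_legalize_to_parallel_loops.cc` below lower an LHLO `ReduceOp` to loops: each output element starts from the matching init value and is folded with the reduction body along `reducing_dimension`. A pure-Python reference of that element-level computation for a 2-D input (an illustration of the semantics, not the MLIR lowering itself):

```python
# Reference semantics for reducing one dimension of a 2-D "tensor":
# every output element starts from the init value and is folded with `body`
# along the reducing dimension, mirroring the accumulator in the listing.
def reduce_2d(data, init, body, reducing_dimension):
    rows, cols = len(data), len(data[0])
    if reducing_dimension == 0:
        out = [init] * cols
        for r in range(rows):
            for c in range(cols):
                out[c] = body(out[c], data[r][c])
        return out
    out = [init] * rows
    for r in range(rows):
        for c in range(cols):
            out[r] = body(out[r], data[r][c])
    return out

data = [[1, 2, 3], [4, 5, 6]]
assert reduce_2d(data, 0, lambda a, b: a + b, 0) == [5, 7, 9]
assert reduce_2d(data, 0, lambda a, b: a + b, 1) == [6, 15]
```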
|
D | lhlo_legalize_to_parallel_loops.cc |
    66  void ConvertToReductionOperator(Location loc, scf::ReduceOp reduce_op,  in ConvertToReductionOperator() argument
    68  Block& loop_reduce_op_body = reduce_op.reductionOperator().front();  in ConvertToReductionOperator()
    194  lmhlo::ReduceOp reduce_op, ArrayRef<Value> /*args*/,  in matchAndRewrite() argument
    197  if (reduce_op.out().size() != 1) return failure();  in matchAndRewrite()
    200  CreateReduceOpInNestedParallelLoops(reduce_op, &rewriter);  in matchAndRewrite()
    201  ConvertToReductionOperator(reduce_op.getLoc(), scf_reduce_op,  in matchAndRewrite()
    202  &reduce_op.body().front(), &rewriter);  in matchAndRewrite()
    203  rewriter.replaceOp(reduce_op, llvm::None);  in matchAndRewrite()
    228  lmhlo::ReduceOp reduce_op, ConversionPatternRewriter* rewriter) const {  in CreateReduceOpInNestedParallelLoops() argument
    229  auto loc = reduce_op.getLoc();  in CreateReduceOpInNestedParallelLoops()
    [all …]
|
/external/tensorflow/tensorflow/python/distribute/ |
D | cross_device_ops.py |
    84  reduce_op, value, destinations, num_replicas_in_graph):  argument
    98  if reduce_op == reduce_util.ReduceOp.MEAN:
    106  "the given reduce op %s." % (value, reduce_op))
    201  reduce_op):  argument
    212  if reduce_op == reduce_util.ReduceOp.MEAN:
    215  elif reduce_op != reduce_util.ReduceOp.SUM:
    255  def reduce(self, reduce_op, per_replica_value, destinations, options=None):  argument
    300  return self.reduce_implementation(reduce_op, per_replica_value,
    381  def batch_reduce(self, reduce_op, value_destination_pairs, options=None):  argument
    427  return self.batch_reduce_implementation(reduce_op, value_destination_pairs,
    [all …]
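The hits at lines 98 and 212 show the only two reductions supported here: SUM, and MEAN implemented as a SUM divided by the number of replicas. A minimal sketch of that dispatch (strings stand in for `reduce_util.ReduceOp` values; this is not TensorFlow's implementation):

```python
# Simplified sketch of the SUM/MEAN dispatch visible in the listing:
# MEAN is a SUM followed by division by the replica count, and any other
# reduce op is rejected.
def simple_cross_replica_reduce(reduce_op, per_replica_values, num_replicas_in_graph):
    total = sum(per_replica_values)
    if reduce_op == "MEAN":
        return total / num_replicas_in_graph
    if reduce_op != "SUM":
        raise ValueError("Unsupported reduce op: %r" % (reduce_op,))
    return total

assert simple_cross_replica_reduce("SUM", [1.0, 2.0], 2) == 3.0
assert simple_cross_replica_reduce("MEAN", [1.0, 2.0], 2) == 1.5
```

The 3.0-vs-1.5 and 10.0-vs-2.5 expectations in cross_device_ops_test.py below follow directly from this rule.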
|
D | distribute_lib.py |
    1268  def reduce(self, reduce_op, value, axis):  argument
    1375  if isinstance(reduce_op, six.string_types):
    1376  reduce_op = reduce_util.ReduceOp(reduce_op.upper())
    1378  return self._extended._reduce(reduce_op, value) # pylint: disable=protected-access
    1379  if reduce_op == reduce_util.ReduceOp.SUM:
    1399  return self._extended._reduce(reduce_op, value) # pylint: disable=protected-access
    1400  if reduce_op != reduce_util.ReduceOp.MEAN:
    1402  "not: %r" % reduce_op)
    1954  def reduce(self, reduce_op, value, axis=None):  argument
    1955  return super(StrategyV1, self).reduce(reduce_op, value, axis)
    [all …]
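`Strategy.reduce` accepts either a `tf.distribute.ReduceOp` or a string that is upper-cased into one (line 1376), and `axis=None` means "only reduce across replicas". A hedged usage example, assuming a TF 2.x environment where `MirroredStrategy` is available:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

@tf.function
def step():
    # Each replica contributes the same constant in this toy example.
    return tf.constant(2.0)

per_replica = strategy.run(step)
# Strings are upper-cased into tf.distribute.ReduceOp ("sum" -> ReduceOp.SUM),
# as the isinstance/upper() hits above show.
total = strategy.reduce("sum", per_replica, axis=None)
mean = strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica, axis=None)
```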
|
D | cross_device_ops_test.py |
    244  reduced_values = collective.reduce(options.reduce_op, per_replica_value,
    290  reduced_values = collective.batch_reduce(options.reduce_op,
    320  reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
    323  reduce_op, prefer_unique_instance_key):  argument
    334  reduce_op=reduce_op,
    346  expect = 3.0 if reduce_op == ReduceOp.SUM else 1.5
    348  expect = 10.0 if reduce_op == ReduceOp.SUM else 2.5
    362  reduce_op=ReduceOp.SUM,
    365  reduce_op, prefer_unique_instance_key):  argument
    377  reduce_op=reduce_op,
    [all …]
|
D | reduce_util.py |
    47  reduce_op = mapping.get(aggregation)
    48  if not reduce_op:
    51  return reduce_op
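These hits are the aggregation-to-reduce-op lookup: a dict maps a variable aggregation to a `ReduceOp`, and anything unmapped is rejected. A small sketch of the same pattern using the public `tf.VariableAggregation` and `tf.distribute.ReduceOp` enums (the exact mapping and error text in reduce_util.py may differ):

```python
import tensorflow as tf

# Sketch of the lookup pattern in reduce_util.py: aggregations without a
# cross-replica equivalent (e.g. ONLY_FIRST_REPLICA) have no ReduceOp.
_AGGREGATION_TO_REDUCE_OP = {
    tf.VariableAggregation.SUM: tf.distribute.ReduceOp.SUM,
    tf.VariableAggregation.MEAN: tf.distribute.ReduceOp.MEAN,
}

def reduce_op_from_aggregation(aggregation):
    reduce_op = _AGGREGATION_TO_REDUCE_OP.get(aggregation)
    if not reduce_op:
        raise ValueError("No ReduceOp for aggregation %r" % (aggregation,))
    return reduce_op

assert reduce_op_from_aggregation(tf.VariableAggregation.SUM) == tf.distribute.ReduceOp.SUM
```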
|
D | central_storage_strategy.py |
    149  def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation  argument
    213  return super(CentralStorageStrategy, self).reduce(reduce_op, value, axis)
|
D | mirrored_strategy.py |
    642  …for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-acc…
    646  if reduce_op is None:
    719  def _reduce_to(self, reduce_op, value, destinations, options):  argument
    721  reduce_op == reduce_util.ReduceOp.MEAN):
    730  reduce_op, value, destinations, self._num_replicas_in_sync)
    736  reduce_op, value, destinations)
    738  reduce_op,
    743  def _batch_reduce_to(self, reduce_op, value_destination_pairs, options):  argument
    752  reduce_op,
|
/external/tensorflow/tensorflow/tools/api/golden/v2/ |
D | tensorflow.distribute.-cross-device-ops.pbtxt |
    11  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    15  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    27  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
    31  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
|
D | tensorflow.distribute.-reduction-to-one-device.pbtxt |
    12  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    16  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    28  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
    32  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
|
D | tensorflow.distribute.-nccl-all-reduce.pbtxt |
    13  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    17  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    29  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
    33  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
|
D | tensorflow.distribute.-hierarchical-copy-all-reduce.pbtxt |
    13  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    17  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    29  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
    33  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
|
D | tensorflow.distribute.-strategy-extended.pbtxt |
    23  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    31  …argspec: "args=[\'self\', \'reduce_op\', \'value\', \'destinations\', \'options\'], varargs=None, …
|
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.distribute.-cross-device-ops.pbtxt |
    11  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    15  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    27  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
    31  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
|
D | tensorflow.distribute.-reduction-to-one-device.pbtxt |
    12  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    16  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    28  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
    32  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
|
D | tensorflow.distribute.-nccl-all-reduce.pbtxt |
    13  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    17  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    29  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
    33  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
|
D | tensorflow.distribute.-hierarchical-copy-all-reduce.pbtxt |
    13  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    17  …argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'options\'], varargs=None, …
    29  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
    33  …argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'options\'], va…
|
/external/tensorflow/tensorflow/python/ops/ragged/ |
D | ragged_math_ops.py |
    434  def ragged_reduce_aggregate(reduce_op,  argument
    481  return reduce_op(rt_input, axis, keepdims=keepdims, name=name)
    485  return reduce_op(
    497  result = reduce_op(rt_input.flat_values, None, keepdims=keepdims, name=name)
    525  inner_reduced = ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
    528  return ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
    562  ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
    571  reduce_op=math_ops.reduce_sum,
    582  reduce_op=math_ops.reduce_prod,
    593  reduce_op=math_ops.reduce_min,
    [all …]
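`ragged_reduce_aggregate` pairs a dense `reduce_op` (e.g. `math_ops.reduce_sum`) with an `unsorted_segment_op`, so the same reduction can run either within each ragged row or across rows; `reduce_sum`/`reduce_prod`/`reduce_min` are built from it by partial application (lines 571-593). The observable behaviour through the public API, assuming a TF 2.x environment:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])

# Row-wise reduction: each ragged row is reduced independently with reduce_op.
print(tf.reduce_sum(rt, axis=1).numpy())   # [ 3  3 15]

# Reduction over the ragged dimension is where the unsorted_segment_op comes in:
# values are grouped by column position before being combined.
print(tf.reduce_sum(rt, axis=0).numpy())   # [8 7 6]
```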
|
/external/tensorflow/tensorflow/python/distribute/v1/ |
D | cross_device_ops_test.py |
    237  reduce_op, batch_reduce):  argument
    247  reduce_op, [(per_replica, per_replica)])
    249  result = cross_device_ops_instance.reduce(reduce_op, per_replica,
    255  if reduce_op == reduce_util.ReduceOp.SUM:
    259  assert reduce_op == reduce_util.ReduceOp.MEAN
    395  reduce_op=[reduce_util.ReduceOp.SUM, reduce_util.ReduceOp.MEAN],
    399  def testIndexedSlicesAllReduce(self, cross_device_ops_instance, reduce_op,  argument
    403  reduce_op, batch_reduce)
    551  def _reduce(test_object, reduce_op, per_replica, destinations):  argument
    554  return test_object.extended.reduce_to(reduce_op, per_replica,
    [all …]
|
/external/mesa3d/src/amd/compiler/ |
D | aco_lower_to_hw_instr.cpp |
    485  void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, P…  in emit_reduction() argument
    494  identity[0] = Operand(get_reduction_identity(reduce_op, 0));  in emit_reduction()
    495  identity[1] = Operand(get_reduction_identity(reduce_op, 1));  in emit_reduction()
    527  if (reduce_op == imin8 || reduce_op == imax8)  in emit_reduction()
    536  if (reduce_op == imin8 || reduce_op == imax8)  in emit_reduction()
    546  (reduce_op == iadd16 || reduce_op == imax16 ||  in emit_reduction()
    547  reduce_op == imin16 || reduce_op == umin16 || reduce_op == umax16)) {  in emit_reduction()
    551  if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)  in emit_reduction()
    560  if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)  in emit_reduction()
    579  emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());  in emit_reduction()
    [all …]
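`emit_reduction` seeds temporaries and inactive lanes with `get_reduction_identity(reduce_op, ...)` so they cannot affect the result, with extra handling for 8- and 16-bit min/max/add. A rough Python table of typical integer reduction identities (the labels and values are illustrative, not aco's actual opcodes or encoding):

```python
# Typical identity elements for integer reductions: seeding an accumulator or
# an inactive lane with the identity leaves the reduction result unchanged.
IMIN32, IMAX32 = -2**31, 2**31 - 1

REDUCTION_IDENTITY = {
    "iadd32": 0,
    "imul32": 1,
    "imin32": IMAX32,        # a signed min must start at the largest value
    "imax32": IMIN32,
    "umin32": 2**32 - 1,
    "umax32": 0,
    "ior32": 0,
    "ixor32": 0,
}

def seeded_reduce(op_name, combine, values):
    acc = REDUCTION_IDENTITY[op_name]
    for v in values:
        acc = combine(acc, v)
    return acc

assert seeded_reduce("imax32", max, [3, -7, 5]) == 5
assert seeded_reduce("iadd32", lambda a, b: a + b, []) == 0
```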
|
/external/tensorflow/tensorflow/lite/delegates/xnnpack/ |
D | reduce_tester.cc |
    37  void ReduceTester::Test(tflite::BuiltinOperator reduce_op,  in Test() argument
    44  std::vector<char> buffer = CreateTfLiteModel(reduce_op);  in Test()
    105  tflite::BuiltinOperator reduce_op) const {  in CreateTfLiteModel()
    108  CreateOperatorCode(builder, reduce_op);  in CreateTfLiteModel()
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/ |
D | legalize_hlo.cc |
    579  ReductionOp reduce_op = dyn_cast_or_null<ReductionOp>(  in MatchBinaryReduceFunction() local
    581  if (!reduce_op) return failure();  in MatchBinaryReduceFunction()
    582  if (reduce_op.lhs() != body.getArgument(0) ||  in MatchBinaryReduceFunction()
    583  reduce_op.rhs() != body.getArgument(1))  in MatchBinaryReduceFunction()
    612  mhlo::ReduceOp reduce_op, ArrayRef<Value> args,  in matchAndRewrite() argument
    614  if (failed(MatchReduceOpInput(reduce_op))) return failure();  in matchAndRewrite()
    616  if (failed(MatchBinaryReduceFunction<BinaryOp>(reduce_op.body())))  in matchAndRewrite()
    621  if (failed(MatchInitValue(reduce_op.init_values()[0]))) return failure();  in matchAndRewrite()
    623  auto input = reduce_op.operands()[0];  in matchAndRewrite()
    626  DenseIntElementsAttr dimension = reduce_op.dimensions();  in matchAndRewrite()
    [all …]
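`MatchBinaryReduceFunction` only matches when the `mhlo.reduce` body is a single binary op applied directly to the two block arguments, and `MatchInitValue` checks the init constant; the pair is then rewritten to the corresponding TF reduction. The equivalence the pattern relies on, checked numerically (the `tf.reduce_sum` call is real public API; the fold is just a reference):

```python
import functools
import tensorflow as tf

x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
dims = [1]      # plays the role of reduce_op.dimensions()
init = 0.0      # the init_values()[0] an add-reduction must match

# The TF op the pattern rewrites to:
tf_result = tf.reduce_sum(x, axis=dims)

# Reference fold using the matched binary body (add) and init value:
reference = [functools.reduce(lambda a, b: a + b, row, init)
             for row in x.numpy()]
assert list(tf_result.numpy()) == reference   # [6.0, 15.0]
```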
|