/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorArgMax.h |
  134  template<typename ReduceOp, typename Dims, typename XprType>
  135  struct traits<TensorTupleReducerOp<ReduceOp, Dims, XprType> > : public traits<XprType>
  147  template<typename ReduceOp, typename Dims, typename XprType>
  148  struct eval<TensorTupleReducerOp<ReduceOp, Dims, XprType>, Eigen::Dense>
  150  typedef const TensorTupleReducerOp<ReduceOp, Dims, XprType>& type;
  153  template<typename ReduceOp, typename Dims, typename XprType>
  154  struct nested<TensorTupleReducerOp<ReduceOp, Dims, XprType>, 1,
  155  typename eval<TensorTupleReducerOp<ReduceOp, Dims, XprType> >::type>
  157  typedef TensorTupleReducerOp<ReduceOp, Dims, XprType> type;
  162  template<typename ReduceOp, typename Dims, typename XprType>
  [all …]
|
/external/tensorflow/tensorflow/python/distribute/ |
D | cross_device_ops_test.py |
  57   ReduceOp = reduce_util.ReduceOp variable
  221  "func_graph"], 2, 0, ReduceOp.SUM,
  320  reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
  346  expect = 3.0 if reduce_op == ReduceOp.SUM else 1.5
  348  expect = 10.0 if reduce_op == ReduceOp.SUM else 2.5
  362  reduce_op=ReduceOp.SUM,
  430  reduce_op=ReduceOp.SUM,
  442  reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
  470  expect = [4.0, 6.0] if reduce_op == ReduceOp.SUM else [2.0, 3.0]
  472  expect = [16.0, 20.0] if reduce_op == ReduceOp.SUM else [4.0, 5.0]
  [all …]
|
D | reduce_util.py |
  28  class ReduceOp(enum.Enum): class
  43  variable_scope.VariableAggregation.SUM: ReduceOp.SUM,
  44  variable_scope.VariableAggregation.MEAN: ReduceOp.MEAN,
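The `ReduceOp` enum defined in reduce_util.py is the implementation behind the public `tf.distribute.ReduceOp` symbol listed in the API golden files further down. A minimal sketch of its two members and the string round-trip that the `ReduceOp(reduce_op.upper())` hits in distribute_lib.py rely on; this assumes a TensorFlow 2.x install and is illustrative, not taken from the file:

```python
import tensorflow as tf

# The enum has exactly two members, SUM and MEAN.
print(list(tf.distribute.ReduceOp))

# The members are string-valued, so the enum can be reconstructed from the
# upper-cased string form of a "sum"/"mean" argument.
assert tf.distribute.ReduceOp("SUM") is tf.distribute.ReduceOp.SUM
assert tf.distribute.ReduceOp("MEAN") is tf.distribute.ReduceOp.MEAN
```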
|
D | strategy_common_test.py |
  86   reduce_util.ReduceOp.SUM, value=per_replica_value, axis=None)
  108  x_m = strategy.reduce(reduce_util.ReduceOp.MEAN, x, axis=0)
  110  x_s = strategy.reduce(reduce_util.ReduceOp.SUM, x, axis=0)
  271  reduce_util.ReduceOp.SUM, array_ops.identity(1.), axis=None)
  276  reduced = strategy.extended.reduce_to(reduce_util.ReduceOp.SUM, value,
  282  reduced = strategy.extended.batch_reduce_to(reduce_util.ReduceOp.SUM,
  289  reduced = strategy.reduce(reduce_util.ReduceOp.SUM, value, axis=None)
  294  reduced = strategy.extended.reduce_to(reduce_util.ReduceOp.SUM, value,
  300  reduced = strategy.extended.batch_reduce_to(reduce_util.ReduceOp.SUM,
|
D | distribute_lib.py |
  285   return reduce_util.ReduceOp.SUM
  289   return reduce_util.ReduceOp.SUM
  290   return reduce_util.ReduceOp.MEAN
  1376  reduce_op = reduce_util.ReduceOp(reduce_op.upper())
  1379  if reduce_op == reduce_util.ReduceOp.SUM:
  1400  if reduce_op != reduce_util.ReduceOp.MEAN:
  1462  …numer = self._extended._reduce(reduce_util.ReduceOp.SUM, numer) # pylint: disable=protected-access
  1463  …denom = self._extended._reduce(reduce_util.ReduceOp.SUM, denom) # pylint: disable=protected-access
  2304  reduce_op = reduce_util.ReduceOp(reduce_op.upper())
  2305  assert (reduce_op == reduce_util.ReduceOp.SUM or
  [all …]
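The distribute_lib.py hits sit inside `Strategy.reduce`, which normalizes string arguments with `ReduceOp(reduce_op.upper())` before dispatching on SUM vs. MEAN. A minimal sketch of that user-facing call, assuming TensorFlow 2.x; the step function and values are illustrative placeholders:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()  # all visible GPUs, else the CPU

@tf.function
def step():
  # Each replica contributes (replica id + 1) as a float.
  ctx = tf.distribute.get_replica_context()
  return tf.cast(ctx.replica_id_in_sync_group + 1, tf.float32)

per_replica = strategy.run(step)

# reduce() accepts either a ReduceOp member or the strings "sum"/"mean";
# strings are normalized via ReduceOp(reduce_op.upper()) as in the hits above.
total = strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=None)
mean = strategy.reduce("mean", per_replica, axis=None)
```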
|
D | strategy_combinations_test.py |
  54  reduce_util.ReduceOp.SUM, one_per_replica, axis=None)
  70  reduce_util.ReduceOp.SUM, one_per_replica, axis=None)
  83  reduce_util.ReduceOp.SUM, one_per_replica, axis=None)
|
D | cross_device_ops.py |
  98    if reduce_op == reduce_util.ReduceOp.MEAN:
  212   if reduce_op == reduce_util.ReduceOp.MEAN:
  215   elif reduce_op != reduce_util.ReduceOp.SUM:
  680   if reduce_op == reduce_util.ReduceOp.MEAN:
  1123  if reduce_op == reduce_util.ReduceOp.MEAN:
  1256  if reduce_op == reduce_util.ReduceOp.MEAN:
  1294  if reduce_op == reduce_util.ReduceOp.MEAN:
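The repeated `if reduce_op == reduce_util.ReduceOp.MEAN:` branches in cross_device_ops.py reflect that a mean reduction is typically performed as a sum followed by division by the number of replicas. An illustrative check of that relationship from the public API side (not the library internals), assuming TensorFlow 2.x:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
per_replica = strategy.run(lambda: tf.constant(4.0))

s = strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=None)
m = strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica, axis=None)
# MEAN should equal SUM divided by the number of replicas in sync.
tf.debugging.assert_near(m, s / strategy.num_replicas_in_sync)
```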
|
/external/tensorflow/tensorflow/tools/api/golden/v2/ |
D | tensorflow.distribute.-reduce-op.pbtxt |
  1   path: "tensorflow.distribute.ReduceOp"
  3   is_instance: "<enum \'ReduceOp\'>"
  6   mtype: "<enum \'ReduceOp\'>"
  10  mtype: "<enum \'ReduceOp\'>"
|
D | tensorflow.distribute.pbtxt | 52 name: "ReduceOp"
|
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.distribute.-reduce-op.pbtxt |
  1   path: "tensorflow.distribute.ReduceOp"
  3   is_instance: "<enum \'ReduceOp\'>"
  6   mtype: "<enum \'ReduceOp\'>"
  10  mtype: "<enum \'ReduceOp\'>"
|
D | tensorflow.distribute.pbtxt | 32 name: "ReduceOp"
|
/external/tensorflow/tensorflow/python/distribute/v1/ |
D | cross_device_ops_test.py |
  190  reduce_util.ReduceOp.MEAN,
  196  reduce_util.ReduceOp.MEAN,
  202  reduce_util.ReduceOp.SUM,
  208  reduce_util.ReduceOp.SUM,
  216  cross_device_ops.batch_reduce(reduce_util.ReduceOp.MEAN,
  222  cross_device_ops.batch_reduce(reduce_util.ReduceOp.SUM,
  255  if reduce_op == reduce_util.ReduceOp.SUM:
  259  assert reduce_op == reduce_util.ReduceOp.MEAN
  374  per_replica, devices[0], math_ops.add_n, reduce_util.ReduceOp.SUM)
  395  reduce_op=[reduce_util.ReduceOp.SUM, reduce_util.ReduceOp.MEAN],
  [all …]
|
/external/llvm-project/mlir/test/Dialect/Shape/ |
D | invalid.mlir |
  4   // expected-error@+1 {{ReduceOp body is expected to have 3 arguments}}
  15  // expected-error@+1 {{argument 0 of ReduceOp body is expected to be of IndexType}}
  28  …// expected-error@+1 {{argument 1 of ReduceOp body is expected to be of SizeType if the ReduceOp o…
  39  …// expected-error@+1 {{argument 1 of ReduceOp body is expected to be of IndexType if the ReduceOp …
  50  // expected-error@+1 {{type mismatch between argument 2 of ReduceOp body and initial value 0}}
|
/external/tensorflow/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/ |
D | lhlo_legalize_to_parallel_loops.cc |
  66   void ConvertToReductionOperator(Location loc, scf::ReduceOp reduce_op, in ConvertToReductionOperator()
  189  class ReduceOpConverter : public OpConversionPattern<lmhlo::ReduceOp> {
  191  using OpConversionPattern<lmhlo::ReduceOp>::OpConversionPattern;
  194  lmhlo::ReduceOp reduce_op, ArrayRef<Value> /*args*/, in matchAndRewrite()
  199  scf::ReduceOp scf_reduce_op = in matchAndRewrite()
  227  scf::ReduceOp CreateReduceOpInNestedParallelLoops( in CreateReduceOpInNestedParallelLoops()
  228  lmhlo::ReduceOp reduce_op, ConversionPatternRewriter* rewriter) const { in CreateReduceOpInNestedParallelLoops()
  295  return rewriter->create<scf::ReduceOp>(loc, elem); in CreateReduceOpInNestedParallelLoops()
  372  scf::ReduceOp reduce_op = CreateReduceOpInNestedParallelLoops( in matchAndRewrite()
  415  scf::ReduceOp CreateReduceOpInNestedParallelLoops( in CreateReduceOpInNestedParallelLoops()
  [all …]
|
D | lhlo_legalize_to_gpu.cc |
  48   class LhloReduceToGPULaunchConverter : public OpConversionPattern<ReduceOp> {
  53   ReduceOp reduce_op, ArrayRef<Value> args, in matchAndRewrite()
  184  target.addIllegalOp<ReduceOp>(); in runOnFunction()
|
D | hlo_legalize_to_lhlo.cc |
  406  struct HloToLhloReduceOpConverter : public BaseOpConversion<mhlo::ReduceOp> {
  408  using BaseOpConversion<mhlo::ReduceOp>::BaseOpConversion;
  411  mhlo::ReduceOp op, ArrayRef<Value> operands, in matchAndRewrite()
  426  auto new_op = rewriter.create<lmhlo::ReduceOp>(loc, llvm::None, buffer_args, in matchAndRewrite()
|
/external/tensorflow/tensorflow/python/keras/layers/ |
D | normalization_v2.py |
  154  y_sum = replica_ctx.all_reduce(reduce_util.ReduceOp.SUM, local_sum)
  155  y_squared_sum = replica_ctx.all_reduce(reduce_util.ReduceOp.SUM,
  157  global_batch_size = replica_ctx.all_reduce(reduce_util.ReduceOp.SUM,
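These normalization_v2.py hits sum per-replica statistics (sum, squared sum, batch size) from inside the replica function, as synced batch normalization does. A minimal sketch of the same `ReplicaContext.all_reduce` pattern through the public API, assuming TensorFlow 2.x; the function, names, and shapes are illustrative:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

@tf.function
def global_mean(local_batch):
  ctx = tf.distribute.get_replica_context()
  local_sum = tf.reduce_sum(local_batch)
  local_count = tf.cast(tf.size(local_batch), local_batch.dtype)
  # Sum the per-replica partial sums and counts across all replicas.
  global_sum = ctx.all_reduce(tf.distribute.ReduceOp.SUM, local_sum)
  global_count = ctx.all_reduce(tf.distribute.ReduceOp.SUM, local_count)
  return global_sum / global_count

per_replica_mean = strategy.run(global_mean, args=(tf.ones([8]),))
```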
|
/external/llvm-project/mlir/lib/Dialect/Shape/Transforms/ |
D | ShapeToShapeLowering.cpp | 41 ReduceOp reduce = rewriter.create<ReduceOp>(loc, op.shape(), init); in matchAndRewrite()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | ir_emission_utils.h |
  232  bool IsFusedReductionOutputConsistent(mlir::mhlo::ReduceOp inst,
  233  mlir::mhlo::ReduceOp first_reduce);
|
/external/tensorflow/tensorflow/compiler/mlir/tfr/examples/mnist/ |
D | mnist_train.py |
  163  tf.distribute.ReduceOp.MEAN, per_replica_accuracy, axis=None)
  165  tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None)
|
/external/tensorflow/tensorflow/python/keras/distribute/ |
D | ctl_correctness_test.py |
  133  total_loss += strategy.reduce(reduce_util.ReduceOp.SUM,
  144  total_loss += strategy.reduce(reduce_util.ReduceOp.SUM,
  190  return strategy.reduce(reduce_util.ReduceOp.SUM,
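ctl_correctness_test.py appears to exercise the custom-training-loop pattern: each replica scales its loss by the global batch size, and `strategy.reduce(ReduceOp.SUM, ...)` combines the per-replica losses into the step loss. A hedged sketch of that pattern; the model, optimizer, data, and GLOBAL_BATCH_SIZE are placeholders, not taken from the test:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
GLOBAL_BATCH_SIZE = 64

with strategy.scope():
  model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
  optimizer = tf.keras.optimizers.SGD()

@tf.function
def train_step(features, labels):

  def replica_step(x, y):
    with tf.GradientTape() as tape:
      pred = model(x, training=True)
      # Scale by the global batch size so that summing the per-replica losses
      # with ReduceOp.SUM yields the true per-example mean loss.
      loss = tf.nn.compute_average_loss(
          tf.keras.losses.mse(y, pred), global_batch_size=GLOBAL_BATCH_SIZE)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

  per_replica_loss = strategy.run(replica_step, args=(features, labels))
  return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_loss, axis=None)

step_loss = train_step(tf.random.normal([64, 4]), tf.random.normal([64, 1]))
```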
|
/external/tensorflow/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/ |
D | map_hlo_to_lhlo_op.h | 75 MAP_HLO_TO_LHLO(ReduceOp);
|
/external/tensorflow/tensorflow/python/keras/engine/ |
D | training_distributed_v1.py |
  108  reduce_op = ds_reduce_util.ReduceOp.SUM
  112  reduce_op = ds_reduce_util.ReduceOp.MEAN
  346  reduce_op = ds_reduce_util.ReduceOp.SUM
  350  reduce_op = ds_reduce_util.ReduceOp.MEAN
|
/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | utils.py | 151 return distribution.extended.batch_reduce_to(ds_reduce_util.ReduceOp.SUM,
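The single hit in optimizer_v2/utils.py appears to be the gradient all-reduce before `apply_gradients`: in cross-replica context, `batch_reduce_to` sums each (gradient, variable) pair across replicas and leaves the result where the variable lives. A minimal sketch, assuming TensorFlow 2.x; the gradient value is a stand-in (a plain tensor is accepted, as the strategy_common_test.py hits with `array_ops.identity(1.)` suggest):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

with strategy.scope():
  v = tf.Variable(0.0)

grad = tf.identity(1.0)  # pretend this is the per-replica gradient of `v`

# In cross-replica context: sum each (gradient, variable) pair across replicas,
# placing the reduced gradient on the devices of its variable.
summed = strategy.extended.batch_reduce_to(
    tf.distribute.ReduceOp.SUM, [(grad, v)])
summed_grad = summed[0]  # batch_reduce_to returns one reduced value per pair
```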
|
/external/llvm-project/mlir/lib/Conversion/ShapeToStandard/ |
D | ShapeToStandard.cpp |
  350  struct ReduceOpConverter : public OpConversionPattern<shape::ReduceOp> {
  355  matchAndRewrite(shape::ReduceOp op, ArrayRef<Value> operands,
  361  ReduceOpConverter::matchAndRewrite(shape::ReduceOp op, ArrayRef<Value> operands, in matchAndRewrite()
  368  shape::ReduceOp::Adaptor transformed(operands); in matchAndRewrite()
|