/external/tensorflow/tensorflow/compiler/tf2xla/ops/ |
D | xla_ops.cc |
    537   std::vector<int64> dimensions_to_reduce; in __anonbbc1c2d10502() local
    539   c->GetAttr("dimensions_to_reduce", &dimensions_to_reduce)); in __anonbbc1c2d10502()
    540   std::set<int64> dims_set(dimensions_to_reduce.begin(), in __anonbbc1c2d10502()
    541   dimensions_to_reduce.end()); in __anonbbc1c2d10502()
    545   const int dimensions_to_reduce_size = dimensions_to_reduce.size(); in __anonbbc1c2d10502()
    547   dims_set.size() != dimensions_to_reduce.size() || in __anonbbc1c2d10502()
    548   !absl::c_all_of(dimensions_to_reduce, dim_in_range)) { in __anonbbc1c2d10502()
    553   0, c->UnknownShapeOfRank(rank - dimensions_to_reduce.size())); in __anonbbc1c2d10502()
    587   std::vector<int64> dimensions_to_reduce; in __anonbbc1c2d10702() local
    589   c->GetAttr("dimensions_to_reduce", &dimensions_to_reduce)); in __anonbbc1c2d10702()
    [all …]
|
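The xla_ops.cc matches above validate the XlaReduce attribute before setting the output rank. As a rough illustration only, the standalone helper below mirrors that check (the name ValidateReduceDims is hypothetical, not part of the TensorFlow sources): dimensions must be unique and in [0, rank), and the output rank drops by one per reduced dimension.

    #include <cstdint>
    #include <set>
    #include <vector>

    #include "absl/algorithm/container.h"
    #include "absl/status/status.h"

    // Hypothetical standalone mirror of the XlaReduce shape-function check:
    // dimensions_to_reduce must be unique and within [0, rank); the inferred
    // output rank is rank - dimensions_to_reduce.size().
    absl::Status ValidateReduceDims(
        int64_t rank, const std::vector<int64_t>& dimensions_to_reduce,
        int64_t* output_rank) {
      const std::set<int64_t> dims_set(dimensions_to_reduce.begin(),
                                       dimensions_to_reduce.end());
      const auto dim_in_range = [rank](int64_t d) { return d >= 0 && d < rank; };
      if (dims_set.size() != dimensions_to_reduce.size() ||
          !absl::c_all_of(dimensions_to_reduce, dim_in_range)) {
        return absl::InvalidArgumentError(
            "dimensions_to_reduce contains an out-of-range or repeated dimension");
      }
      *output_rank = rank - static_cast<int64_t>(dimensions_to_reduce.size());
      return absl::OkStatus();
    }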
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | reduction_ops.cc |
    133   const std::vector<int64>& dimensions_to_reduce) override { in BuildFinalizer() argument
    134   if (dimensions_to_reduce.empty()) { in BuildFinalizer()
    137   auto divisor = xla::GetDimensionSize(input, dimensions_to_reduce[0]); in BuildFinalizer()
    138   for (int i = 1; i < dimensions_to_reduce.size(); i++) { in BuildFinalizer()
    139   auto size = xla::GetDimensionSize(input, dimensions_to_reduce[i]); in BuildFinalizer()
|
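The reduction_ops.cc matches come from a reduction finalizer that divides the accumulated result by the product of the reduced dimension sizes. A minimal sketch of that pattern, assuming F32 data and a non-empty dimensions_to_reduce (the helper name DivideByReducedExtent is hypothetical):

    #include <vector>

    #include "tensorflow/compiler/xla/client/xla_builder.h"

    // Sketch: divide a summed reduction by the product of the reduced extents,
    // following the xla::GetDimensionSize pattern shown above. Assumes F32 data
    // and a non-empty dimensions_to_reduce.
    xla::XlaOp DivideByReducedExtent(
        xla::XlaOp reduce_output, xla::XlaOp input,
        const std::vector<xla::int64>& dimensions_to_reduce) {
      // GetDimensionSize yields an S32 scalar for each reduced dimension.
      xla::XlaOp divisor = xla::GetDimensionSize(input, dimensions_to_reduce[0]);
      for (size_t i = 1; i < dimensions_to_reduce.size(); ++i) {
        divisor = xla::Mul(divisor,
                           xla::GetDimensionSize(input, dimensions_to_reduce[i]));
      }
      // Convert the element count to the accumulator type before dividing.
      return xla::Div(reduce_output, xla::ConvertElementType(divisor, xla::F32));
    }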
D | quantize_and_dequantize_op.cc |
    79    std::vector<int64> dimensions_to_reduce; in Compile() local
    88    dimensions_to_reduce.reserve(input_rank - 1); in Compile()
    91    dimensions_to_reduce.push_back(i); in Compile()
    95    dimensions_to_reduce); in Compile()
    97    dimensions_to_reduce); in Compile()
|
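In quantize_and_dequantize_op.cc the reduction runs over every axis except the quantization axis, so the min/max statistics are computed per slice. A minimal sketch of that loop (AllDimsExcept is a hypothetical name):

    #include <cstdint>
    #include <vector>

    // Sketch: reduce over every input dimension except the quantization `axis`,
    // mirroring the loop in quantize_and_dequantize_op.cc above.
    std::vector<int64_t> AllDimsExcept(int64_t input_rank, int64_t axis) {
      std::vector<int64_t> dimensions_to_reduce;
      dimensions_to_reduce.reserve(input_rank - 1);
      for (int64_t i = 0; i < input_rank; ++i) {
        if (i != axis) dimensions_to_reduce.push_back(i);
      }
      return dimensions_to_reduce;
    }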
D | reduction_ops.h | 58 const std::vector<int64>& dimensions_to_reduce);
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | xla_ops_test.py |
    351   x, init_value=0, dimensions_to_reduce=dims, reducer=sum_reducer)
    380   x, init_value=1, dimensions_to_reduce=dims, reducer=mul_reducer)
    413   dimensions_to_reduce=dims,
|
/external/tensorflow/tensorflow/compiler/mlir/xla/ir/ |
D | mlir_hlo_builder.h | 146 absl::Span<const int64> dimensions_to_reduce) override;
|
D | mlir_hlo_builder.cc |
    158   absl::Span<const int64> dimensions_to_reduce) { in ReduceInternal() argument
    165   GetI64ElementsAttr(dimensions_to_reduce, &builder_)); in ReduceInternal()
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | shape_inference.h | 161 absl::Span<const int64> dimensions_to_reduce,
|
D | hlo_instruction.h |
    809   absl::Span<const int64> dimensions_to_reduce,
    825   absl::Span<const int64> dimensions_to_reduce,
|
D | hlo_evaluator.cc |
    2407  absl::Span<const int64> dimensions_to_reduce(reduce->dimensions()); in HandleReduce() local
    2416  operand_shapes, dimensions_to_reduce, in HandleReduce()
    2453  for (const int64 dim : dimensions_to_reduce) { in HandleReduce()
|
D | shape_inference.cc |
    2115  absl::Span<const int64> dimensions_to_reduce, in InferReduceShape() argument
    2142  for (int64 dimension : dimensions_to_reduce) { in InferReduceShape()
    2158  for (int64 dim_to_reduce : dimensions_to_reduce) { in InferReduceShape()
|
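InferReduceShape drops each reduced dimension from the operand shape. An illustrative call, patterned on the ExpectInferredReduceShape helper in shape_inference_test.cc: reducing f32[8,16,4] over dimension 1 with a scalar-add computation should infer f32[8,4].

    #include "tensorflow/compiler/xla/service/shape_inference.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Illustrative use of ShapeInference::InferReduceShape; shapes and values
    // here are examples, not taken from the listed sources.
    void InferReduceShapeExample() {
      const xla::Shape arg = xla::ShapeUtil::MakeShape(xla::F32, {8, 16, 4});
      const xla::Shape scalar = xla::ShapeUtil::MakeShape(xla::F32, {});
      const xla::ProgramShape add =
          xla::ShapeUtil::MakeProgramShape({scalar, scalar}, scalar);
      auto inferred = xla::ShapeInference::InferReduceShape(
          {&arg, &scalar}, /*dimensions_to_reduce=*/{1}, add);
      // On success, the inferred shape is f32[8,4]: dimension 1 is dropped.
    }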
D | hlo_parser.cc |
    1819  optional<std::vector<int64>> dimensions_to_reduce; in ParseInstructionRhs() local
    1821  &dimensions_to_reduce}; in ParseInstructionRhs()
    1837  arg_shapes, *dimensions_to_reduce, in ParseInstructionRhs()
    1850  *dimensions_to_reduce, *reduce_computation)); in ParseInstructionRhs()
|
D | hlo_instruction.cc |
    1288  absl::Span<const int64> dimensions_to_reduce, in CreateReduce() argument
    1291  shape, {operand, init_value}, dimensions_to_reduce, reduce_computation)); in CreateReduce()
    1298  absl::Span<const int64> dimensions_to_reduce, in CreateReduce() argument
    1305  shape, all_args, dimensions_to_reduce, reduce_computation); in CreateReduce()
|
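A rough sketch of the two-operand HloInstruction::CreateReduce overload listed above, wiring a reduce into a computation under construction; the scalar-add `add_computation` (and its enclosing HloModule) is assumed to exist already, and the function name is hypothetical.

    #include "tensorflow/compiler/xla/literal_util.h"
    #include "tensorflow/compiler/xla/service/hlo_computation.h"
    #include "tensorflow/compiler/xla/service/hlo_instruction.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Sketch only: `add_computation` is assumed to be an existing scalar-add
    // HloComputation*; building it and the enclosing HloModule is omitted.
    void AddRowSumReduce(xla::HloComputation::Builder* b,
                         xla::HloComputation* add_computation) {
      xla::HloInstruction* operand =
          b->AddInstruction(xla::HloInstruction::CreateParameter(
              0, xla::ShapeUtil::MakeShape(xla::F32, {8, 16}), "x"));
      xla::HloInstruction* zero = b->AddInstruction(
          xla::HloInstruction::CreateConstant(xla::LiteralUtil::Zero(xla::F32)));
      // Reducing f32[8,16] over dimension 1 yields f32[8].
      b->AddInstruction(xla::HloInstruction::CreateReduce(
          xla::ShapeUtil::MakeShape(xla::F32, {8}), operand, zero,
          /*dimensions_to_reduce=*/{1}, add_computation));
    }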
D | hlo_instructions.cc |
    903   absl::Span<const int64> dimensions_to_reduce, in HloReduceInstruction() argument
    906   dimensions_(dimensions_to_reduce.begin(), dimensions_to_reduce.end()) { in HloReduceInstruction()
|
D | shape_inference_test.cc |
    65    absl::Span<const int64> dimensions_to_reduce) { in ExpectInferredReduceShape() argument
    68    {&arg, &f32_}, dimensions_to_reduce, to_apply); in ExpectInferredReduceShape()
|
D | hlo_instructions.h | 589 absl::Span<const int64> dimensions_to_reduce,
|
/external/tensorflow/tensorflow/compiler/xla/client/ |
D | xla_builder.h |
    685   absl::Span<const int64> dimensions_to_reduce);
    690   absl::Span<const int64> dimensions_to_reduce);
    695   absl::Span<const int64> dimensions_to_reduce);
    1270  absl::Span<const int64> dimensions_to_reduce);
    1274  absl::Span<const int64> dimensions_to_reduce);
    2146  absl::Span<const int64> dimensions_to_reduce);
    2153  absl::Span<const int64> dimensions_to_reduce);
|
D | xla_builder.cc |
    2426  absl::Span<const int64> dimensions_to_reduce) { in Reduce() argument
    2429  dimensions_to_reduce); in Reduce()
    2435  absl::Span<const int64> dimensions_to_reduce) { in Reduce() argument
    2454  operand_shape_ptrs, dimensions_to_reduce, called_program_shape)); in Reduce()
    2456  dimensions_to_reduce); in Reduce()
    2463  absl::Span<const int64> dimensions_to_reduce) { in ReduceInternal() argument
    2468  for (int64 dim : dimensions_to_reduce) { in ReduceInternal()
    4599  absl::Span<const int64> dimensions_to_reduce) { in Reduce() argument
    4601  dimensions_to_reduce); in Reduce()
    4609  absl::Span<const int64> dimensions_to_reduce) { in Reduce() argument
    [all …]
|
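From the client side, the Reduce entry points listed above are reached through xla::Reduce on an XlaBuilder. A small sketch, assuming the scalar-add helper from client/lib/arithmetic.h: summing a f32[128,64] parameter over dimension 1 produces f32[128].

    #include "tensorflow/compiler/xla/client/lib/arithmetic.h"
    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Sketch of a client-side Reduce call: row sums of a f32[128,64] parameter.
    void BuildRowSum() {
      xla::XlaBuilder b("row_sum");
      xla::XlaOp x = xla::Parameter(
          &b, 0, xla::ShapeUtil::MakeShape(xla::F32, {128, 64}), "x");
      xla::XlaComputation add = xla::CreateScalarAddComputation(xla::F32, &b);
      xla::Reduce(x, xla::ConstantR0<float>(&b, 0.0f), add,
                  /*dimensions_to_reduce=*/{1});
      auto computation = b.Build();  // StatusOr<XlaComputation>
    }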
/external/tensorflow/tensorflow/compiler/xla/python/ |
D | xla_client_test.py |
    1614  dimensions_to_reduce=[0])
    1631  dimensions_to_reduce=[dim])
    1647  dimensions_to_reduce=dims)
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/ir/ |
D | tf_generated_ops.td |
    17926 I64ArrayAttr:$dimensions_to_reduce,
    18166 I64ArrayAttr:$dimensions_to_reduce,
|