/external/tensorflow/tensorflow/compiler/xla/service/
D | async_collective_creator.cc |
      84   std::vector<const Shape*> operand_shapes;   in Run() local
      85   operand_shapes.reserve(ag->operand_count());   in Run()
      87   operand_shapes.push_back(&op->shape());   in Run()
      91   ? ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes)   in Run()
      92   : *operand_shapes[0],   in Run()
     126   std::vector<const Shape*> operand_shapes;   in Run() local
     127   absl::c_transform(cp->operands(), std::back_inserter(operand_shapes),   in Run()
     134   operand_shapes)   in Run()
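Every hit in this pass follows one idiom: collect a pointer to each operand's Shape (no copies), then wrap the pointers in a tuple shape when the op has several operands. A minimal sketch of that idiom, distilled from the Run() snippets above; only the helper name is hypothetical:

    // Hypothetical helper: the async start op's shape is a tuple of the
    // operand shapes when there are several operands, else the single
    // operand shape itself.
    Shape StartShapeSketch(const HloInstruction* ag) {
      std::vector<const Shape*> operand_shapes;
      operand_shapes.reserve(ag->operand_count());
      for (const HloInstruction* op : ag->operands()) {
        operand_shapes.push_back(&op->shape());  // alias, not a copy
      }
      return operand_shapes.size() > 1
                 ? ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes)
                 : *operand_shapes[0];
    }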
|
D | shape_inference.h |
      74   HloOpcode opcode, absl::Span<const Shape* const> operand_shapes);
     129   absl::Span<const Shape* const> operand_shapes,
     135   absl::Span<const Shape* const> operand_shapes,
     146   absl::Span<const Shape* const> operand_shapes);
     151   absl::Span<const Shape* const> operand_shapes, int64_t scatter_dimension,
     156   absl::Span<const Shape* const> operand_shapes);
     170   absl::Span<const Shape* const> operand_shapes);
     174   absl::Span<const Shape* const> operand_shapes);
     178   absl::Span<const Shape* const> operand_shapes);
|
D | shape_inference.cc |
    1069   std::vector<const Shape*> operand_shapes;   in InferVariadicOpShape() local
    1070   operand_shapes.reserve(operands.size());   in InferVariadicOpShape()
    1072   operand_shapes.push_back(&operand->shape());   in InferVariadicOpShape()
    1074   return InferVariadicOpShape(opcode, operand_shapes);   in InferVariadicOpShape()
    1078   HloOpcode opcode, absl::Span<const Shape* const> operand_shapes) {   in InferVariadicOpShape() argument
    1079   for (const Shape* shape : operand_shapes) {   in InferVariadicOpShape()
    1085   result.mutable_tuple_shapes()->reserve(operand_shapes.size());   in InferVariadicOpShape()
    1086   for (const Shape* shape : operand_shapes) {   in InferVariadicOpShape()
    1092   if (operand_shapes.size() == 1) {   in InferVariadicOpShape()
    1093   return *operand_shapes[0];   in InferVariadicOpShape()
    [all …]
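Lines 1069–1074 are the instruction-level convenience overload: it lowers operands to Shape pointers and delegates to the span-of-pointers overload at 1078, which assembles a tuple shape for a tuple op and returns a lone operand's shape unchanged. A sketch of the pointer overload's two visible branches, with validation and the remaining opcodes elided as in the index; the function name is a stand-in:

    // Sketch of the span overload shown above: kTuple builds an owning
    // tuple shape; a one-operand variadic op passes its shape through.
    StatusOr<Shape> InferVariadicSketch(
        HloOpcode opcode, absl::Span<const Shape* const> operand_shapes) {
      if (opcode == HloOpcode::kTuple) {
        std::vector<Shape> elements;
        elements.reserve(operand_shapes.size());
        for (const Shape* shape : operand_shapes) {
          elements.push_back(*shape);  // tuples own their element shapes
        }
        return ShapeUtil::MakeTupleShape(elements);
      }
      if (operand_shapes.size() == 1) {
        return *operand_shapes[0];
      }
      return InvalidArgument("opcode not handled in this sketch");
    }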
|
D | hlo_verifier.cc |
     175   std::vector<const Shape*> operand_shapes;   in HandleConcatenate() local
     177   operand_shapes.push_back(&operand->shape());   in HandleConcatenate()
     181   operand_shapes, concatenate->concatenate_dimension()));   in HandleConcatenate()
     384   std::vector<const Shape*> operand_shapes;   in HandleAllGather() local
     386   operand_shapes.push_back(&operand->shape());   in HandleAllGather()
     390   operand_shapes, ag->all_gather_dimension(), shard_count));   in HandleAllGather()
     397   std::vector<const Shape*> operand_shapes;   in HandleAllGatherStart() local
     399   operand_shapes.push_back(&operand->shape());   in HandleAllGatherStart()
     403   operand_shapes, ag->all_gather_dimension(), shard_count));   in HandleAllGatherStart()
     419   std::vector<const Shape*> operand_shapes;   in HandleAllReduce() local
    [all …]
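Each verifier handler re-derives the expected shape from the operand shapes and hands the result to CheckShape for comparison against the shape the instruction actually carries. A sketch of the all-gather handler around lines 384–390, assuming ag has already been cast to the derived all-gather instruction type (where all_gather_dimension() lives); the handler name is a stand-in:

    // Sketch: recompute the shape an all-gather should have and let the
    // verifier's CheckShape compare it with the recorded one.
    Status HandleAllGatherSketch(HloAllGatherInstruction* ag,
                                 int64_t shard_count) {
      std::vector<const Shape*> operand_shapes;
      for (const HloInstruction* operand : ag->operands()) {
        operand_shapes.push_back(&operand->shape());
      }
      return CheckShape(
          ag, ShapeInference::InferAllGatherShape(
                  operand_shapes, ag->all_gather_dimension(), shard_count));
    }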
|
D | all_reduce_combiner.cc |
      63   std::vector<const Shape*> operand_shapes;   in CombineAllReduces() local
      76   operand_shapes.push_back(&operand->shape());   in CombineAllReduces()
      84   ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes), operands, reduction,   in CombineAllReduces()
|
D | hlo_creation_utils.cc |
     282   std::vector<const Shape*> operand_shapes;   in MakeConcatHlo() local
     283   absl::c_transform(operands, std::back_inserter(operand_shapes),   in MakeConcatHlo()
     287   operand_shapes, dimension));   in MakeConcatHlo()
     357   std::vector<const Shape*> operand_shapes;   in MakeMapHlo() local
     361   operand_shapes.push_back(&operand->shape());   in MakeMapHlo()
     369   operand_shapes, map_computation->ComputeProgramShape(), map_dims));   in MakeMapHlo()
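MakeConcatHlo and MakeMapHlo do the same pointer-lowering as the passes above, but MakeConcatHlo spells it with absl::c_transform; the lambda is elided by the index and presumably yields &instr->shape(). A sketch of that one-liner:

    std::vector<const Shape*> operand_shapes;
    absl::c_transform(operands, std::back_inserter(operand_shapes),
                      [](const HloInstruction* instr) { return &instr->shape(); });
    // operand_shapes aliases the operands' shapes: valid only while the
    // instructions outlive it, which holds inside these creation helpers.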
|
D | hlo_instruction.cc |
     723   std::vector<Shape> operand_shapes;   in CreateFromProto() local
     726   operand_shapes.reserve(operand_shapes_with_layout.size());   in CreateFromProto()
     728   operand_shapes.emplace_back(shape_proto);   in CreateFromProto()
     732   operand_shapes, proto.backend_config());   in CreateFromProto()
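CreateFromProto is the one site in this list that stores Shape values rather than pointers: shapes decoded from a proto (a custom call, judging by the backend_config argument at 732) have no live instruction to point into, so each ShapeProto is materialized into an owned Shape, as the emplace_back at 728 implies. A sketch of that decode loop:

    std::vector<Shape> operand_shapes;
    operand_shapes.reserve(operand_shapes_with_layout.size());
    for (const ShapeProto& shape_proto : operand_shapes_with_layout) {
      operand_shapes.emplace_back(shape_proto);  // deserialize an owned copy
    }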
|
D | hlo_evaluator.cc |
    3715   absl::InlinedVector<const Shape*, 1> operand_shapes;   in HandleReduce() local
    3717   operand_shapes.push_back(&operand->shape());   in HandleReduce()
    3721   operand_shapes, dimensions_to_reduce,   in HandleReduce()
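HandleReduce reaches for absl::InlinedVector<const Shape*, 1> instead of std::vector: small pointer lists stay in inline storage and only spill to the heap past the inline capacity, a cheap win on the evaluator's per-instruction path. The collection step is otherwise the familiar one:

    absl::InlinedVector<const Shape*, 1> operand_shapes;
    for (const HloInstruction* operand : reduce->operands()) {
      operand_shapes.push_back(&operand->shape());  // same aliasing idiom
    }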
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | mlir_emitter.cc |
      95   llvm::ArrayRef<Shape> operand_shapes, llvm::Value *result_ptr,   in EmitMlirFuncAndCall() argument
     105   for (int i = 0; i != operand_shapes.size(); ++i) {   in EmitMlirFuncAndCall()
     108   ConvertTensorShapeToMemRefType(operand_shapes[i], mlir_builder));   in EmitMlirFuncAndCall()
     138   for (int i = 0; i != operand_shapes.size(); ++i) {   in EmitMlirFuncAndCall()
     139   BuildViewForBuffer(&op_vals, b, operand_shapes[i], operand_ptrs[i]);   in EmitMlirFuncAndCall()
|
D | mlir_emitter.h |
      37   llvm::ArrayRef<Shape> operand_shapes, llvm::Value *result_ptr,
|
D | dot_op_emitter.cc |
     266   Shape operand_shapes[] = {dot_info_.lhs_shape, dot_info_.rhs_shape};   in EmitLinalgMatmul() local
     282   mlir_context_, b_, dot_info_.result_shape, operand_shapes, target_ptr,   in EmitLinalgMatmul()
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/ |
D | fold_broadcast.cc |
     167   std::array<llvm::ArrayRef<int64_t>, 2> operand_shapes;   in RewriteOp() local
     168   operand_shapes[i] = broadcast_arg_type.getShape();   in RewriteOp()
     169   operand_shapes[1 - i] = argument_type.getShape();   in RewriteOp()
     174   if (!get_broadcasted_shape(operand_shapes[0], operand_shapes[1],   in RewriteOp()
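Here operand_shapes is a two-slot array ordered so that the broadcast operand's pre-broadcast shape lands in slot i (its original operand position) and the other operand's shape in slot 1 - i, preserving left/right order for the compatibility check at 174. A sketch of the trick; mlir::OpTrait::util::getBroadcastedShape is a hypothetical stand-in for the pass's get_broadcasted_shape callback, and the function name is invented:

    #include <array>
    #include <cstdint>
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include "mlir/Dialect/Traits.h"

    // Sketch of lines 167-174: slot i holds the broadcast input's shape,
    // slot 1 - i the other operand's, so operand order survives the swap.
    bool FoldableSketch(unsigned i, llvm::ArrayRef<int64_t> broadcast_input,
                        llvm::ArrayRef<int64_t> other,
                        llvm::SmallVectorImpl<int64_t> &result) {
      std::array<llvm::ArrayRef<int64_t>, 2> operand_shapes;
      operand_shapes[i] = broadcast_input;
      operand_shapes[1 - i] = other;
      return mlir::OpTrait::util::getBroadcastedShape(operand_shapes[0],
                                                      operand_shapes[1], result);
    }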
|
/external/tensorflow/tensorflow/compiler/xla/python/ |
D | py_client.cc |
     481   absl::Span<Shape const> operand_shapes) {   in CreateCallbackArgs() argument
     482   std::vector<CpuCallback::Arg> callback_args(operand_shapes.size());   in CreateCallbackArgs()
     483   for (int i = 0; i < operand_shapes.size(); ++i) {   in CreateCallbackArgs()
     484   Shape shape = operand_shapes[i];   in CreateCallbackArgs()
     542   pybind11::function callable, absl::Span<Shape const> operand_shapes,   in MakePythonCallbackUsingHostSendAndRecv() argument
     549   TF_ASSIGN_OR_RETURN(auto callback_args, CreateCallbackArgs(operand_shapes));   in MakePythonCallbackUsingHostSendAndRecv()
     575   assign_arg_info(operand_shapes, send_channel_ids, host_callback->operands);   in MakePythonCallbackUsingHostSendAndRecv()
     591   pybind11::function callable, absl::Span<Shape const> operand_shapes,   in GetEmitPythonCallbackDescriptor() argument
     602   TF_ASSIGN_OR_RETURN(auto callback_args, CreateCallbackArgs(operand_shapes));   in GetEmitPythonCallbackDescriptor()
     692   std::vector<Shape> operand_shapes(operands.size());   in EmitPythonCallback() local
    [all …]
|
D | py_client.h |
     194   absl::Span<Shape const> operand_shapes,
     217   pybind11::function callable, absl::Span<Shape const> operand_shapes,
|
D | xla_client_test.py |
    2722   operand_shapes=[xla_client.Shape.scalar_shape(np.dtype(np.float32))],
    2779   operand_shapes=[xla_client.Shape.scalar_shape(np.dtype(np.uint32))],
|
/external/tensorflow/tensorflow/compiler/xla/client/ |
D | xla_builder.cc |
     233   std::vector<Shape> operand_shapes;   in GetOperandShapes() local
     234   operand_shapes.reserve(operands.size());   in GetOperandShapes()
     237   operand_shapes.push_back(*shape);   in GetOperandShapes()
     239   return operand_shapes;   in GetOperandShapes()
     894   TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));   in Call()
     895   absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),   in Call()
    1128   TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));   in ConcatInDim()
    1129   absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),   in ConcatInDim()
    1312   TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(elements));   in Tuple()
    1313   absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),   in Tuple()
    [all …]
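On the client side the operands are XlaOp handles rather than HloInstruction pointers, so GetOperandShapes copies each shape out of the builder into an owned vector, and each call site (Call, ConcatInDim, Tuple, …) re-lowers that vector to pointers for shape inference. A sketch of both halves, assuming XlaBuilder::GetShapePtr is the accessor the dereference at 237 suggests; the free-function form and operand_shape_ptrs are taken from the call sites above:

    // Owned copies first: the builder hands back Shape pointers, which are
    // dereferenced into a vector the caller can keep.
    StatusOr<std::vector<Shape>> GetOperandShapesSketch(
        XlaBuilder* builder, absl::Span<const XlaOp> operands) {
      std::vector<Shape> operand_shapes;
      operand_shapes.reserve(operands.size());
      for (XlaOp operand : operands) {
        TF_ASSIGN_OR_RETURN(const Shape* shape, builder->GetShapePtr(operand));
        operand_shapes.push_back(*shape);
      }
      return operand_shapes;
    }

    // Then, at a call site, back to pointers for the inference API:
    //   absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
    //                     [](const Shape& shape) { return &shape; });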
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | gpu_autotuning.proto |
      13   repeated xla.ShapeProto operand_shapes = 2;   field
|
D | ir_emission_utils.cc |
     784   const std::vector<Shape>& operand_shapes, const Shape& output_shape) {   in FindTranspose021DimsAndParameters() argument
     787   for (int64_t operand_idx = 0; operand_idx < operand_shapes.size();   in FindTranspose021DimsAndParameters()
     790   ShapeUtil::FindTranspose021(operand_shapes[operand_idx], output_shape);   in FindTranspose021DimsAndParameters()
|
D | ir_emitter_unnested.cc |
    1504   std::vector<Shape>* operand_shapes,   in ProcessFusionForConversion() argument
    1525   operand_shapes->push_back(std::move(shape));   in ProcessFusionForConversion()
    2448   std::vector<Shape> operand_shapes, output_shapes;   in GetOrCreateSubComputationFromRegion() local
    2453   ProcessFusionForConversion(region, &operand_shapes, &output_shapes));   in GetOrCreateSubComputationFromRegion()
    2476   CHECK_EQ(operand_shapes.size(), fused_computation->num_parameters());   in GetOrCreateSubComputationFromRegion()
    2480   ->mutable_layout() = operand_shapes[i].layout();   in GetOrCreateSubComputationFromRegion()
|
/external/tensorflow/tensorflow/compiler/xla/python/xla_extension/ |
D | __init__.pyi |
     397   self, callable: Callable, operand_shapes: Sequence[XlaOp],
     405   … operand_shapes: Any, send_channel_ids: Any, recv_channel_ids: Any) -> Any: ...
|
/external/tensorflow/tensorflow/compiler/xla/service/spmd/ |
D | spmd_partitioner.cc |
    2841   std::vector<const Shape*> operand_shapes;   in HandleSingleDevice() local
    2845   operand_shapes.reserve(old_operands_size);   in HandleSingleDevice()
    2848   operand_shapes.push_back(&operand->shape());   in HandleSingleDevice()
    2851   auto operand_shape = ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes);   in HandleSingleDevice()
    2867   HloInstruction::CreateGetTupleElement(*operand_shapes[i], param, i)));   in HandleSingleDevice()
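HandleSingleDevice is the round trip of the MakeTupleShapeWithPtrs idiom: all operands are packed behind one tuple-shaped parameter, then peeled back out with get-tuple-element, reusing the saved pointers as the element shapes. A sketch of the unpack side; builder is a hypothetical HloComputation::Builder and parameter number 0 is assumed:

    auto operand_shape = ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes);
    HloInstruction* param = builder.AddInstruction(
        HloInstruction::CreateParameter(0, operand_shape, "param"));
    for (int64_t i = 0; i < operand_shapes.size(); ++i) {
      // Element i of the tuple has exactly the shape recorded for operand i.
      HloInstruction* element = builder.AddInstruction(
          HloInstruction::CreateGetTupleElement(*operand_shapes[i], param, i));
      (void)element;  // stands in for the i-th original operand
    }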
|