
Searched refs: buffer_shape (Results 1 – 19 of 19, sorted by relevance)

/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
scatter_nd_op.cc:34  Status ValidateUpdateShape(const TensorShape& buffer_shape,  in ValidateUpdateShape() argument
54 ", buffer_shape: ", buffer_shape.DebugString(), in ValidateUpdateShape()
63 if (buffer_shape.dims() < in ValidateUpdateShape()
68 batch_dim + buffer_shape.dims() - num_index_dims) { in ValidateUpdateShape()
78 buffer_shape.dim_size(d + num_index_dims)) { in ValidateUpdateShape()
95 TensorShape buffer_shape; in Compile() local
96 OP_REQUIRES_OK(context, context->ConstantInputAsShape(2, &buffer_shape)); in Compile()
99 context, TensorShapeUtils::IsVectorOrHigher(buffer_shape), in Compile()
101 "got shape: ", buffer_shape.DebugString())); in Compile()
105 buffer_shape.num_elements() > 0 || (indices_shape.num_elements() == 0 && in Compile()
[all …]
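
Note: the scatter_nd matches above implement the shape rule for ScatterNd-style updates: the updates tensor must have shape indices.shape[:batch_dim] followed by buffer_shape[num_index_dims:], where num_index_dims is the size of the trailing indices dimension. A minimal standalone sketch of that rule, with plain std::vector<int64_t> standing in for tensorflow::TensorShape (the function name and layout here are illustrative, not the TF code):

    #include <cstdint>
    #include <vector>

    using Dims = std::vector<int64_t>;  // stand-in for TensorShape

    // Sketch: updates must equal indices[:batch_dim] ++ buffer[num_index_dims:].
    bool ValidUpdateShape(const Dims& buffer, const Dims& indices,
                          const Dims& updates) {
      if (indices.empty()) return false;
      const int64_t num_index_dims = indices.back();  // trailing dim indexes buffer
      const int64_t batch_dim = static_cast<int64_t>(indices.size()) - 1;
      if (num_index_dims > static_cast<int64_t>(buffer.size())) return false;
      if (static_cast<int64_t>(updates.size()) !=
          batch_dim + static_cast<int64_t>(buffer.size()) - num_index_dims)
        return false;
      for (int64_t d = 0; d < batch_dim; ++d)   // batch dims must match indices
        if (updates[d] != indices[d]) return false;
      for (int64_t d = batch_dim; d < static_cast<int64_t>(updates.size()); ++d)
        if (updates[d] != buffer[d - batch_dim + num_index_dims]) return false;
      return true;
    }

For example, buffer [4, 5] with indices [2, 1] (num_index_dims = 1) accepts updates of shape [2, 5] and rejects [2, 4].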
segment_reduction_ops.cc:83  TensorShape buffer_shape = data_shape;  in Compile() local
84 buffer_shape.RemoveDimRange(0, indices_shape.dims()); in Compile()
85 buffer_shape.InsertDim(0, num_segments); in Compile()
88 xla::Broadcast(InitialValue(builder), buffer_shape.dim_sizes()); in Compile()
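
Note: both this kernel and its TPU twin under core/tpu/kernels/xla (below) derive the reduction buffer shape the same way: drop the leading indices.dims() dimensions from the data shape, prepend num_segments, then broadcast the reduction's initial value into that shape. A standalone sketch of the dims arithmetic, assuming plain vectors rather than TensorShape:

    #include <cstdint>
    #include <vector>

    // Sketch: output dims for an unsorted segment reduction.
    // Requires indices_rank <= data_shape.size().
    std::vector<int64_t> SegmentBufferShape(const std::vector<int64_t>& data_shape,
                                            int64_t indices_rank,
                                            int64_t num_segments) {
      // Drop the dims covered by the segment indices (RemoveDimRange(0, rank))...
      std::vector<int64_t> buffer(data_shape.begin() + indices_rank,
                                  data_shape.end());
      // ...then prepend the segment count (InsertDim(0, num_segments)).
      buffer.insert(buffer.begin(), num_segments);
      return buffer;  // e.g. data [8, 3, 4], rank-1 indices, 5 segments -> [5, 3, 4]
    }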
tensor_list_utils.cc:139  Status GetTensorListBufferShape(xla::XlaOp list, xla::Shape* buffer_shape) {  in GetTensorListBufferShape() argument
146 *buffer_shape = xla::ShapeUtil::GetTupleElementShape(list_shape, 0); in GetTensorListBufferShape()
213 auto buffer_shape = xla::ShapeUtil::GetTupleElementShape(list_shape, 0); in GetLeadingDimForTensorList() local
214 *leading_dim_is_dynamic = buffer_shape.is_dynamic_dimension(0); in GetLeadingDimForTensorList()
216 *leading_dim = buffer_shape.dimensions(0); in GetLeadingDimForTensorList()
517 const xla::Shape& buffer_shape = in ExecuteTensorListGetItem() local
519 std::vector<xla::XlaOp> start_indices(buffer_shape.dimensions_size(), in ExecuteTensorListGetItem()
524 xla::SpanToVector(buffer_shape.dimensions()); in ExecuteTensorListGetItem()
531 for (int64_t i = 1; i < buffer_shape.dimensions_size(); ++i) { in ExecuteTensorListGetItem()
532 if (buffer_shape.is_dynamic_dimension(i)) { in ExecuteTensorListGetItem()
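
Note: these matches show the tensor-list representation used throughout tf2xla: an XLA tuple whose element 0 is the flat buffer, so the buffer shape is that tuple element's shape and the list length is its (possibly dynamic) leading dimension. ExecuteTensorListGetItem reads one element with a dynamic slice whose start indices are zero everywhere except dim 0 and whose sizes are the buffer dims with dim 0 set to 1. A sketch of those slice bounds, with vectors in place of xla::Shape (the helper name is hypothetical):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Sketch: slice bounds for reading element `index` of a list buffer
    // shaped [leading, e0, e1, ...]. Requires a non-empty buffer_dims.
    std::pair<std::vector<int64_t>, std::vector<int64_t>>
    GetItemSliceBounds(const std::vector<int64_t>& buffer_dims, int64_t index) {
      std::vector<int64_t> starts(buffer_dims.size(), 0);
      starts[0] = index;                  // begin at the requested element
      std::vector<int64_t> sizes = buffer_dims;
      sizes[0] = 1;                       // take a single element
      return {starts, sizes};
    }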
tensor_list_utils.h:43  Status GetTensorListBufferShape(xla::XlaOp list, xla::Shape* buffer_shape);
tensor_list_ops.cc:419  TensorShape buffer_shape;  in Compile() local
420 OP_REQUIRES_OK(ctx, XLAShapeToTensorShape(buffer_xla_shape, &buffer_shape)); in Compile()
424 ctx, XlaGather(buffer, buffer_shape, indices, indices_shape, /*axis=*/0, in Compile()
/external/tensorflow/tensorflow/compiler/tf2xla/lib/
scatter.cc:40  TF_ASSIGN_OR_RETURN(xla::Shape buffer_shape, builder->GetShape(buffer));  in XlaScatter()
51 if (num_index_dims > buffer_shape.rank()) { in XlaScatter()
56 xla::ShapeUtil::HumanString(buffer_shape), ")"); in XlaScatter()
74 if (xla::ShapeUtil::GetDimension(buffer_shape, i) == 0) { in XlaScatter()
77 xla::ShapeUtil::HumanString(buffer_shape)); in XlaScatter()
144 int64_t buffer_rank = buffer_shape.rank(); in XlaScatter()
153 expected_updates_dims.push_back(buffer_shape.dimensions(dim)); in XlaScatter()
179 xla::ShapeUtil::MakeShape(buffer_shape.element_type(), {}); in XlaScatter()
189 VLOG(3) << " Input: " << xla::ShapeUtil::HumanString(buffer_shape); in XlaScatter()
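
Note: XlaScatter performs the analogous validation at the xla::Shape level: the index depth may not exceed the buffer rank (line 51), the expected updates dims are the indices batch dims followed by the buffer dims from num_index_dims on (line 153), and a scatter into a buffer whose indexed dims include a zero-sized dimension is answered without scattering (line 74), since no element can be touched. The zero-dimension early-out alone, sketched with plain vectors:

    #include <cstdint>
    #include <vector>

    // Sketch: a scatter into a buffer with a zero-sized indexed dimension
    // touches no elements, so it can be treated as a no-op.
    bool ScatterTouchesNothing(const std::vector<int64_t>& buffer_dims,
                               int64_t num_index_dims) {
      for (int64_t i = 0; i < num_index_dims; ++i)
        if (buffer_dims[i] == 0) return true;
      return false;
    }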
/external/tensorflow/tensorflow/core/tpu/kernels/xla/
segment_reduction_ops.cc:73  TensorShape buffer_shape = data_shape;  in Compile() local
74 buffer_shape.RemoveDimRange(0, indices_shape.dims()); in Compile()
75 buffer_shape.InsertDim(0, num_segments); in Compile()
78 buffer_shape.dim_sizes()); in Compile()
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
gpu_transfer_manager.cc:102  const Shape& buffer_shape =  in ReadDynamicShapes() local
104 if (buffer_shape.IsTuple()) { in ReadDynamicShapes()
115 Shape buffer_shape_static = ShapeUtil::MakeStaticShape(buffer_shape); in ReadDynamicShapes()
117 int64_t metadata_size = shape_size_fn(buffer_shape) - offset; in ReadDynamicShapes()
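
Note: ReadDynamicShapes relies on XLA's convention for dynamic buffers: the actual dimension sizes are written as int32 metadata after the statically sized array payload, so the metadata offset is the static shape's byte size and the metadata length is one int32 per dimension. The same pattern repeats in cpu_xfeed.cc and transfer_manager.cc below. A simplified sketch of the arithmetic, ignoring any padding or alignment a real shape_size_fn may add:

    #include <cstdint>
    #include <vector>

    struct MetadataRegion {
      int64_t offset;  // where the dim-size metadata starts
      int64_t size;    // bytes of metadata
    };

    // Sketch: locate the dynamic-dimension metadata behind the array payload.
    MetadataRegion DynamicShapeMetadata(const std::vector<int64_t>& static_dims,
                                        int64_t element_size) {
      int64_t payload = element_size;
      for (int64_t d : static_dims) payload *= d;  // byte size of the static data
      const int64_t rank = static_cast<int64_t>(static_dims.size());
      return {payload, rank * static_cast<int64_t>(sizeof(int32_t))};
    }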
buffer_comparator.cc:613  const Shape& buffer_shape,  in DeviceCompare() argument
675 CalculateLaunchDimensions(buffer_shape, gpu_device_info)); in DeviceCompare()
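
Note: DeviceCompare sizes its comparison kernel from the buffer shape via CalculateLaunchDimensions. The underlying idea is plain ceil-division: cover the element count with blocks of at most the device's thread limit. A generic sketch, not the actual XLA routine (which also consults GpuDeviceInfo):

    #include <algorithm>
    #include <cstdint>

    struct LaunchDims {
      int64_t blocks;
      int64_t threads_per_block;
    };

    // Sketch: cover num_elements with blocks of at most max_threads threads.
    LaunchDims ComputeLaunchDims(int64_t num_elements, int64_t max_threads) {
      if (num_elements <= 0) return {1, 1};  // degenerate buffer
      const int64_t threads = std::min(num_elements, max_threads);
      const int64_t blocks = (num_elements + threads - 1) / threads;  // ceil div
      return {blocks, threads};
    }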
gpu_conv_algorithm_picker.cc:656  const Shape& buffer_shape) {  in PickBestAlgorithmNoCacheCuda() argument
658 InitializeBuffer(stream, buffer_shape.element_type(), &rng_state, buffer); in PickBestAlgorithmNoCacheCuda()
/external/pytorch/aten/src/ATen/native/
TensorIteratorReduce.cpp:46  auto buffer_shape = DimVector(unsqueezed.sizes());  in two_pass_reduction() local
47 buffer_shape[0] = max_threads; in two_pass_reduction()
48 auto buffer = at::empty(buffer_shape, dst.options()); in two_pass_reduction()
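
Note: the lone PyTorch match uses buffer_shape differently: two_pass_reduction allocates a per-thread partials buffer by copying the unsqueezed output sizes and replacing dim 0 with max_threads; each thread reduces its slice into its row, and a second pass folds the rows together. A sequential stand-in for that scheme, with std::vector instead of at::Tensor (the real loop runs in parallel):

    #include <cstdint>
    #include <numeric>
    #include <vector>

    // Sketch: two-pass sum. Pass 1 fills one partial per "thread"
    // (buffer_shape[0] == max_threads); pass 2 reduces the partials.
    // Requires max_threads > 0.
    double TwoPassSum(const std::vector<double>& data, int64_t max_threads) {
      std::vector<double> buffer(max_threads, 0.0);
      const int64_t n = static_cast<int64_t>(data.size());
      for (int64_t t = 0; t < max_threads; ++t) {  // parallel in the real code
        const int64_t begin = t * n / max_threads;
        const int64_t end = (t + 1) * n / max_threads;
        buffer[t] = std::accumulate(data.begin() + begin, data.begin() + end, 0.0);
      }
      return std::accumulate(buffer.begin(), buffer.end(), 0.0);
    }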
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
cpu_xfeed.cc:283  const Shape& buffer_shape =  in ReadDynamicShapesOnCpu() local
285 if (buffer_shape.IsTuple()) { in ReadDynamicShapesOnCpu()
296 Shape buffer_shape_static = ShapeUtil::MakeStaticShape(buffer_shape); in ReadDynamicShapesOnCpu()
298 int64_t metadata_size = shape_size_fn(buffer_shape) - offset; in ReadDynamicShapesOnCpu()
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/
collection_ops_util.cc:153  llvm::SmallVector<int64_t, 8> buffer_shape;  in CreateInitBufferValue() local
154 buffer_shape.push_back(max_size); in CreateInitBufferValue()
156 buffer_shape.push_back(dim); in CreateInitBufferValue()
164 auto buffer_type = RankedTensorType::get(buffer_shape, element_dtype); in CreateInitBufferValue()
167 ArrayRef<Value>{zero, GetR1Const(buffer_shape, builder, op->getLoc())}); in CreateInitBufferValue()
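
Note: CreateInitBufferValue builds the buffer type for the MLIR collection-ops lowering as a leading capacity dimension followed by the element dims, then materializes it by broadcasting zero to that shape. HandleTensorArraySplitV3Op below builds its buffer the same way, with the split count as the leading dim. The dims construction, sketched with a plain vector instead of llvm::SmallVector:

    #include <cstdint>
    #include <vector>

    // Sketch: [max_size, elem_dims...] for a zero-initialized collection buffer.
    std::vector<int64_t> InitBufferShape(int64_t max_size,
                                         const std::vector<int64_t>& element_dims) {
      std::vector<int64_t> buffer_shape;
      buffer_shape.push_back(max_size);  // capacity of the buffer
      for (int64_t dim : element_dims) buffer_shape.push_back(dim);
      return buffer_shape;
    }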
tensor_array_ops_decomposition.cc:335  llvm::SmallVector<int64_t, 8> buffer_shape;  in HandleTensorArraySplitV3Op() local
336 buffer_shape.push_back(count); in HandleTensorArraySplitV3Op()
337 for (int64_t dim : elem_type.getShape()) buffer_shape.push_back(dim); in HandleTensorArraySplitV3Op()
343 buffer_shape, elem_type.getElementType())}, in HandleTensorArraySplitV3Op()
345 cutil::GetR1Const(buffer_shape, builder, in HandleTensorArraySplitV3Op()
/external/tensorflow/tensorflow/compiler/xla/service/
transfer_manager.cc:208  const Shape& buffer_shape =  in ReadDynamicShapes() local
210 if (buffer_shape.IsTuple()) { in ReadDynamicShapes()
222 Shape buffer_shape_static = ShapeUtil::MakeStaticShape(buffer_shape); in ReadDynamicShapes()
224 int64_t metadata_size = shape_size_fn(buffer_shape) - offset; in ReadDynamicShapes()
235 ShapeUtil::MakeShape(S32, {buffer_shape.dimensions_size()}), in ReadDynamicShapes()
hlo_verifier.cc:523  Status CheckBufferOffset(const Shape& buffer_shape,  in CheckBufferOffset() argument
542 [&buffer_shape](const Shape& shape) { in CheckBufferOffset()
544 buffer_shape.rank(); in CheckBufferOffset()
551 if (buffer_offset_shape.tuple_shapes_size() != buffer_shape.rank()) { in CheckBufferOffset()
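
Note: CheckBufferOffset enforces that every offset handed to the instruction addresses the buffer fully: each offset tuple must contain exactly one index per buffer dimension (buffer_shape.rank()). The core predicate, sketched with nested vectors in place of xla::Shape tuples:

    #include <cstdint>
    #include <vector>

    // Sketch: every offset must supply one index per buffer dimension.
    bool OffsetsMatchBufferRank(
        const std::vector<std::vector<int64_t>>& offsets, int64_t buffer_rank) {
      for (const auto& offset : offsets)
        if (static_cast<int64_t>(offset.size()) != buffer_rank) return false;
      return true;
    }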
layout_assignment.cc:649  const Shape& buffer_shape = instruction->operand(0)->shape();  in AddMandatoryConstraints() local
650 TF_RET_CHECK(buffer_shape.IsArray()); in AddMandatoryConstraints()
653 ->LayoutShapeForChannel(buffer_shape, channel_id); in AddMandatoryConstraints()
/external/tensorflow/tensorflow/compiler/xla/pjrt/
pjrt_stream_executor_client.cc:1645  const Shape& buffer_shape,  in CheckCompatibleShapes() argument
1650 if (strict_shape_checking || buffer_shape.IsTuple()) { in CheckCompatibleShapes()
1651 if (!ShapeUtil::Equal(buffer_shape, execution_shape)) { in CheckCompatibleShapes()
1657 ShapeUtil::HumanStringWithLayout(buffer_shape)); in CheckCompatibleShapes()
1660 if (transfer_manager.GetByteSizeRequirement(buffer_shape) != in CheckCompatibleShapes()
1667 ShapeUtil::HumanStringWithLayout(buffer_shape)); in CheckCompatibleShapes()
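
Note: CheckCompatibleShapes applies a two-level rule when binding argument buffers to an executable: under strict shape checking, or whenever the buffer is a tuple, the buffer and execution shapes must be exactly equal; otherwise it is enough that their byte-size requirements agree. A simplified model of that rule (SimpleShape is a made-up stand-in for xla::Shape, and byte_size() for TransferManager::GetByteSizeRequirement):

    #include <cstdint>
    #include <vector>

    struct SimpleShape {
      std::vector<int64_t> dims;
      int64_t element_size = 1;
      bool is_tuple = false;
      int64_t byte_size() const {
        int64_t n = element_size;
        for (int64_t d : dims) n *= d;
        return n;
      }
      bool operator==(const SimpleShape& o) const {
        return is_tuple == o.is_tuple && element_size == o.element_size &&
               dims == o.dims;
      }
    };

    // Sketch: exact equality when strict or tuple-shaped, size match otherwise.
    bool CompatibleShapes(bool strict, const SimpleShape& buffer,
                          const SimpleShape& execution) {
      if (strict || buffer.is_tuple) return buffer == execution;
      return buffer.byte_size() == execution.byte_size();
    }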
/external/tensorflow/tensorflow/compiler/tf2xla/
xla_compiler.cc:939  xla::Shape buffer_shape;  in XLAShapeForArgument() local
941 TensorShapeToXLAShape(arg.type, shape, &buffer_shape)); in XLAShapeForArgument()
943 {buffer_shape, xla::ShapeUtil::MakeShape(xla::S32, {})}); in XLAShapeForArgument()
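
Note: XLAShapeForArgument shows the argument-side counterpart of the tensor-list tuple from tensor_list_utils.cc: a TensorList argument is compiled to a two-element tuple of the element buffer shape and a rank-0 S32 scalar (the list's push index). Modeled minimally, with an empty dims vector marking the scalar:

    #include <cstdint>
    #include <utility>
    #include <vector>

    using Dims = std::vector<int64_t>;

    // Sketch: tuple(buffer_shape, s32[] push index); {} denotes the scalar.
    std::pair<Dims, Dims> TensorListArgShape(const Dims& buffer_shape) {
      return {buffer_shape, Dims{}};
    }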