/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | scatter_nd_op.cc |
     34  Status ValidateUpdateShape(const TensorShape& buffer_shape,    in ValidateUpdateShape() argument
     54  ", buffer_shape: ", buffer_shape.DebugString(),    in ValidateUpdateShape()
     63  if (buffer_shape.dims() <    in ValidateUpdateShape()
     68  batch_dim + buffer_shape.dims() - num_index_dims) {    in ValidateUpdateShape()
     78  buffer_shape.dim_size(d + num_index_dims)) {    in ValidateUpdateShape()
     95  TensorShape buffer_shape;    in Compile() local
     96  OP_REQUIRES_OK(context, context->ConstantInputAsShape(2, &buffer_shape));    in Compile()
     99  context, TensorShapeUtils::IsVectorOrHigher(buffer_shape),    in Compile()
    101  "got shape: ", buffer_shape.DebugString()));    in Compile()
    105  buffer_shape.num_elements() > 0 || (indices_shape.num_elements() == 0 &&    in Compile()
    [all …]
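
Note: the ValidateUpdateShape matches above enforce the ScatterNd contract that the updates tensor has shape indices.shape[:-1] followed by buffer_shape[num_index_dims:]. Below is a minimal standalone restatement of that rule using plain std::vector<int64_t> instead of TensorShape; the helper name and the derivation of num_index_dims from the last indices dimension are illustrative, not the TensorFlow code itself.

    #include <cstdint>
    #include <string>
    #include <vector>

    // Illustrative restatement of the ScatterNd update-shape rule:
    //   updates.shape == indices.shape[:-1] + buffer.shape[num_index_dims:]
    // where num_index_dims is the size of the last dimension of indices.
    // Returns an error message, or an empty string if the shapes agree.
    std::string ValidateUpdateShapeSketch(const std::vector<int64_t>& buffer_shape,
                                          const std::vector<int64_t>& indices_shape,
                                          const std::vector<int64_t>& updates_shape) {
      if (indices_shape.empty()) return "indices must be at least rank 1";
      const int64_t num_index_dims = indices_shape.back();
      if (num_index_dims > static_cast<int64_t>(buffer_shape.size()))
        return "index depth exceeds buffer rank";
      // Expected updates shape: the indices batch dims, then the buffer dims
      // that are not consumed by the index.
      std::vector<int64_t> expected(indices_shape.begin(), indices_shape.end() - 1);
      for (int64_t d = num_index_dims; d < static_cast<int64_t>(buffer_shape.size()); ++d)
        expected.push_back(buffer_shape[d]);
      return expected == updates_shape ? "" : "updates shape does not match";
    }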
|
D | segment_reduction_ops.cc |
     83  TensorShape buffer_shape = data_shape;    in Compile() local
     84  buffer_shape.RemoveDimRange(0, indices_shape.dims());    in Compile()
     85  buffer_shape.InsertDim(0, num_segments);    in Compile()
     88  xla::Broadcast(InitialValue(builder), buffer_shape.dim_sizes());    in Compile()
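
Note: lines 83-88 derive the reduction output buffer by dropping the data dimensions covered by the segment indices, prepending num_segments, and broadcasting the reduction's initial value to that shape. A self-contained sketch of the same shape arithmetic (plain vectors, hypothetical helper name):

    #include <cstdint>
    #include <vector>

    // Illustrative shape arithmetic for the unsorted segment reduction kernels:
    // the output buffer has shape [num_segments] + data_shape[indices_rank:],
    // and is then filled with the reduction's initial value.
    std::vector<int64_t> SegmentReductionBufferShape(
        const std::vector<int64_t>& data_shape, int64_t indices_rank,
        int64_t num_segments) {
      std::vector<int64_t> buffer_shape;
      buffer_shape.push_back(num_segments);
      for (int64_t d = indices_rank; d < static_cast<int64_t>(data_shape.size()); ++d)
        buffer_shape.push_back(data_shape[d]);
      return buffer_shape;
    }
    // Example: data_shape = {8, 4, 16}, indices of rank 2, 10 segments -> {10, 16}.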
|
D | tensor_list_utils.cc |
    139  Status GetTensorListBufferShape(xla::XlaOp list, xla::Shape* buffer_shape) {    in GetTensorListBufferShape() argument
    146  *buffer_shape = xla::ShapeUtil::GetTupleElementShape(list_shape, 0);    in GetTensorListBufferShape()
    213  auto buffer_shape = xla::ShapeUtil::GetTupleElementShape(list_shape, 0);    in GetLeadingDimForTensorList() local
    214  *leading_dim_is_dynamic = buffer_shape.is_dynamic_dimension(0);    in GetLeadingDimForTensorList()
    216  *leading_dim = buffer_shape.dimensions(0);    in GetLeadingDimForTensorList()
    517  const xla::Shape& buffer_shape =    in ExecuteTensorListGetItem() local
    519  std::vector<xla::XlaOp> start_indices(buffer_shape.dimensions_size(),    in ExecuteTensorListGetItem()
    524  xla::SpanToVector(buffer_shape.dimensions());    in ExecuteTensorListGetItem()
    531  for (int64_t i = 1; i < buffer_shape.dimensions_size(); ++i) {    in ExecuteTensorListGetItem()
    532  if (buffer_shape.is_dynamic_dimension(i)) {    in ExecuteTensorListGetItem()
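
Note: these helpers read the TensorList buffer as element 0 of an XLA tuple, with the leading (possibly dynamic) dimension acting as the list length. A rough standalone sketch of that layout follows; the struct and helper names are hypothetical and only mirror the pattern visible in the matches, not the real xla::Shape API.

    #include <cstdint>
    #include <vector>

    // Rough stand-in for the layout the tensor_list_utils helpers work against:
    // a TensorList value is a tuple whose element 0 is the element buffer
    // (shape = [leading_dim, element dims...], leading_dim possibly dynamic)
    // and whose element 1 is an S32 push index.
    struct SketchShape {
      std::vector<int64_t> dimensions;
      std::vector<bool> dynamic;  // one flag per dimension
    };

    struct SketchTensorList {
      SketchShape buffer_shape;  // tuple element 0
      int32_t push_index = 0;    // tuple element 1
    };

    // Analogue of GetLeadingDimForTensorList: report the list-length bound and
    // whether that dimension is dynamic.
    void GetLeadingDimSketch(const SketchTensorList& list, int64_t* leading_dim,
                             bool* leading_dim_is_dynamic) {
      *leading_dim = list.buffer_shape.dimensions[0];
      *leading_dim_is_dynamic = list.buffer_shape.dynamic[0];
    }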
|
D | tensor_list_utils.h |
     43  Status GetTensorListBufferShape(xla::XlaOp list, xla::Shape* buffer_shape);
|
D | tensor_list_ops.cc |
    419  TensorShape buffer_shape;    in Compile() local
    420  OP_REQUIRES_OK(ctx, XLAShapeToTensorShape(buffer_xla_shape, &buffer_shape));    in Compile()
    424  ctx, XlaGather(buffer, buffer_shape, indices, indices_shape, /*axis=*/0,    in Compile()
|
/external/tensorflow/tensorflow/compiler/tf2xla/lib/ |
D | scatter.cc |
     40  TF_ASSIGN_OR_RETURN(xla::Shape buffer_shape, builder->GetShape(buffer));    in XlaScatter()
     51  if (num_index_dims > buffer_shape.rank()) {    in XlaScatter()
     56  xla::ShapeUtil::HumanString(buffer_shape), ")");    in XlaScatter()
     74  if (xla::ShapeUtil::GetDimension(buffer_shape, i) == 0) {    in XlaScatter()
     77  xla::ShapeUtil::HumanString(buffer_shape));    in XlaScatter()
    144  int64_t buffer_rank = buffer_shape.rank();    in XlaScatter()
    153  expected_updates_dims.push_back(buffer_shape.dimensions(dim));    in XlaScatter()
    179  xla::ShapeUtil::MakeShape(buffer_shape.element_type(), {});    in XlaScatter()
    189  VLOG(3) << " Input: " << xla::ShapeUtil::HumanString(buffer_shape);    in XlaScatter()
|
/external/tensorflow/tensorflow/core/tpu/kernels/xla/ |
D | segment_reduction_ops.cc |
     73  TensorShape buffer_shape = data_shape;    in Compile() local
     74  buffer_shape.RemoveDimRange(0, indices_shape.dims());    in Compile()
     75  buffer_shape.InsertDim(0, num_segments);    in Compile()
     78  buffer_shape.dim_sizes());    in Compile()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | gpu_transfer_manager.cc |
    102  const Shape& buffer_shape =    in ReadDynamicShapes() local
    104  if (buffer_shape.IsTuple()) {    in ReadDynamicShapes()
    115  Shape buffer_shape_static = ShapeUtil::MakeStaticShape(buffer_shape);    in ReadDynamicShapes()
    117  int64_t metadata_size = shape_size_fn(buffer_shape) - offset;    in ReadDynamicShapes()
|
D | buffer_comparator.cc |
    613  const Shape& buffer_shape,    in DeviceCompare() argument
    675  CalculateLaunchDimensions(buffer_shape, gpu_device_info));    in DeviceCompare()
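
Note: DeviceCompare sizes its comparison kernel from buffer_shape via CalculateLaunchDimensions, which in essence maps the element count onto a block/grid pair. A simplified standalone sketch of that kind of calculation; the 1024-thread cap is an assumption standing in for the device limit the real helper reads from gpu_device_info.

    #include <cstdint>
    #include <utility>

    // Simplified stand-in for a launch-dimension calculation: one thread per
    // element of the buffer, capped at an assumed per-block thread limit.
    std::pair<int64_t, int64_t> LaunchDimensionsSketch(
        int64_t num_elements, int64_t max_threads_per_block = 1024) {
      if (num_elements <= 0) return {1, 1};
      const int64_t threads_per_block =
          num_elements < max_threads_per_block ? num_elements : max_threads_per_block;
      const int64_t block_count =
          (num_elements + threads_per_block - 1) / threads_per_block;  // ceiling division
      return {block_count, threads_per_block};
    }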
|
D | gpu_conv_algorithm_picker.cc |
    656  const Shape& buffer_shape) {    in PickBestAlgorithmNoCacheCuda() argument
    658  InitializeBuffer(stream, buffer_shape.element_type(), &rng_state, buffer);    in PickBestAlgorithmNoCacheCuda()
|
/external/pytorch/aten/src/ATen/native/ |
D | TensorIteratorReduce.cpp |
     46  auto buffer_shape = DimVector(unsqueezed.sizes());    in two_pass_reduction() local
     47  buffer_shape[0] = max_threads;    in two_pass_reduction()
     48  auto buffer = at::empty(buffer_shape, dst.options());    in two_pass_reduction()
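
Note: two_pass_reduction allocates a partial-result buffer shaped like the unsqueezed destination but with its leading dimension set to max_threads, so each thread reduces into its own slice before a second pass reduces over that dimension. A self-contained sketch of the idea for a plain sum, not the TensorIterator machinery:

    #include <algorithm>
    #include <cstddef>
    #include <numeric>
    #include <thread>
    #include <vector>

    // Two-pass reduction sketch: pass 1 writes one partial sum per thread into
    // a buffer whose leading dimension is max_threads; pass 2 reduces that
    // buffer into the final result.
    double TwoPassSum(const std::vector<double>& data, unsigned max_threads) {
      if (max_threads == 0) max_threads = 1;
      std::vector<double> buffer(max_threads, 0.0);  // buffer_shape = [max_threads]
      std::vector<std::thread> workers;
      const std::size_t chunk = (data.size() + max_threads - 1) / max_threads;
      for (unsigned t = 0; t < max_threads; ++t) {
        workers.emplace_back([&, t] {
          const std::size_t begin = t * chunk;
          const std::size_t end = std::min(data.size(), begin + chunk);
          for (std::size_t i = begin; i < end; ++i) buffer[t] += data[i];  // pass 1
        });
      }
      for (auto& w : workers) w.join();
      return std::accumulate(buffer.begin(), buffer.end(), 0.0);  // pass 2
    }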
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | cpu_xfeed.cc |
    283  const Shape& buffer_shape =    in ReadDynamicShapesOnCpu() local
    285  if (buffer_shape.IsTuple()) {    in ReadDynamicShapesOnCpu()
    296  Shape buffer_shape_static = ShapeUtil::MakeStaticShape(buffer_shape);    in ReadDynamicShapesOnCpu()
    298  int64_t metadata_size = shape_size_fn(buffer_shape) - offset;    in ReadDynamicShapesOnCpu()
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/ |
D | collection_ops_util.cc |
    153  llvm::SmallVector<int64_t, 8> buffer_shape;    in CreateInitBufferValue() local
    154  buffer_shape.push_back(max_size);    in CreateInitBufferValue()
    156  buffer_shape.push_back(dim);    in CreateInitBufferValue()
    164  auto buffer_type = RankedTensorType::get(buffer_shape, element_dtype);    in CreateInitBufferValue()
    167  ArrayRef<Value>{zero, GetR1Const(buffer_shape, builder, op->getLoc())});    in CreateInitBufferValue()
|
D | tensor_array_ops_decomposition.cc |
    335  llvm::SmallVector<int64_t, 8> buffer_shape;    in HandleTensorArraySplitV3Op() local
    336  buffer_shape.push_back(count);    in HandleTensorArraySplitV3Op()
    337  for (int64_t dim : elem_type.getShape()) buffer_shape.push_back(dim);    in HandleTensorArraySplitV3Op()
    343  buffer_shape, elem_type.getElementType())},    in HandleTensorArraySplitV3Op()
    345  cutil::GetR1Const(buffer_shape, builder,    in HandleTensorArraySplitV3Op()
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | transfer_manager.cc |
    208  const Shape& buffer_shape =    in ReadDynamicShapes() local
    210  if (buffer_shape.IsTuple()) {    in ReadDynamicShapes()
    222  Shape buffer_shape_static = ShapeUtil::MakeStaticShape(buffer_shape);    in ReadDynamicShapes()
    224  int64_t metadata_size = shape_size_fn(buffer_shape) - offset;    in ReadDynamicShapes()
    235  ShapeUtil::MakeShape(S32, {buffer_shape.dimensions_size()}),    in ReadDynamicShapes()
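
Note: ReadDynamicShapes recovers the true extents of a dynamically shaped buffer by reading trailing metadata: the data padded to its static bounds is followed by an S32 vector of length rank (line 235), starting at the byte size of the static shape, with metadata_size = shape_size_fn(buffer_shape) - offset (line 224). A standalone sketch of that offset arithmetic, under the assumption that the metadata is exactly one int32 per dimension:

    #include <cstdint>

    // Sketch of where dynamic-shape metadata sits in a device buffer, assuming
    //   [ data padded to static bounds | int32 actual_size[rank] ]
    struct DynamicBufferMetadata {
      int64_t offset;  // byte size of the static (padded) shape
      int64_t size;    // rank * sizeof(int32_t) under this assumption
    };

    DynamicBufferMetadata MetadataLayoutSketch(int64_t static_byte_size, int64_t rank) {
      return {static_byte_size, rank * static_cast<int64_t>(sizeof(int32_t))};
    }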
|
D | hlo_verifier.cc |
    523  Status CheckBufferOffset(const Shape& buffer_shape,    in CheckBufferOffset() argument
    542  [&buffer_shape](const Shape& shape) {    in CheckBufferOffset()
    544  buffer_shape.rank();    in CheckBufferOffset()
    551  if (buffer_offset_shape.tuple_shapes_size() != buffer_shape.rank()) {    in CheckBufferOffset()
|
D | layout_assignment.cc |
    649  const Shape& buffer_shape = instruction->operand(0)->shape();    in AddMandatoryConstraints() local
    650  TF_RET_CHECK(buffer_shape.IsArray());    in AddMandatoryConstraints()
    653  ->LayoutShapeForChannel(buffer_shape, channel_id);    in AddMandatoryConstraints()
|
/external/tensorflow/tensorflow/compiler/xla/pjrt/ |
D | pjrt_stream_executor_client.cc |
   1645  const Shape& buffer_shape,    in CheckCompatibleShapes() argument
   1650  if (strict_shape_checking || buffer_shape.IsTuple()) {    in CheckCompatibleShapes()
   1651  if (!ShapeUtil::Equal(buffer_shape, execution_shape)) {    in CheckCompatibleShapes()
   1657  ShapeUtil::HumanStringWithLayout(buffer_shape));    in CheckCompatibleShapes()
   1660  if (transfer_manager.GetByteSizeRequirement(buffer_shape) !=    in CheckCompatibleShapes()
   1667  ShapeUtil::HumanStringWithLayout(buffer_shape));    in CheckCompatibleShapes()
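
Note: CheckCompatibleShapes applies two levels of checking: with strict_shape_checking (or for tuple shapes) the argument buffer's shape must equal the executable's expected parameter shape, otherwise only the byte-size requirement has to match. A standalone sketch of that decision with a deliberately minimal stand-in Shape; the struct and helper names are illustrative, not the PJRT code.

    #include <cstdint>
    #include <string>
    #include <vector>

    // Minimal stand-in for the pieces of xla::Shape this check consults.
    struct ShapeSketch {
      bool is_tuple = false;
      std::vector<int64_t> dimensions;
      int64_t byte_size = 0;  // what GetByteSizeRequirement would report
      bool operator==(const ShapeSketch& other) const {
        return is_tuple == other.is_tuple && dimensions == other.dimensions &&
               byte_size == other.byte_size;
      }
    };

    // Returns an error message, or an empty string if the argument buffer may
    // be passed for this executable parameter.
    std::string CheckCompatibleShapesSketch(bool strict_shape_checking,
                                            const ShapeSketch& buffer_shape,
                                            const ShapeSketch& execution_shape) {
      if (strict_shape_checking || buffer_shape.is_tuple) {
        if (!(buffer_shape == execution_shape))
          return "Executable expected a parameter of a different shape";
      } else if (buffer_shape.byte_size != execution_shape.byte_size) {
        return "Executable expected a parameter of a different size";
      }
      return "";
    }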
|
/external/tensorflow/tensorflow/compiler/tf2xla/ |
D | xla_compiler.cc |
    939  xla::Shape buffer_shape;    in XLAShapeForArgument() local
    941  TensorShapeToXLAShape(arg.type, shape, &buffer_shape));    in XLAShapeForArgument()
    943  {buffer_shape, xla::ShapeUtil::MakeShape(xla::S32, {})});    in XLAShapeForArgument()
|