/external/tensorflow/tensorflow/compiler/xla/ |
D | shape_layout.cc |
      26  if (!ShapeUtil::Compatible(other_shape, shape_)) {   in CopyLayoutFromShape()
      31  shape_ = other_shape;   in CopyLayoutFromShape()
      36  if (!ShapeUtil::Compatible(*to_shape, shape_)) {   in AssignLayoutToShape()
      41  *to_shape = shape_;   in AssignLayoutToShape()
      46  LayoutUtil::SetToDefaultLayout(&shape_);   in SetToDefaultLayout()
      70  ShapeUtil::ForEachSubshape(shape_, check_tiling);   in MatchesLayoutInShape()
      79  return equal(shape, shape_);   in MatchesLayoutInShape()
      84  CHECK(!shape_.IsTuple());   in layout()
      85  return shape_.layout();   in layout()
      88  void ShapeLayout::Clear() { LayoutUtil::ClearLayout(&shape_); }   in Clear()
      [all …]
|
D | shape_layout.h |
      38  explicit ShapeLayout(const Shape& shape) : shape_(shape) {}   in ShapeLayout()
      65  const Shape& shape() const { return shape_; }   in shape()
      69  void ClearDynamicShape() { shape_.clear_dynamic_dimensions(); }   in ClearDynamicShape()
      88  std::string ToString() const { return shape_.ToString(true); }   in ToString()
      95  Shape shape_;
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | defuser_test.cc |
      42  const Shape shape_ = ShapeUtil::MakeShape(F32, {2, 2});   member in xla::__anon5681e5b90111::DefuserTest
      49  builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));   in TEST_F()
      51  builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));   in TEST_F()
      53  HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));   in TEST_F()
      65  builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));   in TEST_F()
      67  builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));   in TEST_F()
      69  HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));   in TEST_F()
      89  builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));   in TEST_F()
      91  builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));   in TEST_F()
      93  HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));   in TEST_F()
      [all …]
|
/external/tensorflow/tensorflow/lite/delegates/coreml/builders/ |
D | reshape_op_builder.cc |
      45  for (int dim : shape_) {   in Build()
      68  std::back_inserter(shape_));   in SetShapeFromIntArray()
      72  for (int i = 0; i < shape_.size(); ++i) {   in SetShapeFromIntArray()
      73  if (shape_[i] == -1) {   in SetShapeFromIntArray()
      76  reshape_size *= shape_[i];   in SetShapeFromIntArray()
      81  shape_[negative_index] = input_size / reshape_size;   in SetShapeFromIntArray()
      84  if (shape_.size() == 2) {   in SetShapeFromIntArray()
      85  shape_ = {shape_[1], 1, shape_[0]};   in SetShapeFromIntArray()
      86  } else if (shape_.size() == 3) {   in SetShapeFromIntArray()
      87  shape_ = {shape_[2], shape_[0], shape_[1]};   in SetShapeFromIntArray()
      [all …]
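The lines above implement the usual reshape wildcard rule: a single -1 in the target shape is replaced by the input element count divided by the product of the known dimensions (lines 72-81), after which the result is permuted into the axis order the Core ML builder expects (lines 84-87). A minimal standalone sketch of just the wildcard inference, using a hypothetical helper name rather than the builder's API, and assuming a well-formed target (at most one -1, non-zero known dimensions):

    #include <cstdint>
    #include <vector>

    // Hypothetical helper: given a reshape target that may contain a single -1,
    // infer that dimension from the total element count of the input tensor.
    std::vector<int> ResolveReshapeShape(std::vector<int> target, int64_t input_size) {
      int negative_index = -1;
      int64_t reshape_size = 1;
      for (int i = 0; i < static_cast<int>(target.size()); ++i) {
        if (target[i] == -1) {
          negative_index = i;      // at most one wildcard is expected
        } else {
          reshape_size *= target[i];
        }
      }
      if (negative_index >= 0) {
        target[negative_index] = static_cast<int>(input_size / reshape_size);
      }
      return target;
    }

For example, ResolveReshapeShape({2, -1}, 8) yields {2, 4}.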
|
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/ |
D | loop_emitter.cc |
      35  : body_emitter_(body_emitter), shape_(shape), b_(b) {}   in LoopEmitter()
      41  CHECK_EQ(dynamic_dims.size(), shape_.dimensions_size());   in LoopEmitter()
      49  shape_(target_array.GetShape()),
      57  shape_(target_arrays[0].GetShape()),   in LoopEmitter()
      62  CHECK(ShapeUtil::SameDimensions(shape_, array.GetShape()))   in LoopEmitter()
      63  << ": '" << shape_.ShortDebugString() << "' does not match '"   in LoopEmitter()
      108  std::vector<llvm::Value*> array_multi_index(shape_.dimensions_size());   in EmitStaticIndex()
      109  for (int i = 0; i < LayoutUtil::MinorToMajor(shape_).size(); ++i) {   in EmitStaticIndex()
      110  int64_t dimension = LayoutUtil::Major(shape_.layout(), i);   in EmitStaticIndex()
      113  /*end_index=*/shape_.dimensions(dimension),   in EmitStaticIndex()
      [all …]
|
/external/tensorflow/tensorflow/lite/delegates/gpu/gl/kernels/ |
D | converter.cc |
      113  shape_ = BHWC(output_def.dimensions.b, output_def.dimensions.h,   in Init()
      115  if (shape_.b != 1) {   in Init()
      161  if (input_ssbo.bytes_size() != SizeInBytesDHWC4(shape_)) {   in Convert()
      165  if (output_ssbo.bytes_size() != SizeInBytesBHWC(shape_)) {   in Convert()
      172  int4(static_cast<int32_t>(shape_.w), static_cast<int32_t>(shape_.h),   in Convert()
      173  static_cast<int32_t>(shape_.c), 0)}));   in Convert()
      176  return Dispatch(uint3(shape_.w, shape_.h, shape_.c));   in Convert()
      179  BHWC shape_;   member in tflite::gpu::gl::__anon1e444b000111::FromTensorConverter
      201  shape_ = BHWC(output_def.dimensions.b, output_def.dimensions.h,   in Init()
      203  if (shape_.b != 1) {   in Init()
      [all …]
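The two byte-size checks in Convert() compare a dense BHWC buffer against a channel-padded DHWC4 buffer. A sketch of the arithmetic, assuming float storage and assuming the DHWC4 layout rounds the channel count up to a multiple of 4; the struct and helper names below are illustrative stand-ins, not the delegate's own functions:

    #include <cstddef>

    struct BHWC { int b, h, w, c; };  // illustrative stand-in for the delegate's shape struct

    // Dense BHWC float buffer: one float per element.
    size_t SizeInBytesBHWC(const BHWC& s) {
      return sizeof(float) * static_cast<size_t>(s.b) * s.h * s.w * s.c;
    }

    // Channel-padded DHWC4 float buffer: channels rounded up to a multiple of 4.
    size_t SizeInBytesDHWC4(const BHWC& s) {
      const int padded_c = ((s.c + 3) / 4) * 4;
      return sizeof(float) * static_cast<size_t>(s.b) * s.h * s.w * padded_c;
    }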
|
/external/tensorflow/tensorflow/python/kernel_tests/array_ops/ |
D | matrix_band_part_op_test.py |
      42  def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_):   argument
      46  mat = np.ones(shape_).astype(dtype_)
      48  for lower in -1, 0, 1, shape_[-2] - 1:
      49  for upper in -1, 0, 1, shape_[-1] - 1:
      72  def _GetMatrixBandPartGradTest(dtype_, batch_shape_, shape_):   argument
      76  shape = batch_shape_ + shape_
      79  for lower in -1, 0, 1, shape_[-2] - 1:
      80  for upper in -1, 0, 1, shape_[-1] - 1:
      109  for shape_ in self.shapes:
      114  matrix = variables.Variable(array_ops.ones(shape_))
      [all …]
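The test sweeps lower/upper band widths including -1, which conventionally means "keep everything on that side of the diagonal". A sketch of the band-part predicate being exercised, written out for a single 2-D matrix; this states the operation's commonly given definition, not the test's own implementation:

    #include <vector>

    // Keep element (i, j) when i - j <= lower (or lower < 0) and
    // j - i <= upper (or upper < 0); zero it otherwise.
    std::vector<std::vector<float>> BandPart(const std::vector<std::vector<float>>& m,
                                             int lower, int upper) {
      std::vector<std::vector<float>> out = m;
      for (int i = 0; i < static_cast<int>(m.size()); ++i) {
        for (int j = 0; j < static_cast<int>(m[i].size()); ++j) {
          const bool keep_lower = lower < 0 || (i - j) <= lower;
          const bool keep_upper = upper < 0 || (j - i) <= upper;
          if (!(keep_lower && keep_upper)) out[i][j] = 0.0f;
        }
      }
      return out;
    }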
|
/external/tensorflow/tensorflow/lite/kernels/shim/ |
D | tensor_view.h |
      77  : data_(t->Data<DType>()), shape_(t->Shape()) {   in Tensor()
      78  DCHECK_EQ(RANK, shape_.size());   in Tensor()
      83  : data_(t->Data<DType>()), shape_(t->Shape()) {   in Tensor()
      84  DCHECK_EQ(RANK, shape_.size());   in Tensor()
      113  return shape_[dim_i];   in Dim()
      135  row_sizes_[i] = row_sizes_[i + 1] * shape_[i + 1];   in ComputeRowSizes()
      141  const absl::Span<int> shape_;   variable
      168  absl::Span<int> Shape() { return shape_; }   in Shape()
      169  /*[[nodiscard]]*/ const absl::Span<int> Shape() const { return shape_; }   in Shape()
      208  : shape_(shape),   in TensorView()
      [all …]
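Line 135 is the standard row-major stride recurrence: each dimension's "row size" is the product of all faster-varying extents, with the innermost row size assumed to be 1. A self-contained sketch of the same recurrence over a plain vector:

    #include <vector>

    // Row-major row sizes (strides in elements) for a given shape, following the
    // recurrence at line 135: row_sizes[i] = row_sizes[i + 1] * shape[i + 1].
    std::vector<int> ComputeRowSizes(const std::vector<int>& shape) {
      std::vector<int> row_sizes(shape.size(), 1);
      for (int i = static_cast<int>(shape.size()) - 2; i >= 0; --i) {
        row_sizes[i] = row_sizes[i + 1] * shape[i + 1];
      }
      return row_sizes;
    }

For a shape of {2, 3, 4} this yields {12, 4, 1}.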
|
/external/XNNPACK/test/ |
D | transpose-operator-tester.h |
      48  this->shape_ = shape;   in shape()
      52  inline const std::vector<size_t>& dims() const { return this->shape_; }   in dims()
      69  input_stride[i - 1] = input_stride[i] * shape_[i];   in TestX8()
      70  output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];   in TestX8()
      88  num_dims(), shape_.data(), perm_.data(),   in TestX8()
      109  input_stride[i - 1] = input_stride[i] * shape_[i];   in TestRunX8()
      110  output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];   in TestRunX8()
      121  num_dims(), shape_.data(), perm_.data(),   in TestRunX8()
      138  input_stride[i - 1] = input_stride[i] * shape_[i];   in TestX16()
      139  output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];   in TestX16()
      [all …]
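Lines 69-70 (and their copies in the other Test* methods) build row-major strides for the input and for the transposed output, whose dimension i has extent shape[perm[i]]. A sketch of that recurrence, assuming the innermost stride of each tensor is 1; this is an illustrative helper, not the tester's code:

    #include <cstddef>
    #include <vector>

    // Row-major strides of the input tensor and of the output tensor obtained by
    // permuting its dimensions with perm (output dim i has extent shape[perm[i]]).
    void ComputeTransposeStrides(const std::vector<size_t>& shape,
                                 const std::vector<size_t>& perm,
                                 std::vector<size_t>* input_stride,
                                 std::vector<size_t>* output_stride) {
      const int n = static_cast<int>(shape.size());
      input_stride->assign(n, 1);
      output_stride->assign(n, 1);
      for (int i = n - 1; i > 0; --i) {
        (*input_stride)[i - 1] = (*input_stride)[i] * shape[i];
        (*output_stride)[i - 1] = (*output_stride)[i] * shape[perm[i]];
      }
    }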
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | shape_partition.cc |
      30  for (int i = shape_.layout().minor_to_major_size() - 1; i >= 0; --i) {   in Run()
      31  const int64_t dimension = shape_.layout().minor_to_major(i);   in Run()
      33  outer_dim_size *= shape_.dimensions(dimension);   in Run()
      56  std::min(static_cast<int64_t>(shape_.dimensions(outer_dims[i])),   in Run()
      80  shape_.dimensions(outer_dims[i]) - dimension_partition_counts[i],   in Run()
      102  : shape_(shape),   in ShapePartitionIterator()
      109  dimensions_[i] = shape_.layout().minor_to_major(   in ShapePartitionIterator()
      110  shape_.layout().minor_to_major_size() - 1 - i);   in ShapePartitionIterator()
      117  const int64_t dim_size = shape_.dimensions(dimensions_[i]);   in ShapePartitionIterator()
      145  shape_.dimensions(dimensions_[i]) - partition[i].first;   in GetPartition()
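GetPartition() hands out contiguous [start, size) chunks of the partitioned outer (major-most) dimensions, with the final chunk absorbing any remainder (line 145). A simplified sketch of that chunking arithmetic for a single dimension; the real iterator partitions several major-most dimensions at once, so this is illustrative only:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Split a dimension of size dim_size into partition_count contiguous
    // [start, size) chunks; assumes partition_count >= 1. The last chunk
    // picks up the remainder when dim_size is not evenly divisible.
    std::vector<std::pair<int64_t, int64_t>> PartitionDimension(int64_t dim_size,
                                                                int64_t partition_count) {
      const int64_t chunk = dim_size / partition_count;
      std::vector<std::pair<int64_t, int64_t>> partitions;
      for (int64_t p = 0; p < partition_count; ++p) {
        const int64_t start = p * chunk;
        const int64_t size = (p == partition_count - 1) ? dim_size - start : chunk;
        partitions.push_back({start, size});
      }
      return partitions;
    }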
|
D | parallel_loop_emitter.cc |
      43  CHECK(!shape_.IsTuple());   in EmitIndexAndSetExitBasicBlock()
      44  CHECK(!ShapeUtil::IsScalar(shape_));   in EmitIndexAndSetExitBasicBlock()
      47  const int64_t num_dims = shape_.dimensions_size();   in EmitIndexAndSetExitBasicBlock()
      51  for (int i = LayoutUtil::MinorToMajor(shape_).size() - 1; i >= 0; --i) {   in EmitIndexAndSetExitBasicBlock()
      52  const int64_t dimension = LayoutUtil::Minor(shape_.layout(), i);   in EmitIndexAndSetExitBasicBlock()
      68  /*end_index=*/shape_.dimensions(dimension),   in EmitIndexAndSetExitBasicBlock()
      80  llvm_ir::IrArray::Index array_index(array_multi_index, shape_, index_type);   in EmitIndexAndSetExitBasicBlock()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | buffer_comparator.cc |
      765  switch (shape_.element_type()) {   in CompareEqual()
      768  stream, lhs, rhs, shape_, config_, "__xla_fp16_comparison");   in CompareEqual()
      771  stream, lhs, rhs, shape_, config_, "__xla_bf16_comparison");   in CompareEqual()
      774  stream, lhs, rhs, shape_, config_, "__xla_fp32_comparison");   in CompareEqual()
      777  stream, lhs, rhs, shape_, config_, "__xla_fp64_comparison");   in CompareEqual()
      780  stream, lhs, rhs, shape_, config_, "__xla_int8_comparison");   in CompareEqual()
      783  stream, lhs, rhs, shape_, config_, "__xla_int32_comparison");   in CompareEqual()
      791  : shape_(shape), config_(config) {   in BufferComparator()
      795  int64_t prev_zero_dim_size = shape_.dimensions(0);   in BufferComparator()
      796  shape_.set_dimensions(0, prev_zero_dim_size * 2);   in BufferComparator()
      [all …]
|
D | parallel_loop_emitter.cc |
      41  shape_(shape),   in ParallelLoopEmitter()
      55  shape_(target_arrays[0].GetShape()),   in ParallelLoopEmitter()
      160  std::vector<llvm::Value*> multidim(shape_.rank(), nullptr);   in EmitLogicalIndexAndSetExitBasicBlock()
      168  auto dims = shape_.dimensions();   in EmitLogicalIndexAndSetExitBasicBlock()
      176  array_indices.emplace_back(multidim, shape_, index_type);   in EmitLogicalIndexAndSetExitBasicBlock()
      224  CHECK_EQ(shape_.dimensions().back(),   in EmitIndexAndSetExitBasicBlock()
      253  array_indices.emplace_back(linear_index_base, shape_, b_);   in EmitIndexAndSetExitBasicBlock()
      261  std::vector<llvm::Value*> multidim(shape_.rank(), nullptr);   in EmitIndexAndSetExitBasicBlock()
      263  array_indices.emplace_back(linear_index_base, multidim, shape_, b_);   in EmitIndexAndSetExitBasicBlock()
      272  array_indices.emplace_back(linear_index, shape_, b_);   in EmitIndexAndSetExitBasicBlock()
      [all …]
|
/external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/ |
D | direct_tpu_driver.cc |
      48  ::TpuAllocationShape shape_;   in GetTpuAllocationShape()
      49  shape_.size = shape.ByteSizeLong();   in GetTpuAllocationShape()
      50  shape_.bytes = malloc(shape_.size);   in GetTpuAllocationShape()
      51  if (!shape.SerializeToArray(shape_.bytes, shape_.size)) {   in GetTpuAllocationShape()
      53  free(shape_.bytes);   in GetTpuAllocationShape()
      54  shape_.size = 0;   in GetTpuAllocationShape()
      55  shape_.bytes = nullptr;   in GetTpuAllocationShape()
      57  return shape_;   in GetTpuAllocationShape()
      200  ::TpuAllocationShape shape_ = GetTpuAllocationShape(shape);   in ComputeLinearizedBytesFromShape()
      202  driver_fn_->TpuDriver_ComputeLinearizedBytesFromShape(driver_, shape_);   in ComputeLinearizedBytesFromShape()
      [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | sparse_conditional_accumulator.h |
      83  if (shape_.dims() > tensor_shape->NumElements()) {   in ValidateShape()
      85  "Shape mismatch: expected shape rank at least ", shape_.dims(),   in ValidateShape()
      89  for (int64_t i = 0; i < shape_.dims(); i++) {   in ValidateShape()
      90  if (shape_.dim_size(i) != -1 &&   in ValidateShape()
      91  shape_.dim_size(i) != tensor_shape_flat(i)) {   in ValidateShape()
      93  i, " to be ", shape_.dim_size(i),   in ValidateShape()
      99  if (shape_.dims() > 0 && shape_.dim_size(0) != -1 &&   in ValidateShape()
      102  if (tensor_idx->vec<int64_t>()(i) >= shape_.dim_size(0)) {   in ValidateShape()
      106  shape_.dim_size(0));   in ValidateShape()
      127  if (shape_.dims() > grad_dims) {   in ValidateShape()
      [all …]
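ValidateShape() treats a -1 in the accumulator's configured shape as "any size": the incoming gradient must have at least the configured rank, and every configured dimension must either be -1 or match exactly. The core comparison, extracted into a hypothetical standalone helper over plain dimension vectors (the real class works on TensorShape data):

    #include <cstdint>
    #include <vector>

    // Returns true when `actual` is compatible with `expected`, where a -1 in
    // `expected` matches any size and `actual` may have extra trailing dims.
    bool ShapeMatches(const std::vector<int64_t>& expected,
                      const std::vector<int64_t>& actual) {
      if (expected.size() > actual.size()) return false;  // rank must be at least expected
      for (size_t i = 0; i < expected.size(); ++i) {
        if (expected[i] != -1 && expected[i] != actual[i]) return false;
      }
      return true;
    }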
|
/external/tensorflow/tensorflow/core/util/sparse/ |
D | sparse_tensor.cc |
      70  result->shape_.assign(shape.begin(), shape.end());   in Create()
      102  shape_(shape.begin(), shape.end()),   in SparseTensor()
      122  DCHECK_EQ(shape_.size(), 1);   in IndicesValidVectorFastPath()
      125  const int64_t max_index = shape_[0];   in IndicesValidVectorFastPath()
      156  const int64_t* const shape_ptr = shape_.data();   in IndicesValidMatrix32BitFastPath()
      158  DCHECK_EQ(shape_.size(), 2);   in IndicesValidMatrix32BitFastPath()
      227  const int64_t* const shape_ptr = shape_.data();   in IndicesValidHelper()
      260  str_util::Join(shape_, ","), "]");   in IndicesValidHelper()
      279  if (shape_.size() == 1 && IndicesValidVectorFastPath()) {   in IndicesValid()
      294  if (shape_.size() == 1) {   in IndicesValid()
      [all …]
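The IndicesValid* paths above reduce, at their core, to a bounds check per coordinate: every coordinate of every index row must lie in [0, shape[d]). The full implementation also verifies that index rows are ordered; the sketch below covers only the bounds part, over plain vectors rather than the class's tensors:

    #include <cstdint>
    #include <vector>

    // True when every index row has the same rank as `shape` and every
    // coordinate is within its dimension's bounds.
    bool IndicesInBounds(const std::vector<std::vector<int64_t>>& indices,
                         const std::vector<int64_t>& shape) {
      for (const auto& index : indices) {
        if (index.size() != shape.size()) return false;
        for (size_t d = 0; d < shape.size(); ++d) {
          if (index[d] < 0 || index[d] >= shape[d]) return false;
        }
      }
      return true;
    }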
|
/external/tensorflow/tensorflow/compiler/tf2xla/ |
D | xla_resource.cc |
      72  shape_(std::move(shape)),   in XlaResource()
      84  /*name=*/absl::StrCat("TensorArrayGrad: ", name_), type_, shape_,   in XlaResource()
      102  if (initialized() && shape_ != shape) {   in SetTypeAndShape()
      107  shape_.DebugString(), ", new shape is ", shape.DebugString(),   in SetTypeAndShape()
      111  shape_ = shape;   in SetTypeAndShape()
      136  xla::Broadcast(XlaHelpers::Zero(builder, type_), shape_.dim_sizes());   in SetZeroValue()
      142  ta_shape.AppendShape(shape_);   in SetZeroValue()
      150  ta_shape.AppendShape(shape_);   in SetZeroValue()
      175  ta_shape.AppendShape(shape_);   in GetOrCreateTensorArrayGradient()
      181  type_, shape_, gradient_value, max_array_size_,   in GetOrCreateTensorArrayGradient()
|
/external/tensorflow/tensorflow/compiler/tf2tensorrt/convert/ |
D | weights.cc |
      31  : shape_(0, DimsAdapter::StorageType{}), type_(type), volume_(0) {}   in TRT_ShapedWeights()
      36  weights.shape_ = dims;   in CreateWithTensor()
      38  weights.volume_ = weights.shape_.Volume();   in CreateWithTensor()
      39  if (weights.shape_.NumDims() == 0) {   in CreateWithTensor()
      40  DCHECK(weights.shape_.IsEmpty() || weights.shape_.IsScalar());   in CreateWithTensor()
      51  VLOG(2) << "Changing shape from " << shape_.DebugString() << ", to "   in SetShape()
      55  shape_ = std::move(dims);   in SetShape()
      79  "TRT_ShapedWeights(shape=", shape_.DebugString(),   in DebugString()
|
/external/tensorflow/tensorflow/lite/delegates/gpu/common/task/ |
D | tensor_desc.h |
      156  shape_ = BHWDC(new_shape.b, new_shape.h, new_shape.w, 1, new_shape.c);   in SetBHWCShape()
      158  void SetBHWDCShape(const BHWDC& new_shape) { shape_ = new_shape; }   in SetBHWDCShape()
      160  return BHWC(shape_.b, shape_.h, shape_.w, shape_.c);   in GetBHWCShape()
      162  BHWDC GetBHWDCShape() const { return shape_; }   in GetBHWDCShape()
      288  BHWDC shape_;   variable
      315  shape_ = BHWDC(src.shape.b, src.shape.h, src.shape.w, 1, src.shape.c);   in UploadData()
      321  dst->shape = BHWC(shape_.b, shape_.h, shape_.w, shape_.c);   in DownloadData()
      328  shape_ = src.shape;   in UploadData()
      334  dst->shape = shape_;   in DownloadData()
      341  data_.resize(GetSizeInBytesForShape(shape_));   in UploadData()
      [all …]
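SetBHWCShape/GetBHWCShape simply store a 4-D BHWC shape inside the 5-D BHWDC struct with the depth pinned to 1, and drop the depth on the way back. A minimal sketch of that round trip; the struct definitions here are illustrative stand-ins for the task library's types:

    struct BHWC { int b, h, w, c; };
    struct BHWDC { int b, h, w, d, c; };

    // BHWC -> BHWDC with depth fixed to 1, as at lines 156 and 315.
    BHWDC ToBHWDC(const BHWC& s) { return BHWDC{s.b, s.h, s.w, /*d=*/1, s.c}; }

    // BHWDC -> BHWC, discarding the depth, as at lines 160 and 321.
    BHWC ToBHWC(const BHWDC& s) { return BHWC{s.b, s.h, s.w, s.c}; }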
|
/external/libtextclassifier/native/utils/ |
D | tensor-view.h |
      36  : data_(data), shape_(shape), size_(internal::NumberOfElements(shape)) {}   in TensorView()
      46  const std::vector<int>& shape() const { return shape_; }   in shape()
      48  int dim(int i) const { return shape_[i]; }   in dim()
      50  int dims() const { return shape_.size(); }   in dims()
      71  const std::vector<int> shape_;   variable
|
/external/tensorflow/tensorflow/python/kernel_tests/linalg/ |
D | qr_op_test.py |
      78  def _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_):   argument
      129  low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      133  size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      161  CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))
      188  def _GetQrGradOpTest(dtype_, shape_, full_matrices_):   argument
      191  a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
      194  low=-1.0, high=1.0, size=shape_).astype(dtype_)
      246  for shape_ in self.shapes:
      251  low=-1.0, high=1.0, size=shape_).astype(np.float32)
      259  name="QR_cpu_{shape}".format(shape=shape_))
      [all …]
|
D | svd_op_test.py |
      120  def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,   argument
      179  low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      183  size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      220  CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]), tol)
      222  min(shape_[-2:]), tol)
      257  def _GetSvdGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):   argument
      264  a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
      267  low=-1.0, high=1.0, size=shape_).astype(dtype_)
      301  def _GetSvdGradGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):   argument
      306  a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
      [all …]
|
/external/tensorflow/tensorflow/core/framework/ |
D | tensor.cc |
      699  Tensor::Tensor(DataType type) : shape_(type), buf_(nullptr) {}   in Tensor()
      702  : shape_(shape), buf_(buf) {   in Tensor()
      709  : shape_(std::move(shape)), buf_(buf.release()) {   in Tensor()
      715  shape_.num_elements() == 0;   in IsInitialized()
      753  shape_ = shape;   in BitcastFrom()
      754  shape_.set_data_type(dtype);   in BitcastFrom()
      818  : shape_(shape), buf_(nullptr) {   in Tensor()
      821  if (shape_.num_elements() > 0 || a->AllocatesOpaqueHandle()) {   in Tensor()
      832  : shape_(shape), buf_(nullptr) {   in Tensor()
      835  if (shape_.num_elements() > 0 || a->AllocatesOpaqueHandle()) {   in Tensor()
      [all …]
|
/external/tensorflow/tensorflow/c/experimental/saved_model/core/ |
D | tensor_spec.cc |
      26  : shape_(std::initializer_list<int64_t>()), dtype_(DT_FLOAT) {}   in TensorSpec()
      29  : shape_(std::move(shape)), dtype_(dtype) {}   in TensorSpec()
      32  : shape_(proto.shape()), dtype_(proto.dtype()) {}   in TensorSpec()
      34  const PartialTensorShape& TensorSpec::shape() const { return shape_; }   in shape()
|
/external/tensorflow/tensorflow/lite/delegates/xnnpack/ |
D | leaky_relu_tester.h |
      38  shape_ = std::vector<int32_t>(shape.begin(), shape.end());   in Shape()
      39  size_ = LeakyReluTester::ComputeSize(shape_);   in Shape()
      43  inline const std::vector<int32_t>& Shape() const { return shape_; }   in Shape()
      61  std::vector<int32_t> shape_;   variable
|