/external/tensorflow/tensorflow/compiler/xla/ |
D | shape_layout.cc |
    26  if (!ShapeUtil::Compatible(other_shape, shape_)) {  in CopyLayoutFromShape()
    31  shape_ = other_shape;  in CopyLayoutFromShape()
    36  if (!ShapeUtil::Compatible(*to_shape, shape_)) {  in AssignLayoutToShape()
    41  *to_shape = shape_;  in AssignLayoutToShape()
    46  LayoutUtil::SetToDefaultLayout(&shape_);  in SetToDefaultLayout()
    55  return equal(shape, shape_);  in MatchesLayoutInShape()
    60  CHECK(!shape_.IsTuple());  in layout()
    61  return shape_.layout();  in layout()
    64  void ShapeLayout::Clear() { LayoutUtil::ClearLayout(&shape_); }  in Clear()
    66  bool ShapeLayout::LayoutIsSet() const { return LayoutUtil::HasLayout(shape_); }  in LayoutIsSet()
    [all …]
|
D | shape_layout.h |
    38  explicit ShapeLayout(const Shape& shape) : shape_(shape) {}  in ShapeLayout()
    62  const Shape& shape() const { return shape_; }  in shape()
    81  string ToString() const { return ShapeUtil::HumanStringWithLayout(shape_); }  in ToString()
    88  Shape shape_;
|
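The two shape_layout entries above are the pattern behind these hits: ShapeLayout owns a Shape (shape_) and only transfers a layout to or from a shape whose dimensions are compatible. Below is a minimal standalone sketch of that check-then-copy pattern; Shape and Compatible here are simplified stand-ins for the real xla classes, not the actual API.

    #include <cassert>
    #include <vector>

    // Simplified stand-ins for xla::Shape and ShapeUtil::Compatible, used only
    // to illustrate the "check compatibility, then copy the layout" pattern.
    struct Shape {
      std::vector<int> dimensions;      // logical sizes
      std::vector<int> minor_to_major;  // layout: physical ordering of dimensions
    };

    static bool Compatible(const Shape& a, const Shape& b) {
      return a.dimensions == b.dimensions;  // layouts may differ
    }

    class ShapeLayout {
     public:
      explicit ShapeLayout(const Shape& shape) : shape_(shape) {}

      // Analogous to ShapeLayout::CopyLayoutFromShape: reject incompatible shapes.
      bool CopyLayoutFromShape(const Shape& other_shape) {
        if (!Compatible(other_shape, shape_)) return false;
        shape_ = other_shape;
        return true;
      }

      // Analogous to ShapeLayout::AssignLayoutToShape.
      bool AssignLayoutToShape(Shape* to_shape) const {
        if (!Compatible(*to_shape, shape_)) return false;
        *to_shape = shape_;
        return true;
      }

     private:
      Shape shape_;
    };

    int main() {
      ShapeLayout layout(Shape{{2, 2}, {1, 0}});
      Shape target{{2, 2}, {0, 1}};
      assert(layout.AssignLayoutToShape(&target));  // dimensions match: layout copied
      assert(target.minor_to_major == (std::vector<int>{1, 0}));
    }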
D | shape_tree.h |
    123  const Shape& shape() const { return *shape_; }  in shape()
    135  shape_ = shape;  in replace_shape_ptr()
    336  const Shape* shape_;  variable
    486  shape_(shape_storage_.get()) {  in ShapeTree()
    487  const int64 count = CountSubshapes(*shape_);  in ShapeTree()
    493  InitChildren(*shape_, &nodes_[0], &index_table_[0]);  in ShapeTree()
    497  ShapeTree<T>::ShapeTree(const Shape* shape) : shape_(shape) {  in ShapeTree()
    498  const int64 count = CountSubshapes(*shape_);  in ShapeTree()
    504  InitChildren(*shape_, &nodes_[0], &index_table_[0]);  in ShapeTree()
    509  : shape_storage_(shape), shape_(shape_storage_.get()) {  in ShapeTree()
    [all …]
|
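The ShapeTree constructors above first count the subshapes of a possibly nested tuple shape (CountSubshapes) and then size their node and index tables from that count. A small standalone sketch of the counting step, using a simplified tuple-or-array shape in place of xla::Shape:

    #include <iostream>
    #include <vector>

    // Simplified stand-in for xla::Shape: either an array or a tuple of subshapes.
    struct Shape {
      bool is_tuple = false;
      std::vector<Shape> tuple_shapes;  // used only when is_tuple is true
    };

    // Analogous to the CountSubshapes() helper used by the ShapeTree constructors:
    // every shape counts itself, and a tuple additionally counts its children.
    static int CountSubshapes(const Shape& shape) {
      int count = 1;
      if (shape.is_tuple) {
        for (const Shape& sub : shape.tuple_shapes) count += CountSubshapes(sub);
      }
      return count;
    }

    int main() {
      Shape leaf;                                           // f32[2,2]-like array shape
      Shape tuple{true, {leaf, Shape{true, {leaf, leaf}}}};
      std::cout << CountSubshapes(tuple) << "\n";           // 5: outer tuple + leaf + inner tuple + 2 leaves
    }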
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/ |
D | tensor.h |
    60  int Width() const { return shape_.w; }  in Width()
    61  int Height() const { return shape_.h; }  in Height()
    62  int Depth() const { return shape_.d; }  in Depth()
    63  int Channels() const { return shape_.c; }  in Channels()
    64  int Slices() const { return IntegralDivideRoundUp(shape_.c, 4); }  in Slices()
    65  int Batch() const { return shape_.b; }  in Batch()
    69  return int4(shape_.w * shape_.b, shape_.h, Slices(), shape_.b);  in GetWBatchedHSB()
    72  return int4(shape_.w * shape_.b, shape_.h, shape_.d, Slices());  in GetWBatchedHDS()
    75  int4 GetWHSB() const { return int4(shape_.w, shape_.h, Slices(), shape_.b); }  in GetWHSB()
    76  int4 GetWHDS() const { return int4(shape_.w, shape_.h, shape_.d, Slices()); }  in GetWHDS()
    [all …]
|
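The OpenCL tensor stores a BHWDC-style shape and reports Slices() as the channel count divided by 4, rounded up, because the GPU kernels pack channels into groups of four; the GetW*() helpers then bundle those extents into an int4. A standalone sketch of that rounding and packing with simplified types (the depth dimension is omitted):

    #include <iostream>

    // Stand-in for tflite::gpu::IntegralDivideRoundUp: ceil(n / d) for positive ints.
    static int IntegralDivideRoundUp(int n, int d) { return (n + d - 1) / d; }

    struct int4 { int x, y, z, w; };

    // Simplified BHWC shape plus the accessors shown in the listing above.
    struct TensorShapeBHWC {
      int b, h, w, c;
      int Slices() const { return IntegralDivideRoundUp(c, 4); }  // channels packed 4-wide
      int4 GetWHSB() const { return {w, h, Slices(), b}; }
      int4 GetWBatchedHSB() const { return {w * b, h, Slices(), b}; }
    };

    int main() {
      TensorShapeBHWC shape{2, 8, 16, 10};
      std::cout << shape.Slices() << "\n";            // 10 channels -> 3 slices of 4
      std::cout << shape.GetWBatchedHSB().x << "\n";  // width folded with batch: 16 * 2 = 32
    }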
D | tensor.cc |
    84  shape_(shape.b, shape.h, shape.w, 1, shape.c),  in Tensor()
    92  shape_(shape),  in Tensor()
    100  shape_(shape.b, shape.h, shape.w, 1, shape.c),  in Tensor()
    108  shape_(shape),  in Tensor()
    115  shape_(tensor.shape_),  in Tensor()
    126  std::swap(shape_, tensor.shape_);  in operator =()
    149  return {shape_.w * shape_.b, shape_.h, shape_.d * Slices()};  in GetFullTensorRegion()
    151  return {shape_.w * shape_.b * shape_.d, shape_.h * Slices(), 1};  in GetFullTensorRegion()
    153  return {shape_.w * shape_.b * shape_.d, shape_.h, 1};  in GetFullTensorRegion()
    160  if (shape.b != shape_.b) {  in IsValid()
    [all …]
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | defuser_test.cc |
    42  const Shape shape_ = ShapeUtil::MakeShape(F32, {2, 2});  member in xla::__anon9f6cf5ff0111::DefuserTest
    49  builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));  in TEST_F()
    51  builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));  in TEST_F()
    53  HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));  in TEST_F()
    65  builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));  in TEST_F()
    67  builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));  in TEST_F()
    69  HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));  in TEST_F()
    89  builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));  in TEST_F()
    91  builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));  in TEST_F()
    93  HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));  in TEST_F()
    [all …]
|
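The defuser test builds small HLO graphs from a fixed 2x2 F32 shape_. The sketch below assembles the same p0 + p1 computation with the XLA builder calls visible in the listing; it assumes the XLA headers from this tree and omits the HloModule and test-fixture plumbing, so treat it as an illustrative fragment rather than the test itself.

    #include <memory>

    #include "tensorflow/compiler/xla/service/hlo_computation.h"
    #include "tensorflow/compiler/xla/service/hlo_instruction.h"
    #include "tensorflow/compiler/xla/service/hlo_opcode.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // Builds the "p0 + p1" computation the test constructs from shape_.
    std::unique_ptr<HloComputation> BuildAddComputation() {
      const Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
      HloComputation::Builder builder("add_computation");
      HloInstruction* param0 = builder.AddInstruction(
          HloInstruction::CreateParameter(0, shape, "p0"));
      HloInstruction* param1 = builder.AddInstruction(
          HloInstruction::CreateParameter(1, shape, "p1"));
      builder.AddInstruction(
          HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
      return builder.Build();  // the add instruction, added last, becomes the root
    }

    }  // namespace xla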
/external/tensorflow/tensorflow/lite/delegates/gpu/gl/kernels/ |
D | converter.cc |
    108  shape_ = BHWC(output_def.dimensions.b, output_def.dimensions.h,  in Init()
    110  if (shape_.b != 1) {  in Init()
    156  if (input_ssbo.bytes_size() != SizeInBytesDHWC4(shape_)) {  in Convert()
    160  if (output_ssbo.bytes_size() != SizeInBytesBHWC(shape_)) {  in Convert()
    167  int4(static_cast<int32_t>(shape_.w), static_cast<int32_t>(shape_.h),  in Convert()
    168  static_cast<int32_t>(shape_.c), 0)}));  in Convert()
    171  return Dispatch(uint3(shape_.w, shape_.h, shape_.c));  in Convert()
    174  BHWC shape_;  member in tflite::gpu::gl::__anondba7f7c60111::FromTensorConverter
    196  shape_ = BHWC(output_def.dimensions.b, output_def.dimensions.h,  in Init()
    198  if (shape_.b != 1) {  in Init()
    [all …]
|
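FromTensorConverter::Convert rejects buffers whose byte size does not match what it expects: the input SSBO holds the GPU-side DHWC4 layout (channels padded up to a multiple of 4) and the output holds dense BHWC. The sketch below spells out the two size formulas those checks imply; SizeInBytesBHWC and SizeInBytesDHWC4 here are my assumptions about the helpers (float32 elements), not the library code.

    #include <cstddef>
    #include <iostream>

    struct BHWC { int b, h, w, c; };

    static int AlignByN(int value, int n) { return ((value + n - 1) / n) * n; }

    // Dense BHWC buffer: every channel stored, no padding.
    static size_t SizeInBytesBHWC(const BHWC& s) {
      return sizeof(float) * static_cast<size_t>(s.b) * s.h * s.w * s.c;
    }

    // DHWC4-style GPU buffer: channels rounded up to a multiple of 4 (float4 slices).
    static size_t SizeInBytesDHWC4(const BHWC& s) {
      return sizeof(float) * static_cast<size_t>(s.b) * s.h * s.w * AlignByN(s.c, 4);
    }

    int main() {
      BHWC shape{1, 32, 32, 3};
      std::cout << SizeInBytesBHWC(shape) << "\n";   // 1*32*32*3*4  = 12288 bytes
      std::cout << SizeInBytesDHWC4(shape) << "\n";  // 1*32*32*4*4  = 16384 bytes (padded)
    }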
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/ |
D | loop_emitter.cc |
    36  : body_emitter_(body_emitter), shape_(shape), b_(b) {}  in LoopEmitter()
    47  shape_(target_array.GetShape()),
    76  shape_(target_arrays[0].GetShape()),  in LoopEmitter()
    81  CHECK(ShapeUtil::SameDimensions(shape_, array.GetShape()))  in LoopEmitter()
    82  << ": '" << shape_.ShortDebugString() << "' does not match '"  in LoopEmitter()
    90  if (ShapeUtil::IsScalar(shape_)) {  in EmitIndexAndSetExitBasicBlock()
    101  std::vector<llvm::Value*> array_multi_index(shape_.dimensions_size());  in EmitIndexAndSetExitBasicBlock()
    102  for (int i = 0; i < LayoutUtil::MinorToMajor(shape_).size(); ++i) {  in EmitIndexAndSetExitBasicBlock()
    103  int64 dimension = LayoutUtil::Major(shape_.layout(), i);  in EmitIndexAndSetExitBasicBlock()
    106  /*end_index=*/shape_.dimensions(dimension),  in EmitIndexAndSetExitBasicBlock()
    [all …]
|
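LoopEmitter::EmitIndexAndSetExitBasicBlock walks the shape's dimensions from most-major to most-minor, so the innermost generated loop iterates over the minor-most (fastest-varying) dimension of the layout. A standalone sketch of that ordering: an XLA layout's minor_to_major list, reversed, gives the loop nesting order.

    #include <iostream>
    #include <vector>

    // minor_to_major lists physical dimension order, minor-most first,
    // e.g. {1, 0} is row-major for a 2-D shape.
    static std::vector<int> LoopOrderMajorToMinor(const std::vector<int>& minor_to_major) {
      return std::vector<int>(minor_to_major.rbegin(), minor_to_major.rend());
      // outermost loop first, innermost (minor-most) dimension last
    }

    int main() {
      std::vector<int> minor_to_major = {1, 0};  // row-major 2-D layout
      for (int dim : LoopOrderMajorToMinor(minor_to_major)) {
        std::cout << "loop over logical dimension " << dim << "\n";  // prints 0 then 1
      }
    }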
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | matrix_band_part_op_test.py |
    46  def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_):  argument
    50  mat = np.ones(shape_).astype(dtype_)
    52  for lower in -1, 0, 1, shape_[-2] - 1:
    53  for upper in -1, 0, 1, shape_[-1] - 1:
    76  def _GetMatrixBandPartGradTest(dtype_, batch_shape_, shape_):  argument
    80  shape = batch_shape_ + shape_
    83  for lower in -1, 0, 1, shape_[-2] - 1:
    84  for upper in -1, 0, 1, shape_[-1] - 1:
    113  for shape_ in self.shapes:
    118  matrix = variables.Variable(array_ops.ones(shape_))
    [all …]
|
D | qr_op_test.py |
    79  def _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_):  argument
    128  low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    132  size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    160  CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))
    171  def _GetQrGradOpTest(dtype_, shape_, full_matrices_):  argument
    176  a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
    179  low=-1.0, high=1.0, size=shape_).astype(dtype_)
    192  low=-1.0, high=1.0, size=shape_).astype(dtype_)
    195  low=-1.0, high=1.0, size=shape_).astype(dtype_)
    231  for shape_ in self.shapes:
    [all …]
|
D | svd_op_test.py |
    92  def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,  argument
    148  low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    152  size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    188  CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]), tol)
    191  min(shape_[-2:]), tol)
    226  def _GetSvdGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):  argument
    231  a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
    234  low=-1.0, high=1.0, size=shape_).astype(dtype_)
    255  low=-1.0, high=1.0, size=shape_).astype(dtype_)
    258  low=-1.0, high=1.0, size=shape_).astype(dtype_)
    [all …]
|
D | linalg_grad_test.py |
    62  def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):  argument
    70  size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    103  shape_,  argument
    117  size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    122  size=np.prod(shape_)).reshape(shape_).astype(dtype_)
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | shape_partition.cc |
    29  for (int i = shape_.layout().minor_to_major_size() - 1; i >= 0; --i) {  in Run()
    30  const int64 dimension = shape_.layout().minor_to_major(i);  in Run()
    32  outer_dim_size *= shape_.dimensions(dimension);  in Run()
    55  std::min(static_cast<int64>(shape_.dimensions(outer_dims[i])),  in Run()
    79  shape_.dimensions(outer_dims[i]) - dimension_partition_counts[i],  in Run()
    101  : shape_(shape),  in ShapePartitionIterator()
    108  dimensions_[i] = shape_.layout().minor_to_major(  in ShapePartitionIterator()
    109  shape_.layout().minor_to_major_size() - 1 - i);  in ShapePartitionIterator()
    116  const int64 dim_size = shape_.dimensions(dimensions_[i]);  in ShapePartitionIterator()
    144  shape_.dimensions(dimensions_[i]) - partition[i].first;  in GetPartition()
|
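ShapePartitionIterator splits the outer-most (major) dimensions of a shape into roughly equal chunks, and GetPartition reports each chunk as a start offset plus a size, with the last chunk absorbing any remainder. A standalone sketch of that per-dimension split (not the actual ShapePartitionIterator API):

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Splits a dimension of size dim_size into partition_count chunks.
    // Returns (start, size) per chunk; the final chunk takes the remainder,
    // mirroring what ShapePartitionIterator::GetPartition computes per dimension.
    static std::vector<std::pair<int64_t, int64_t>> SplitDimension(int64_t dim_size,
                                                                   int64_t partition_count) {
      const int64_t base = dim_size / partition_count;
      std::vector<std::pair<int64_t, int64_t>> chunks;
      for (int64_t i = 0; i < partition_count; ++i) {
        const int64_t start = i * base;
        const int64_t size = (i + 1 == partition_count) ? dim_size - start : base;
        chunks.push_back({start, size});
      }
      return chunks;
    }

    int main() {
      for (const auto& [start, size] : SplitDimension(/*dim_size=*/10, /*partition_count=*/3)) {
        std::cout << "[" << start << ", " << start + size << ")\n";  // [0,3) [3,6) [6,10)
      }
    }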
D | parallel_loop_emitter.cc |
    37  CHECK(!shape_.IsTuple());  in EmitIndexAndSetExitBasicBlock()
    38  CHECK(!ShapeUtil::IsScalar(shape_));  in EmitIndexAndSetExitBasicBlock()
    41  const int64 num_dims = shape_.dimensions_size();  in EmitIndexAndSetExitBasicBlock()
    45  for (int i = LayoutUtil::MinorToMajor(shape_).size() - 1; i >= 0; --i) {  in EmitIndexAndSetExitBasicBlock()
    46  const int64 dimension = LayoutUtil::Minor(shape_.layout(), i);  in EmitIndexAndSetExitBasicBlock()
    62  /*end_index=*/shape_.dimensions(dimension),  in EmitIndexAndSetExitBasicBlock()
    74  llvm_ir::IrArray::Index array_index(array_multi_index, shape_, index_type);  in EmitIndexAndSetExitBasicBlock()
|
D | xfeed_manager_test.cc |
    36  : shape_(ShapeUtil::MakeShape(U8, {length})),  in TestInfeedBuffer()
    48  EXPECT_EQ(expect_shape_match_, ShapeUtil::Equal(shape_, shape.ValueOrDie()))  in Done()
    49  << "want " << ShapeUtil::HumanString(shape_) << " "  in Done()
    54  const Shape& shape() const { return shape_; }  in shape()
    57  Shape shape_;  member in xla::__anonce6b4c6e0111::TestInfeedBuffer
|
D | shape_partition.h |
    67  ShapePartitionAssigner(const Shape& shape) : shape_(shape) {}  in ShapePartitionAssigner()
    77  const Shape& shape_;
    94  const Shape& shape_;
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | buffer_comparator.cc |
    694  switch (shape_.element_type()) {  in CompareEqual()
    697  stream, lhs, rhs, shape_, config_, "__xla_fp16_comparison");  in CompareEqual()
    700  stream, lhs, rhs, shape_, config_, "__xla_fp32_comparison");  in CompareEqual()
    703  stream, lhs, rhs, shape_, config_, "__xla_fp64_comparison");  in CompareEqual()
    706  stream, lhs, rhs, shape_, config_, "__xla_int8_comparison");  in CompareEqual()
    714  : shape_(shape), config_(config) {  in BufferComparator()
    718  int64 prev_zero_dim_size = shape_.dimensions(0);  in BufferComparator()
    719  shape_.set_dimensions(0, prev_zero_dim_size * 2);  in BufferComparator()
    722  if (shape_.element_type() == PrimitiveType::C64) {  in BufferComparator()
    724  shape_.set_element_type(PrimitiveType::F32);  in BufferComparator()
    [all …]
|
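BufferComparator has no complex-number kernel, so for C64 inputs it doubles dimension 0 and switches the element type to F32: each complex64 value is exactly two float32 values, so the element count doubles while the buffer's byte size stays the same. A tiny standalone sketch of that bookkeeping:

    #include <cstdint>
    #include <iostream>

    // Element/byte bookkeeping behind the C64 -> F32 trick in BufferComparator:
    // reinterpret N complex64 values as 2N float32 values over the same bytes.
    int main() {
      const int64_t c64_elements = 1024;
      const int64_t c64_bytes = c64_elements * 8;     // complex64 = 2 * float32 = 8 bytes

      const int64_t f32_elements = c64_elements * 2;  // dimension 0 doubled
      const int64_t f32_bytes = f32_elements * 4;

      std::cout << (c64_bytes == f32_bytes) << "\n";  // 1: same underlying buffer size
    }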
/external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/ |
D | direct_tpu_driver.cc |
    42  ::TpuAllocationShape shape_;  in GetTpuAllocationShape()
    43  shape_.size = shape.ByteSizeLong();  in GetTpuAllocationShape()
    44  shape_.bytes = malloc(shape_.size);  in GetTpuAllocationShape()
    45  if (!shape.SerializeToArray(shape_.bytes, shape_.size)) {  in GetTpuAllocationShape()
    47  free(shape_.bytes);  in GetTpuAllocationShape()
    48  shape_.size = 0;  in GetTpuAllocationShape()
    49  shape_.bytes = nullptr;  in GetTpuAllocationShape()
    51  return shape_;  in GetTpuAllocationShape()
    194  ::TpuAllocationShape shape_ = GetTpuAllocationShape(shape);  in ComputeLinearizedBytesFromShape()
    196  driver_fn_->TpuDriver_ComputeLinearizedBytesFromShape(driver_, shape_);  in ComputeLinearizedBytesFromShape()
    [all …]
|
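GetTpuAllocationShape serializes the shape proto into a malloc'd buffer and, if serialization fails, frees the buffer and hands back an empty {nullptr, 0} allocation that the caller can detect. A standalone sketch of that ownership and error-path pattern, with a fake serializer standing in for the protobuf call:

    #include <cstdlib>
    #include <cstring>
    #include <iostream>

    struct TpuAllocationShape {  // matches the {bytes, size} pair filled in above
      void* bytes = nullptr;
      size_t size = 0;
    };

    // Stand-in for Shape::SerializeToArray; the real code serializes a protobuf.
    static bool FakeSerializeToArray(const char* payload, void* out, size_t size) {
      std::memcpy(out, payload, size);
      return true;
    }

    static TpuAllocationShape GetTpuAllocationShape(const char* payload) {
      TpuAllocationShape shape;
      shape.size = std::strlen(payload);
      shape.bytes = std::malloc(shape.size);
      if (!FakeSerializeToArray(payload, shape.bytes, shape.size)) {
        std::free(shape.bytes);  // serialization failed: release and return empty
        shape.size = 0;
        shape.bytes = nullptr;
      }
      return shape;  // caller owns shape.bytes and must free() it
    }

    int main() {
      TpuAllocationShape s = GetTpuAllocationShape("f32[2,2]");
      std::cout << s.size << "\n";  // 8
      std::free(s.bytes);
    }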
/external/tensorflow/tensorflow/core/kernels/ |
D | sparse_conditional_accumulator.h |
    95  if (shape_.dims() > tensor_shape->NumElements()) {  in ValidateShape()
    97  "Shape mismatch: expected shape rank at least ", shape_.dims(),  in ValidateShape()
    101  for (int64 i = 0; i < shape_.dims(); i++) {  in ValidateShape()
    102  if (shape_.dim_size(i) != -1 &&  in ValidateShape()
    103  shape_.dim_size(i) != tensor_shape_flat(i)) {  in ValidateShape()
    105  i, " to be ", shape_.dim_size(i),  in ValidateShape()
    111  if (shape_.dims() > 0 && shape_.dim_size(0) != -1 &&  in ValidateShape()
    114  if (tensor_idx->vec<int64>()(i) >= shape_.dim_size(0)) {  in ValidateShape()
    118  shape_.dim_size(0));  in ValidateShape()
    139  if (shape_.dims() > grad_dims) {  in ValidateShape()
    [all …]
|
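ValidateShape in the sparse accumulator treats a dimension of -1 in the configured shape_ as a wildcard: a gradient dimension must match the expected size unless the expected size is -1. A standalone sketch of that wildcard comparison:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // -1 in the expected shape means "any size", mirroring the dim_size(i) != -1
    // checks in SparseConditionalAccumulator::ValidateShape.
    static bool ShapeMatches(const std::vector<int64_t>& expected,
                             const std::vector<int64_t>& actual) {
      if (expected.size() > actual.size()) return false;  // rank must be at least expected
      for (size_t i = 0; i < expected.size(); ++i) {
        if (expected[i] != -1 && expected[i] != actual[i]) return false;
      }
      return true;
    }

    int main() {
      std::cout << ShapeMatches({-1, 128}, {32, 128}) << "\n";  // 1: wildcard batch dim
      std::cout << ShapeMatches({-1, 128}, {32, 64}) << "\n";   // 0: second dim mismatch
    }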
/external/libtextclassifier/native/utils/ |
D | tensor-view.h |
    36  : data_(data), shape_(shape), size_(internal::NumberOfElements(shape)) {}  in TensorView()
    46  const std::vector<int>& shape() const { return shape_; }  in shape()
    48  int dim(int i) const { return shape_[i]; }  in dim()
    50  int dims() const { return shape_.size(); }  in dims()
    71  const std::vector<int> shape_;  variable
|
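TensorView is a non-owning view over a flat buffer plus an int shape vector, and its size is just the product of the dimensions (internal::NumberOfElements). A standalone sketch of that view and the dim()/dims() accessors shown above (simplified, not the libtextclassifier header):

    #include <iostream>
    #include <vector>

    // Stand-in for internal::NumberOfElements: product of all dimensions.
    static int NumberOfElements(const std::vector<int>& shape) {
      int size = 1;
      for (int dim : shape) size *= dim;
      return size;
    }

    template <typename T>
    class TensorView {
     public:
      TensorView(const T* data, const std::vector<int>& shape)
          : data_(data), shape_(shape), size_(NumberOfElements(shape)) {}

      const T* data() const { return data_; }
      const std::vector<int>& shape() const { return shape_; }
      int dim(int i) const { return shape_[i]; }
      int dims() const { return static_cast<int>(shape_.size()); }
      int size() const { return size_; }

     private:
      const T* data_;  // not owned
      const std::vector<int> shape_;
      const int size_;
    };

    int main() {
      std::vector<float> buffer(6);
      TensorView<float> view(buffer.data(), {2, 3});
      std::cout << view.dims() << " " << view.size() << "\n";  // 2 6
    }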
/external/tensorflow/tensorflow/compiler/tf2xla/ |
D | xla_resource.cc |
    72  shape_(std::move(shape)),  in XlaResource()
    83  /*name=*/absl::StrCat("TensorArrayGrad: ", name_), type_, shape_,  in XlaResource()
    101  if (initialized() && shape_ != shape) {  in SetTypeAndShape()
    105  shape_.DebugString(), ", new shape is ",  in SetTypeAndShape()
    109  shape_ = shape;  in SetTypeAndShape()
    132  xla::Broadcast(XlaHelpers::Zero(builder, type_), shape_.dim_sizes());  in SetZeroValue()
    138  ta_shape.AppendShape(shape_);  in SetZeroValue()
    146  ta_shape.AppendShape(shape_);  in SetZeroValue()
    171  ta_shape.AppendShape(shape_);  in GetOrCreateTensorArrayGradient()
    177  type_, shape_, gradient_value, max_array_size_,  in GetOrCreateTensorArrayGradient()
|
/external/tensorflow/tensorflow/core/framework/ |
D | tensor.cc |
    645  Tensor::Tensor(DataType type) : shape_(type), buf_(nullptr) {}  in Tensor()
    648  : shape_(shape), buf_(buf) {  in Tensor()
    655  shape_.num_elements() == 0;  in IsInitialized()
    683  shape_ = shape;  in CopyFromInternal()
    707  shape_ = shape;  in BitcastFrom()
    708  shape_.set_data_type(dtype);  in BitcastFrom()
    772  : shape_(shape), buf_(nullptr) {  in Tensor()
    775  if (shape_.num_elements() > 0 || a->AllocatesOpaqueHandle()) {  in Tensor()
    786  : shape_(shape), buf_(nullptr) {  in Tensor()
    789  if (shape_.num_elements() > 0 || a->AllocatesOpaqueHandle()) {  in Tensor()
    [all …]
|
D | tensor.h |
    245  DataType dtype() const { return shape_.data_type(); }  in dtype()
    248  const TensorShape& shape() const { return shape_; }  in shape()
    645  void set_dtype(DataType t) { shape_.set_data_type(t); }  in set_dtype()
    653  TensorShape shape_;  variable
    683  shape_ = shape;  in set_shape()
    747  CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));  in reinterpret_last_dimension()
    750  dims[d] = shape_.dim_sizes()[d];  in reinterpret_last_dimension()
    763  CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));  in reinterpret_last_dimension()
    766  dims[d] = shape_.dim_sizes()[d];  in reinterpret_last_dimension()
    883  return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));  in flat_inner_dims()
    [all …]
|
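Tensor::flat_inner_dims<T, NDIMS>() reshapes a tensor so the trailing NDIMS - 1 dimensions are kept and all leading dimensions collapse into one; ComputeFlatInnerDims does the collapsing on the dim_sizes vector before shaped<T, NDIMS>() is called. A standalone sketch of that collapsing step (it assumes the rank is at least NDIMS - 1):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Collapses leading dimensions so the result has exactly num_out_dims entries,
    // keeping the trailing num_out_dims - 1 dimensions intact (the idea behind
    // Tensor::flat_inner_dims / ComputeFlatInnerDims). Assumes
    // dims.size() + 1 >= num_out_dims.
    static std::vector<int64_t> FlatInnerDims(const std::vector<int64_t>& dims,
                                              int num_out_dims) {
      std::vector<int64_t> out(num_out_dims, 1);
      const int num_collapsed = static_cast<int>(dims.size()) - (num_out_dims - 1);
      for (int i = 0; i < num_collapsed; ++i) out[0] *= dims[i];
      for (int i = 1; i < num_out_dims; ++i) out[i] = dims[num_collapsed + i - 1];
      return out;
    }

    int main() {
      for (int64_t d : FlatInnerDims({2, 3, 4, 5}, /*num_out_dims=*/2)) {
        std::cout << d << " ";  // prints: 24 5
      }
      std::cout << "\n";
    }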
/external/tensorflow/tensorflow/core/util/sparse/ |
D | sparse_tensor.cc |
    98  shape_(shape.begin(), shape.end()),  in SparseTensor()
    121  const int64* const shape_ptr = shape_.data();  in IndicesValid32BitFastPath()
    123  DCHECK_EQ(shape_.size(), 2);  in IndicesValid32BitFastPath()
    192  const int64* const shape_ptr = shape_.data();  in IndicesValidHelper()
    225  str_util::Join(shape_, ","), "]");  in IndicesValidHelper()
    255  if (shape_.size() == 2 && shape_[0] <= std::numeric_limits<int32>::max() &&  in IndicesValid()
    256  shape_[1] <= std::numeric_limits<int32>::max()) {  in IndicesValid()
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | fake_param_op.cc |
    35  OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, tensor_shape, &shape_));  in XlaFakeParamOp()
    40  ctx->SetOutput(0, xla::Zeros(b, shape_));  in Compile()
    44  xla::Shape shape_;  member in tensorflow::XlaFakeParamOp
|