/external/tensorflow/tensorflow/core/kernels/ |
D | tensor_map.h |
    69 TensorMap() : tensors_(new Tensors) {} in TensorMap()
    72 TensorMap(const TensorMap& other) : tensors_(other.tensors_) { in TensorMap()
    73 tensors_->Ref(); in TensorMap()
    76 TensorMap(TensorMap&& rhs) : tensors_(rhs.tensors_) { in TensorMap()
    77 rhs.tensors_ = nullptr; in TensorMap()
    82 tensors_->Unref();
    83 tensors_ = rhs.tensors_;
    84 tensors_->Ref();
    90 std::swap(tensors_, rhs.tensors_);
    107 return tensors_->values_; in tensors()
    [all …]
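The hits above are TensorMap's copy/move constructors and assignments, which share one reference-counted payload instead of copying the underlying container. A minimal sketch of that ref-counting idiom, with simplified stand-in types (Payload and RefCountedMap are illustrative names, not the real TensorFlow classes):

#include <map>
#include <string>
#include <utility>

// Stand-in for the ref-counted Tensors block; values_ stands in for the map of Tensors.
struct Payload {
  std::map<std::string, int> values_;
  int refcount = 1;
  void Ref() { ++refcount; }
  void Unref() { if (--refcount == 0) delete this; }
};

class RefCountedMap {
 public:
  RefCountedMap() : tensors_(new Payload) {}

  // Copy shares the payload and bumps the reference count.
  RefCountedMap(const RefCountedMap& other) : tensors_(other.tensors_) {
    tensors_->Ref();
  }

  // Move steals the pointer and leaves the source empty.
  RefCountedMap(RefCountedMap&& rhs) : tensors_(rhs.tensors_) {
    rhs.tensors_ = nullptr;
  }

  RefCountedMap& operator=(const RefCountedMap& rhs) {
    if (this == &rhs) return *this;
    tensors_->Unref();          // drop our share of the old payload
    tensors_ = rhs.tensors_;
    tensors_->Ref();            // take a share of the new one
    return *this;
  }

  RefCountedMap& operator=(RefCountedMap&& rhs) {
    if (this == &rhs) return *this;
    std::swap(tensors_, rhs.tensors_);  // rhs's destructor releases our old payload
    return *this;
  }

  ~RefCountedMap() {
    if (tensors_ != nullptr) tensors_->Unref();
  }

  std::map<std::string, int>& values() { return tensors_->values_; }

 private:
  Payload* tensors_;
};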
|
D | tensor_list.h |
    67 TensorList() : tensors_(new Tensors) {} in TensorList()
    74 tensors_(other.tensors_) { in TensorList()
    75 tensors_->Ref(); in TensorList()
    82 tensors_(rhs.tensors_) { in TensorList()
    83 rhs.tensors_ = nullptr; in TensorList()
    91 tensors_->Unref();
    92 tensors_ = rhs.tensors_;
    93 tensors_->Ref();
    102 std::swap(tensors_, rhs.tensors_);
    126 std::vector<Tensor>& tensors() { return tensors_->values_; } in tensors()
    [all …]
|
D | tensor_array.cc |
    90 if (tensors_.size() != rhs->tensors_.size()) { in CopyShapesFrom()
    93 handle_.vec<tstring>()(1), " has size ", tensors_.size(), " but rhs ", in CopyShapesFrom()
    94 rhs->handle_.vec<tstring>()(1), " has size ", rhs->tensors_.size()); in CopyShapesFrom()
    96 for (std::size_t i = 0; i < tensors_.size(); ++i) { in CopyShapesFrom()
    98 if (!rhs->tensors_[i].written) continue; in CopyShapesFrom()
    102 tensors_[i].shape = *shape_to_prepend; in CopyShapesFrom()
    103 tensors_[i].shape.AppendShape(rhs->tensors_[i].shape); in CopyShapesFrom()
    105 tensors_[i].shape = rhs->tensors_[i].shape; in CopyShapesFrom()
    111 tensors_[i].written = true; in CopyShapesFrom()
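CopyShapesFrom walks both arrays in lockstep, skips slots the source never wrote, and either copies the source shape or prepends an extra shape before appending it. A rough sketch of that shape-merging step over plain dimension vectors instead of TensorShape (the helper names are illustrative):

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

struct ElementShape {
  std::vector<int64_t> dims;
  bool written = false;
};

// Mirrors the snippet above: when prepend is given, the result is prepend's
// dims followed by the source element's dims (shape_to_prepend + AppendShape).
bool CopyShapes(std::vector<ElementShape>& lhs,
                const std::vector<ElementShape>& rhs,
                const std::optional<std::vector<int64_t>>& prepend) {
  if (lhs.size() != rhs.size()) return false;  // size mismatch is an error
  for (std::size_t i = 0; i < lhs.size(); ++i) {
    if (!rhs[i].written) continue;  // nothing to copy for unwritten slots
    if (prepend) {
      lhs[i].dims = *prepend;
      lhs[i].dims.insert(lhs[i].dims.end(), rhs[i].dims.begin(),
                         rhs[i].dims.end());
    } else {
      lhs[i].dims = rhs[i].dims;
    }
    lhs[i].written = true;
  }
  return true;
}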
|
D | tensor_array.h |
    155 tensors_(N) {} in TensorArray()
    265 return strings::StrCat("TensorArray[", tensors_.size(), "]"); in DebugString()
    277 *size = tensors_.size(); in Size()
    303 *size = is_grad_ ? marked_size_ : tensors_.size(); in PackOrConcatSize()
    340 tensors_.clear(); in ClearAndMarkClosed()
    438 std::vector<TensorAndState> tensors_ TF_GUARDED_BY(mu_);
    447 if (index < 0 || (!dynamic_size_ && index_size >= tensors_.size())) { in LockedWriteOrAggregate()
    450 index, " but array is not resizeable and size is: ", tensors_.size()); in LockedWriteOrAggregate()
    454 if (index_size >= tensors_.capacity()) { in LockedWriteOrAggregate()
    455 tensors_.reserve(2 * (index_size + 1)); in LockedWriteOrAggregate()
    [all …]
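The LockedWriteOrAggregate hits show the write path: an out-of-range index is an error unless the array is dynamically sized, in which case capacity is grown geometrically (2 * (index + 1)) before the slot is written. A condensed sketch of that bounds-check-then-grow logic, assuming a simplified Slot in place of TensorAndState:

#include <cstddef>
#include <stdexcept>
#include <vector>

struct Slot {
  bool written = false;
  int value = 0;  // stand-in for the aggregated Tensor
};

class GrowableArray {
 public:
  GrowableArray(std::size_t n, bool dynamic)
      : dynamic_size_(dynamic), tensors_(n) {}

  void Write(std::size_t index, int value) {
    // A fixed-size array rejects indices past its initial size.
    if (!dynamic_size_ && index >= tensors_.size()) {
      throw std::out_of_range("array is not resizeable");
    }
    if (dynamic_size_) {
      // Grow capacity geometrically, then extend to cover the index.
      if (index >= tensors_.capacity()) tensors_.reserve(2 * (index + 1));
      if (index >= tensors_.size()) tensors_.resize(index + 1);
    }
    tensors_[index].value = value;
    tensors_[index].written = true;
  }

  std::size_t size() const { return tensors_.size(); }

 private:
  bool dynamic_size_;
  std::vector<Slot> tensors_;
};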
|
D | ops_testutil.cc |
    91 for (auto& temp : tensors_) { in ~OpsTestBase()
    97 tensors_.clear(); in ~OpsTestBase()
    230 tensors_.push_back(input); in AddInput()
    252 tensors_.push_back(input); in AddResourceInputInternal()
|
/external/tensorflow/tensorflow/core/kernels/data/experimental/ |
D | unbatch_dataset_op.cc |
    133 out_tensors->reserve(tensors_.size()); in GetNextInternal()
    134 for (int i = 0; i < tensors_.size(); ++i) { in GetNextInternal()
    135 out_tensors->emplace_back(ctx->allocator({}), tensors_[i].dtype(), in GetNextInternal()
    138 &tensors_[i], &out_tensors->back(), current_index_)); in GetNextInternal()
    146 tensors_.clear(); in GetNextInternal()
    148 input_impl_->GetNext(ctx, &tensors_, end_of_sequence)); in GetNextInternal()
    150 for (size_t i = 0; i < tensors_.size(); ++i) { in GetNextInternal()
    151 if (tensors_[i].dims() == 0) { in GetNextInternal()
    156 if (tensors_[i].dim_size(0) != tensors_[0].dim_size(0)) { in GetNextInternal()
    160 tensors_[0].dim_size(0), " but component ", i, in GetNextInternal()
    [all …]
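Unbatching pulls one batch of components from the input iterator, rejects scalar components and components whose leading dimension differs from component 0, then emits one slice per call. A self-contained sketch of those checks over flat row-major buffers (Component, ValidateBatch and SliceRow are hypothetical helpers; the real kernel copies Tensor slices with an allocator):

#include <cstddef>
#include <stdexcept>
#include <vector>

// A component: shape plus flat row-major data.
struct Component {
  std::vector<std::size_t> shape;
  std::vector<float> data;
};

// Every component must have rank >= 1 and the same leading (batch)
// dimension as component 0; returns the number of unbatched elements.
std::size_t ValidateBatch(const std::vector<Component>& batch) {
  if (batch.empty()) throw std::invalid_argument("empty batch");
  for (std::size_t i = 0; i < batch.size(); ++i) {
    if (batch[i].shape.empty()) {
      throw std::invalid_argument("component has 0 dimensions");
    }
    if (batch[i].shape[0] != batch[0].shape[0]) {
      throw std::invalid_argument("leading dimensions do not match");
    }
  }
  return batch[0].shape[0];
}

// Copies element `index` of one component (the per-slice copy).
std::vector<float> SliceRow(const Component& c, std::size_t index) {
  const std::size_t row = c.data.size() / c.shape[0];
  return std::vector<float>(c.data.begin() + index * row,
                            c.data.begin() + (index + 1) * row);
}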
|
D | rebatch_dataset_op.cc |
    403 tensors_.clear(); in GetNextInternal()
    405 input_impl_->GetNext(ctx, &tensors_, &end_of_sequence_)); in GetNextInternal()
    415 tensors_[0].dim_size(0)); in GetNextInternal()
    418 slices.reserve(tensors_.size()); in GetNextInternal()
    419 for (const auto& tensor : tensors_) { in GetNextInternal()
    426 if (offset_ == tensors_[0].dim_size(0)) { in GetNextInternal()
    534 for (int i = 0; i < tensors_.size(); ++i) { in SaveInternal()
    536 full_name(strings::StrCat("tensors[", i, "]")), tensors_[i])); in SaveInternal()
    554 tensors_.clear(); in RestoreInternal()
    556 tensors_.resize(dataset()->output_dtypes().size()); in RestoreInternal()
    [all …]
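SaveInternal writes the buffered batch one component at a time under generated keys of the form "tensors[i]", and RestoreInternal resizes to the dataset's output arity before reading each key back. A toy sketch of that indexed-key round trip, using a string-keyed map as a stand-in for the checkpoint writer/reader:

#include <cstddef>
#include <map>
#include <string>
#include <vector>

using FakeTensor = std::vector<float>;
using Checkpoint = std::map<std::string, FakeTensor>;

// Writes tensors[i] under the key "tensors[i]".
void SaveTensors(const std::vector<FakeTensor>& tensors, Checkpoint* ckpt) {
  for (std::size_t i = 0; i < tensors.size(); ++i) {
    (*ckpt)["tensors[" + std::to_string(i) + "]"] = tensors[i];
  }
}

// Resizes to the expected arity, then reads each key back.
bool RestoreTensors(const Checkpoint& ckpt, std::size_t arity,
                    std::vector<FakeTensor>* tensors) {
  tensors->clear();
  tensors->resize(arity);
  for (std::size_t i = 0; i < arity; ++i) {
    auto it = ckpt.find("tensors[" + std::to_string(i) + "]");
    if (it == ckpt.end()) return false;  // missing component
    (*tensors)[i] = it->second;
  }
  return true;
}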
|
/external/tensorflow/tensorflow/core/framework/ |
D | variant_tensor_data.cc |
    27 int VariantTensorData::tensors_size() const { return tensors_.size(); } in tensors_size()
    30 return tensors_[index]; in tensors()
    34 return tensors_; in tensors()
    38 tensors_.emplace_back(); in add_tensors()
    39 return &(tensors_[tensors_.size() - 1]); in add_tensors()
    46 for (const auto& tensor : tensors_) { in ToProto()
    58 tensors_.push_back(tmp); in FromProto()
    69 tensors_.push_back(tmp); in FromConstProto()
    95 for (const auto& t : tensors_) { in DebugString()
|
D | variant_tensor_data.h |
    96 std::vector<Tensor> tensors_; variable
    130 tensors_.emplace_back(std::forward<TensorConstructorArgs>(args)...); in add_tensor()
    131 return &tensors_.back(); in add_tensor()
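The header's add_tensor is a variadic emplace that forwards its arguments and returns a pointer to the freshly constructed element; the pointer stays valid only until the vector reallocates, which is why callers fill it in immediately. A minimal sketch of the pattern (TensorStore is an illustrative name, not the real VariantTensorData):

#include <string>
#include <utility>
#include <vector>

template <typename T>
class TensorStore {
 public:
  // Constructs a new element in place from the forwarded arguments and
  // returns a pointer to it.
  template <typename... Args>
  T* add_tensor(Args&&... args) {
    tensors_.emplace_back(std::forward<Args>(args)...);
    return &tensors_.back();
  }

  int tensors_size() const { return static_cast<int>(tensors_.size()); }
  const T& tensors(int index) const { return tensors_[index]; }

 private:
  std::vector<T> tensors_;
};

// Usage: the returned pointer is only valid until the next add_tensor call.
// TensorStore<std::string> store;
// std::string* s = store.add_tensor(3, 'x');  // constructs "xxx" in place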
|
D | session_state.h |
    53 std::unordered_map<string, Tensor> tensors_; variable
    86 std::unordered_map<string, TensorAndKey> tensors_ TF_GUARDED_BY(lock_);
|
/external/tensorflow/tensorflow/lite/delegates/coreml/builders/ |
D | util_test.cc |
    31 tensors_.resize(input_size); in SetUp()
    32 context_.tensors = tensors_.data(); in SetUp()
    38 for (auto& tensor : tensors_) { in SetUp()
    50 for (int i = 0; i < tensors_.size(); ++i) { in SetInputShapes()
    51 tensors_[i].dims = TfLiteIntArrayCreate(shapes[i].size()); in SetInputShapes()
    52 std::copy(shapes[i].begin(), shapes[i].end(), tensors_[i].dims->data); in SetInputShapes()
    57 for (auto& tensor : tensors_) { in FreeInputShapes()
    67 std::vector<TfLiteTensor> tensors_; member in __anon901ad5410111::IsBinaryOpSupportedTest
    94 tensors_[1].allocation_type = kTfLiteMmapRo; in TEST_F()
|
D | op_builder.cc |
    137 if (tensors_.size() <= tf_tensor_id) { in AddTensorWithID()
    138 tensors_.resize(tf_tensor_id + 1); in AddTensorWithID()
    141 tensors_[tf_tensor_id] = tensor_id; in AddTensorWithID()
    153 tensor_id, static_cast<int>(tensors_.size())); in GetTensorID()
    158 return tensors_[tensor_id]; in GetTensorID()
    162 if (tensors_.size() <= tflite_tensor_index) { in HasTensor()
    165 return tensors_[tflite_tensor_index].NodeID() != -1; in HasTensor()
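The Core ML builder keeps a vector indexed by TFLite tensor index whose entries are the delegate-side tensor IDs; it grows the vector on demand and treats an unset entry as "no mapping". A simplified sketch of the AddTensorWithID / GetTensorID / HasTensor trio, with a plain int and a -1 sentinel standing in for TensorID:

#include <cstddef>
#include <vector>

class TensorIdMap {
 public:
  // Records the delegate-side id for a TFLite tensor, resizing on demand.
  void AddTensorWithID(std::size_t tf_tensor_id, int delegate_id) {
    if (tensors_.size() <= tf_tensor_id) {
      tensors_.resize(tf_tensor_id + 1, kUnset);
    }
    tensors_[tf_tensor_id] = delegate_id;
  }

  // True once a mapping has been registered for this TFLite index.
  bool HasTensor(std::size_t tflite_tensor_index) const {
    if (tensors_.size() <= tflite_tensor_index) return false;
    return tensors_[tflite_tensor_index] != kUnset;
  }

  // Returns the mapping, or the sentinel when out of range or unset.
  int GetTensorID(std::size_t tflite_tensor_index) const {
    if (!HasTensor(tflite_tensor_index)) return kUnset;
    return tensors_[tflite_tensor_index];
  }

 private:
  static constexpr int kUnset = -1;
  std::vector<int> tensors_;
};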
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | session_state.cc |
    28 auto it = tensors_.find(handle); in GetTensor()
    29 if (it == tensors_.end()) { in GetTensor()
    39 if (!tensors_.insert({handle, tensor}).second) { in AddTensor()
    48 if (tensors_.erase(handle) == 0) { in DeleteTensor()
    62 if (!tensors_.insert({name, tk}).second) { in AddTensor()
    73 if (!tensors_.empty()) { in SaveTensors()
    78 auto it = tensors_.find(op_name); in SaveTensors()
    79 if (it != tensors_.end()) { in SaveTensors()
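SessionState turns the usual map outcomes into errors: a failed find on Get, a failed insert (duplicate handle) on Add, and a zero erase count on Delete, all under a lock. A compact sketch of that pattern with bool return values standing in for Status and a float standing in for Tensor (not the real TensorFlow API):

#include <mutex>
#include <string>
#include <unordered_map>

class SessionTensorStore {
 public:
  // Fails when the handle was never stored.
  bool GetTensor(const std::string& handle, float* out) {
    std::lock_guard<std::mutex> lock(lock_);
    auto it = tensors_.find(handle);
    if (it == tensors_.end()) return false;
    *out = it->second;
    return true;
  }

  // Fails when the handle is already present (insert returns {it, false}).
  bool AddTensor(const std::string& handle, float value) {
    std::lock_guard<std::mutex> lock(lock_);
    return tensors_.insert({handle, value}).second;
  }

  // Fails when nothing was erased.
  bool DeleteTensor(const std::string& handle) {
    std::lock_guard<std::mutex> lock(lock_);
    return tensors_.erase(handle) != 0;
  }

 private:
  std::mutex lock_;
  std::unordered_map<std::string, float> tensors_;  // handle -> value
};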
|
/external/tensorflow/tensorflow/core/kernels/data/ |
D | tensor_slice_dataset_op.cc |
    41 : DatasetBase(DatasetContext(ctx)), tensors_(std::move(tensors)) { in Dataset()
    42 for (const Tensor& t : tensors_) { in Dataset()
    64 absl::make_unique<IndexSplitProvider>(tensors_[0].dim_size(0)); in MakeSplitProvider()
    78 int64 Cardinality() const override { return tensors_[0].dim_size(0); } in Cardinality()
    91 components.reserve(tensors_.size()); in AsGraphDefInternal()
    92 for (const Tensor& t : tensors_) { in AsGraphDefInternal()
    120 dataset()->tensors_[0].dim_size(0)); in Initialize()
    135 out_tensors->reserve(dataset()->tensors_.size()); in GetNextInternal()
    136 for (size_t i = 0; i < dataset()->tensors_.size(); ++i) { in GetNextInternal()
    137 const Tensor& t = dataset()->tensors_[i]; in GetNextInternal()
    [all …]
|
D | tensor_dataset_op.cc |
    40 : DatasetBase(DatasetContext(ctx)), tensors_(std::move(tensors)) { in Dataset()
    41 dtypes_.reserve(tensors_.size()); in Dataset()
    42 shapes_.reserve(tensors_.size()); in Dataset()
    43 for (const Tensor& t : tensors_) { in Dataset()
    78 components.reserve(tensors_.size()); in AsGraphDefInternal()
    79 for (const Tensor& t : tensors_) { in AsGraphDefInternal()
    108 *out_tensors = dataset()->tensors_; in GetNextInternal()
    144 const std::vector<Tensor> tensors_; member in tensorflow::data::TensorDatasetOp::Dataset
|
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/ |
D | gpu_api_delegate.cc |
    237 if (tensor_index >= tensors_.size()) { in BindGlBufferToTensor()
    238 tensors_.resize(tensor_index + 1); in BindGlBufferToTensor()
    249 tensors_[tensor_index] = std::make_pair(obj, def); in BindGlBufferToTensor()
    253 if (index < tensors_.size() && IsValid(tensors_[index].second)) { in GetObjectDef()
    254 return tensors_[index].second.object_def; in GetObjectDef()
    265 if (index < tensors_.size() && in GetTensorObject()
    266 IsValid(tensors_[index].second, tensors_[index].first)) { in GetTensorObject()
    267 return tensors_[index].first; in GetTensorObject()
    301 std::vector<std::pair<TensorObject, TensorObjectDef>> tensors_; member in tflite::gpu::cl::__anon7ea745220111::Delegate
|
/external/tensorflow/tensorflow/lite/delegates/gpu/ |
D | gl_delegate.cc |
    155 tensors_.reserve(values.back()->id + 1); in Prepare()
    157 if (tensors_.size() <= value->id) { in Prepare()
    158 tensors_.resize(value->id + 1); in Prepare()
    160 tensors_[value->id] = {value->tensor.shape, 0}; in Prepare()
    187 tensors_[input->id].tensor_index = tensor_index; in Prepare()
    227 tensors_[output->id].tensor_index = tensor_index; in Prepare()
    289 const ValueRef& ref = tensors_[id]; in Invoke()
    294 if (!IsPHWC4(tensors_[id].shape)) { in Invoke()
    313 const ValueRef& ref = tensors_[id]; in Invoke()
    318 if (!IsPHWC4(tensors_[id].shape)) { in Invoke()
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/sparse/ |
D | sparse_matrix.h |
    328 if (p.tensors_.empty()) return false; in Decode()
    336 if (p.tensors_.size() != 5) return false; in Decode()
    338 Tensor dense_shape = p.tensors_[0]; in Decode()
    344 Tensor batch_pointers(p.tensors_[1]); in Decode()
    345 Tensor row_pointers(p.tensors_[2]); in Decode()
    346 Tensor col_indices(p.tensors_[3]); in Decode()
    347 Tensor values(p.tensors_[4]); in Decode()
    372 p->tensors_.reserve(5); in Encode()
    373 p->tensors_.push_back(dense_shape_); in Encode()
    374 p->tensors_.push_back(batch_pointers_); in Encode()
    [all …]
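Encode/Decode serialize the CSR sparse matrix as exactly five component tensors in a fixed order (dense shape, batch pointers, row pointers, column indices, values), and Decode rejects anything that is not exactly five. A small sketch of that fixed-order packing over plain vectors; every component is a vector<double> here purely so all five fit in one list, whereas the real code keeps a Tensor per component with its own dtype:

#include <vector>

struct CsrMatrix {
  std::vector<double> dense_shape;
  std::vector<double> batch_pointers;
  std::vector<double> row_pointers;
  std::vector<double> col_indices;
  std::vector<double> values;
};

struct Packed {
  std::vector<std::vector<double>> tensors_;  // fixed order, exactly 5 entries
};

void Encode(const CsrMatrix& m, Packed* p) {
  p->tensors_.reserve(5);
  p->tensors_.push_back(m.dense_shape);
  p->tensors_.push_back(m.batch_pointers);
  p->tensors_.push_back(m.row_pointers);
  p->tensors_.push_back(m.col_indices);
  p->tensors_.push_back(m.values);
}

bool Decode(const Packed& p, CsrMatrix* out) {
  if (p.tensors_.empty()) return false;      // nothing to decode
  if (p.tensors_.size() != 5) return false;  // must be exactly five components
  out->dense_shape = p.tensors_[0];
  out->batch_pointers = p.tensors_[1];
  out->row_pointers = p.tensors_[2];
  out->col_indices = p.tensors_[3];
  out->values = p.tensors_[4];
  return true;
}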
|
/external/tensorflow/tensorflow/lite/micro/kernels/ |
D | kernel_runner.cc |
    40 tensors_(tensors) { in KernelRunner()
    82 return &runner->tensors_[tensor_index]; in GetTensor()
    98 eval_tensor->data = runner->tensors_[tensor_index].data; in GetEvalTensor()
    99 eval_tensor->dims = runner->tensors_[tensor_index].dims; in GetEvalTensor()
    100 eval_tensor->type = runner->tensors_[tensor_index].type; in GetEvalTensor()
|
/external/tensorflow/tensorflow/lite/delegates/hexagon/builders/ |
D | op_builder.h |
    275 return tensors_[tflite_tensor_index]; in GetHexagonTensorId()
    280 if (tensors_.size() <= tflite_tensor_index) { in HasTensor()
    285 return tensors_[tflite_tensor_index].first != 0; in HasTensor()
    327 if (tensors_.size() <= tflite_tensor_id) {
    328 tensors_.resize(tflite_tensor_id + 1);
    336 tensors_[tflite_tensor_id] =
    394 std::vector<OpBuilder::TensorID> tensors_; variable
|
/external/tensorflow/tensorflow/core/util/ |
D | tensor_slice_reader.cc |
    175 ss_slice, &tensors_); in LoadShard()
    192 const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name); in FindTensorSlice()
    200 for (auto& temp : tensors_) { in ~TensorSliceReader()
    203 tensors_.clear(); in ~TensorSliceReader()
    209 const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name); in HasTensor()
    214 tss = gtl::FindPtrOrNull(tensors_, name); in HasTensor()
    236 const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name); in GetTensor()
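HasTensor and GetTensor look the name up in tensors_, and on a miss they load the remaining shards and look again before giving up, so shards are parsed lazily. A sketch of that look-up / load-on-miss / retry idiom; FindOrNull stands in for gtl::FindPtrOrNull, and shard loading is faked with pre-built maps:

#include <map>
#include <string>
#include <utility>
#include <vector>

struct TensorSliceSet {
  std::string name;
};

class LazyReader {
 public:
  explicit LazyReader(std::vector<std::map<std::string, TensorSliceSet>> shards)
      : pending_shards_(std::move(shards)) {}

  bool HasTensor(const std::string& name) {
    const TensorSliceSet* tss = FindOrNull(name);
    if (tss == nullptr && !pending_shards_.empty()) {
      LoadAllShards();         // parse remaining shards on demand
      tss = FindOrNull(name);  // then retry the lookup
    }
    return tss != nullptr;
  }

 private:
  const TensorSliceSet* FindOrNull(const std::string& name) const {
    auto it = tensors_.find(name);
    return it == tensors_.end() ? nullptr : &it->second;
  }

  void LoadAllShards() {
    for (auto& shard : pending_shards_) {
      tensors_.insert(shard.begin(), shard.end());
    }
    pending_shards_.clear();
  }

  std::map<std::string, TensorSliceSet> tensors_;
  std::vector<std::map<std::string, TensorSliceSet>> pending_shards_;
};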
|
/external/tensorflow/tensorflow/c/eager/parallel_device/ |
D | parallel_device_lib.h |
    173 size_t num_tensors() const { return tensors_.size(); } in num_tensors()
    174 TFE_TensorHandle* tensor(size_t index) const { return tensors_[index].get(); } in tensor()
    190 tensors_(std::move(tensors)), in ParallelTensor()
    196 tensors_(std::move(tensors)), in ParallelTensor()
    201 const std::vector<TensorHandlePtr> tensors_; variable
|
/external/tensorflow/tensorflow/lite/core/ |
D | subgraph.cc |
    235 tensors_.reserve(kTensorsReservedCapacity); in Subgraph()
    422 TfLiteTensor* tensor = &tensors_[tensor_index]; in ReplaceNodeSubsetsWithDelegateKernels()
    714 for (auto& tensor : tensors_) { in ResetVariableTensors()
    1041 TfLiteTensor* tensor = &tensors_[tensor_index]; in Invoke()
    1153 const size_t base_index = tensors_.size(); in AddTensors()
    1155 tensors_.resize(tensors_.size() + tensors_to_add); in AddTensors()
    1156 for (size_t i = base_index; i < tensors_.size(); i++) { in AddTensors()
    1157 memset(&tensors_[i], 0, sizeof(tensors_[i])); in AddTensors()
    1158 tensors_[i].buffer_handle = kTfLiteNullBufferHandle; in AddTensors()
    1160 context_.tensors = tensors_.data(); in AddTensors()
    [all …]
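AddTensors records the old size as the base index, grows tensors_, zero-initializes the new slots and seeds buffer_handle with the null-handle sentinel, then reassigns context_.tensors because the resize may have reallocated the vector backing the raw pointer kernels see. A stripped-down sketch of that sequence with stand-in types (MiniSubgraph, FakeTensor and FakeContext are illustrative, not the real TfLite structures):

#include <cstddef>
#include <cstring>
#include <vector>

constexpr int kNullBufferHandle = -1;

struct FakeTensor {
  void* data;
  int bytes;
  int buffer_handle;
};

struct FakeContext {
  FakeTensor* tensors;  // raw view into the vector owned by the subgraph
  int tensors_size;
};

class MiniSubgraph {
 public:
  // Returns the index of the first newly added tensor.
  int AddTensors(int tensors_to_add) {
    const int base_index = static_cast<int>(tensors_.size());
    tensors_.resize(tensors_.size() + tensors_to_add);
    for (std::size_t i = static_cast<std::size_t>(base_index);
         i < tensors_.size(); ++i) {
      std::memset(&tensors_[i], 0, sizeof(tensors_[i]));
      tensors_[i].buffer_handle = kNullBufferHandle;
    }
    // The resize may reallocate, so the raw pointer must be refreshed.
    context_.tensors = tensors_.data();
    context_.tensors_size = static_cast<int>(tensors_.size());
    return base_index;
  }

 private:
  FakeContext context_{};
  std::vector<FakeTensor> tensors_;
};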
|
/external/tensorflow/tensorflow/lite/ |
D | graph_info_test.cc |
    63 size_t num_tensors() const override { return tensors_.size(); } in num_tensors()
    64 TfLiteTensor* tensor(size_t index) override { return &tensors_[index]; } in tensor()
    77 void AddTensors(int count) { tensors_.resize(count + tensors_.size()); } in AddTensors()
    88 std::vector<TfLiteTensor> tensors_; member in tflite::__anonb6ed36e90111::SimpleTestGraph
|
D | arena_planner_test.cc |
    96 tensors_.push_back(TfLiteTensor()); in TestGraph()
    99 tensors_.back().allocation_type = kTfLiteArenaRw; in TestGraph()
    100 tensors_.back().bytes = (i + 1) * 3; in TestGraph()
    113 std::vector<TfLiteTensor>* tensors() { return &tensors_; } in tensors()
    124 std::swap(tensors_, other->tensors_); in Swap()
    132 std::vector<TfLiteTensor> tensors_; member in tflite::__anond3fd21620111::TestGraph
|