/external/tensorflow/tensorflow/core/profiler/internal/ |
D | tfprof_tensor.cc |
     36  tfprof_tensor_pb_.set_dtype(tensor_->dtype());  in Build()
     38  switch (tensor_->dtype()) {  in Build()
     43  if (tensor_->dtype() == DataType::DT_FLOAT) {  in Build()
     45  } else if (tensor_->dtype() == DataType::DT_DOUBLE) {  in Build()
     55  if (tensor_->dtype() == DataType::DT_INT32) {  in Build()
     57  } else if (tensor_->dtype() == DataType::DT_INT64) {  in Build()
     71  fprintf(stderr, "Not Supported type %d\n", tensor_->dtype());  in Build()
|
D | tfprof_tensor.h |
     39  : tensor_(std::move(tensor)) {  in TFProfTensor()
     89  if (tensor_->dims() == 0 && values.size() == 1) {  in BuildOutput()
    115  for (int i = 0; i < tensor_->dim_size(depth); i++) {  in BuildOutput()
    117  if (depth == tensor_->dims() - 1) {  in BuildOutput()
    160  if (tensor_->NumElements() > kTFProfTensorMaxWarnLen) {  in GetValueVec()
    163  auto values = tensor_->flat<T>();  in GetValueVec()
    164  for (int64 i = 0; i < tensor_->NumElements(); i++) {  in GetValueVec()
    170  std::unique_ptr<Tensor> tensor_;  variable
|
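TFProfTensor keeps the wrapped tensor in a std::unique_ptr<Tensor> tensor_ and, in Build()/GetValueVec(), switches on tensor_->dtype() and then walks tensor_->flat<T>() over NumElements(). A minimal standalone sketch of that dtype-dispatched traversal; MiniTensor and its fields are hypothetical stand-ins for the real tensorflow::Tensor API, not part of TensorFlow.

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins: a DataType tag plus pre-flattened value buffers.
enum class DataType { DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_STRING };

struct MiniTensor {
  DataType dtype;
  std::vector<double> float_vals;   // used for DT_FLOAT / DT_DOUBLE
  std::vector<int64_t> int_vals;    // used for DT_INT32 / DT_INT64
};

// Mirrors the shape of TFProfTensor::Build(): branch on dtype(), then visit
// every element (the real code uses tensor_->flat<T>() and NumElements()).
void Build(const MiniTensor& t) {
  switch (t.dtype) {
    case DataType::DT_FLOAT:
    case DataType::DT_DOUBLE:
      for (double v : t.float_vals) std::printf("%g ", v);
      break;
    case DataType::DT_INT32:
    case DataType::DT_INT64:
      for (int64_t v : t.int_vals) std::printf("%lld ", static_cast<long long>(v));
      break;
    default:
      std::fprintf(stderr, "Not Supported type %d\n", static_cast<int>(t.dtype));
  }
  std::printf("\n");
}

int main() {
  Build({DataType::DT_FLOAT, {1.5, 2.5, 3.5}, {}});  // prints: 1.5 2.5 3.5
  Build({DataType::DT_INT64, {}, {4, 5, 6}});        // prints: 4 5 6
}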
/external/tensorflow/tensorflow/core/framework/ |
D | resource_var.h |
     62  explicit Var(DataType dtype) : tensor_(dtype) {}  in Var()
     68  Tensor* tensor() { return &tensor_; }  in tensor()
     71  return strings::StrCat(DataTypeString(tensor_.dtype()), "/",  in DebugString()
     72  tensor_.shape().DebugString());  in DebugString()
     92  Tensor tensor_;  variable
|
D | op_kernel.h |
    245  explicit PersistentTensor(const Tensor& tensor) : tensor_(tensor) {}  in PersistentTensor()
    254  bool IsInitialized() const { return tensor_.IsInitialized(); }  in IsInitialized()
    256  int64 NumElements() const { return tensor_.NumElements(); }  in NumElements()
    258  int64 AllocatedBytes() const { return tensor_.AllocatedBytes(); }  in AllocatedBytes()
    261  Tensor tensor_;
|
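Var (resource_var.h) and PersistentTensor (op_kernel.h) are thin owners of a single Tensor tensor_: accessors hand out a pointer or forward simple queries (IsInitialized, NumElements, AllocatedBytes), and Var's DebugString() concatenates the dtype string with the shape's debug string. A simplified standalone sketch of that own-and-delegate pattern; MiniTensor and VarLike are invented stand-ins, not the real classes.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for tensorflow::Tensor.
struct MiniTensor {
  std::string dtype;
  std::vector<int64_t> shape;
  bool initialized;

  int64_t NumElements() const {
    int64_t n = 1;
    for (int64_t d : shape) n *= d;
    return n;
  }
  std::string ShapeDebugString() const {
    std::string s = "[";
    for (size_t i = 0; i < shape.size(); ++i)
      s += (i ? "," : "") + std::to_string(shape[i]);
    return s + "]";
  }
};

// Same shape as Var / PersistentTensor: own one tensor, expose it by pointer,
// and forward the simple queries straight to the wrapped object.
class VarLike {
 public:
  explicit VarLike(MiniTensor t) : tensor_(std::move(t)) {}

  MiniTensor* tensor() { return &tensor_; }
  bool IsInitialized() const { return tensor_.initialized; }
  int64_t NumElements() const { return tensor_.NumElements(); }
  std::string DebugString() const {
    return tensor_.dtype + "/" + tensor_.ShapeDebugString();
  }

 private:
  MiniTensor tensor_;
};

int main() {
  VarLike v(MiniTensor{"float", {2, 3}, true});
  std::cout << v.DebugString() << " elements=" << v.NumElements() << "\n";
  // prints: float/[2,3] elements=6
}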
/external/tensorflow/tensorflow/core/kernels/ |
D | host_constant_op.cc |
     28  : OpKernel(ctx), tensor_(ctx->output_type(0)) {  in _HostConstantOp()
     34  ctx, ctx->device()->MakeTensorFromProto(*proto, alloc_attr, &tensor_));  in _HostConstantOp()
     36  ctx, ctx->output_type(0) == tensor_.dtype(),  in _HostConstantOp()
     38  DataTypeString(tensor_.dtype()), ") and dtype (",  in _HostConstantOp()
     43  ctx->set_output(0, tensor_);  in Compute()
|
D | collective_nccl_reducer_test.cc |
    178  Tensor* dev_tensor = &instances_[rank]->tensor_;  in RunTest()
    232  tensor_ =  in InitTensor()
    240  &cpu_tensor, device_, &tensor_,  in InitTensor()
    254  inputs.push_back(TensorValue(&tensor_));  in DoReduce()
    276  parent_->GetCollectiveReduce(col_params_, &tensor_, device_);  in DoReduce()
    283  TF_CHECK_OK(ctx.forward_input_or_allocate_output({0}, 0, tensor_.shape(),  in DoReduce()
    293  kStepId, &tensor_, &tensor_);  in DoReduce()
    299  CHECK(tensor_.CopyFrom(*ctx.mutable_output(0), tensor_.shape()));  in DoReduce()
    308  Tensor tensor_;  member in tensorflow::NcclReducerTest::DeviceInstance
|
D | sparse_cross_op.cc |
    105  explicit DenseTensorColumn(const Tensor& tensor) : tensor_(tensor) {}  in DenseTensorColumn()
    107  int64 FeatureCount(int64 batch) const override { return tensor_.dim_size(1); }  in FeatureCount()
    114  const Tensor& tensor_;  member in tensorflow::__anon9a95f3020111::DenseTensorColumn
    120  if (DT_STRING == tensor_.dtype())  in Feature()
    121  return Fingerprint64(tensor_.matrix<string>()(batch, n));  in Feature()
    122  return tensor_.matrix<int64>()(batch, n);  in Feature()
    128  if (DT_STRING == tensor_.dtype()) return tensor_.matrix<string>()(batch, n);  in Feature()
    129  return std::to_string(tensor_.matrix<int64>()(batch, n));  in Feature()
    135  return tensor_.matrix<string>()(batch, n);  in Feature()
|
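DenseTensorColumn borrows a const Tensor& tensor_ and branches per cell on tensor_.dtype(): string cells are hashed with Fingerprint64, int64 cells are returned as-is or stringified, and FeatureCount() is just dim_size(1). A rough standalone sketch of that per-cell dispatch; DenseColumn is a hypothetical stand-in and std::hash replaces the real Fingerprint64.

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical dense 2-D column: either string cells or int64 cells.
struct DenseColumn {
  bool is_string;
  std::vector<std::vector<std::string>> str_cells;
  std::vector<std::vector<int64_t>> int_cells;

  // Mirrors tensor_.dim_size(1): number of features per batch row.
  int64_t FeatureCount() const {
    return is_string ? str_cells[0].size() : int_cells[0].size();
  }

  // Mirrors Feature<int64>(batch, n): hash strings, pass integers through.
  int64_t FeatureId(int64_t batch, int64_t n) const {
    if (is_string)
      return static_cast<int64_t>(std::hash<std::string>{}(str_cells[batch][n]));
    return int_cells[batch][n];
  }

  // Mirrors Feature<string>(batch, n): stringify integers.
  std::string FeatureStr(int64_t batch, int64_t n) const {
    if (is_string) return str_cells[batch][n];
    return std::to_string(int_cells[batch][n]);
  }
};

int main() {
  DenseColumn c{false, {}, {{7, 42}}};
  std::cout << c.FeatureCount() << " " << c.FeatureId(0, 1) << " "
            << c.FeatureStr(0, 1) << "\n";  // prints: 2 42 42
}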
D | variable_ops.cc |
     30  explicit LegacyVar(DataType dtype) : tensor_(dtype) {}  in LegacyVar()
     36  Tensor* tensor() { return &tensor_; }  in tensor()
     39  return strings::StrCat(DataTypeString(tensor_.dtype()), "/",  in DebugString()
     40  tensor_.shape().DebugString());  in DebugString()
     45  Tensor tensor_;  member in tensorflow::LegacyVar
|
D | constant_op.cc |
     72  tensor_(ctx->output_type(0)) {  in ConstantOp()
     76  *proto, AllocatorAttributes(), &tensor_));  in ConstantOp()
     78  ctx, ctx->output_type(0) == tensor_.dtype(),  in ConstantOp()
     80  DataTypeString(tensor_.dtype()), ") and dtype (",  in ConstantOp()
     85  ctx->set_output(0, tensor_);  in Compute()
     87  ctx->record_persistent_memory_allocation(tensor_.AllocatedBytes());  in Compute()
|
D | host_constant_op.h | 36 Tensor tensor_;
|
D | constant_op.h | 35 Tensor tensor_;
|
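ConstantOp and _HostConstantOp build tensor_ once in the constructor (MakeTensorFromProto from the node's TensorProto), verify that the stored dtype matches the declared output type, and then Compute() only republishes the prebuilt tensor via set_output(0, tensor_). A schematic standalone sketch of that construct-once / serve-many flow; MiniTensor, MiniProto, and ConstantLikeOp are hypothetical stand-ins for the kernel-context machinery.

#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical stand-ins for the tensor and its serialized proto form.
struct MiniTensor { std::string dtype; double value; };
struct MiniProto  { std::string dtype; double value; };

MiniTensor MakeTensorFromProto(const MiniProto& p) { return {p.dtype, p.value}; }

class ConstantLikeOp {
 public:
  // Build tensor_ once at construction time and validate its dtype against
  // the declared output type, like ConstantOp's OP_REQUIRES check.
  ConstantLikeOp(const MiniProto& proto, const std::string& output_type)
      : tensor_(MakeTensorFromProto(proto)) {
    if (tensor_.dtype != output_type) {
      throw std::runtime_error("TensorProto dtype (" + tensor_.dtype +
                               ") and dtype (" + output_type + ") mismatch");
    }
  }

  // Compute() has no per-call work: it hands out the prebuilt constant.
  const MiniTensor& Compute() const { return tensor_; }

 private:
  MiniTensor tensor_;
};

int main() {
  ConstantLikeOp op({"float", 3.14}, "float");
  std::cout << op.Compute().value << "\n";  // prints: 3.14
}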
/external/tensorflow/tensorflow/core/common_runtime/eager/ |
D | tensor_handle.cc |
     52  tensor_(t),  in TensorHandle()
     67  tensor_(dtype),  in TensorHandle()
    147  *t = &tensor_;  in Tensor()
    154  *t = tensorflow::TensorValue(&tensor_);  in TensorValue()
    168  *tensor = &tensor_;  in TensorAndDevice()
    182  *shape = tensor_.shape();  in Shape()
    196  *num_dims = tensor_.dims();  in NumDims()
    211  *dim = tensor_.dim_size(dim_index);  in Dim()
    226  *num_elements = tensor_.NumElements();  in NumElements()
    248  tensor_ = tensor;  in SetTensor()
    [all …]
|
D | tensor_handle.h | 147 tensorflow::Tensor tensor_; variable
|
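The TensorHandle queries shown above are straight delegations to the tensor_ member (Shape, NumDims, Dim, NumElements), and SetTensor() simply overwrites it. A compact standalone sketch of that forwarding layer; MiniTensor and HandleLike are stand-ins rather than the eager runtime types.

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Hypothetical stand-in for tensorflow::Tensor's shape API.
struct MiniTensor {
  std::vector<int64_t> shape;
  int dims() const { return static_cast<int>(shape.size()); }
  int64_t dim_size(int i) const { return shape[i]; }
  int64_t NumElements() const {
    int64_t n = 1;
    for (int64_t d : shape) n *= d;
    return n;
  }
};

// Every query forwards to the wrapped tensor_, like the local-tensor path above.
class HandleLike {
 public:
  explicit HandleLike(MiniTensor t) : tensor_(std::move(t)) {}

  int NumDims() const { return tensor_.dims(); }
  int64_t Dim(int i) const { return tensor_.dim_size(i); }
  int64_t NumElements() const { return tensor_.NumElements(); }
  void SetTensor(MiniTensor t) { tensor_ = std::move(t); }  // like SetTensor()

 private:
  MiniTensor tensor_;
};

int main() {
  HandleLike h(MiniTensor{{4, 5}});
  std::cout << h.NumDims() << " " << h.Dim(1) << " " << h.NumElements() << "\n";
  // prints: 2 5 20
}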
/external/tensorflow/tensorflow/core/distributed_runtime/ |
D | tensor_coding.cc |
     39  tensor_ = Tensor();  in ClearTensor()
     57  if (!tensor_.FromProto(allocator_, meta_.tensor())) {  in InitFrom()
     61  s = device_->MakeTensorFromProto(meta_.tensor(), alloc_attrs_, &tensor_);  in InitFrom()
     79  tensor_ = std::move(t);  in InitPartial()
     92  device_->MakeTensorFromProto(meta_.tensor(), alloc_attrs_, &tensor_);  in ParseFrom()
    160  tensor_ = std::move(t);  in ParseTensorSubmessage()
    207  tensor_ = std::move(t);  in ParseTensorSubmessage()
    283  tensor_ = std::move(parsed);  in ParseSlow()
|
D | tensor_coding.h |
     84  const Tensor& tensor() const { return tensor_; }  in tensor()
    105  Tensor tensor_;  variable
|
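TensorResponse in tensor_coding owns a Tensor tensor_ that it resets in ClearTensor(), fills from the incoming proto (Tensor::FromProto or the device's MakeTensorFromProto), or move-assigns from a freshly parsed tensor, and the header exposes it read-only through tensor(). A stripped-down standalone sketch of that clear / parse / move lifecycle; WireProto and ResponseLike are invented stand-ins for the real proto and RPC plumbing.

#include <iostream>
#include <utility>
#include <vector>

// Hypothetical stand-ins for the wire format and the parsed tensor.
struct WireProto  { std::vector<double> values; };
struct MiniTensor { std::vector<double> values; };

class ResponseLike {
 public:
  // Mirrors ClearTensor(): reset the member to an empty tensor.
  void ClearTensor() { tensor_ = MiniTensor(); }

  // Mirrors InitFrom()/ParseFrom(): build a tensor from the incoming proto
  // and move it into tensor_ on success.
  bool ParseFrom(const WireProto& proto) {
    if (proto.values.empty()) return false;  // stand-in for FromProto() failure
    MiniTensor t{proto.values};
    tensor_ = std::move(t);
    return true;
  }

  // Mirrors tensor_coding.h: const access to the parsed result.
  const MiniTensor& tensor() const { return tensor_; }

 private:
  MiniTensor tensor_;
};

int main() {
  ResponseLike r;
  r.ParseFrom(WireProto{{1, 2, 3}});
  std::cout << r.tensor().values.size() << "\n";  // prints: 3
  r.ClearTensor();
  std::cout << r.tensor().values.size() << "\n";  // prints: 0
}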
/external/tensorflow/tensorflow/core/common_runtime/ |
D | ring_reducer_test.cc |
    319  Tensor* inst = &instances_[di]->tensor_;  in RunTest()
    444  tensor_ =  in InitTensor()
    447  init_f(&tensor_);  in InitTensor()
    455  &cpu_tensor, device_, &tensor_, [&note](const Status& s) {  in InitTensor()
    476  inputs.push_back(TensorValue(&tensor_));  in DoReduce()
    498  col_params_, &tensor_, DEVICE_CPU, device_);  in DoReduce()
    505  TF_CHECK_OK(ctx.forward_input_or_allocate_output({0}, 0, tensor_.shape(),  in DoReduce()
    515  kStepId, &tensor_, &tensor_);  in DoReduce()
    521  CHECK(tensor_.CopyFrom(*ctx.mutable_output(0), tensor_.shape()));  in DoReduce()
    527  const Tensor& tensor() { return tensor_; }  in tensor()
    [all …]
|
D | hierarchical_tree_broadcaster_test.cc |
    432  const Tensor* t = &instances_[broadcast_dev_id]->tensor_;  in RunTest()
    464  Tensor* inst = &instances_[di]->tensor_;  in RunTest()
    608  tensor_ =  in InitTensor()
    611  f(&tensor_);  in InitTensor()
    619  &cpu_tensor, device_, &tensor_, [&notification](Status s) {  in InitTensor()
    635  inputs.push_back(TensorValue(&tensor_));  in DoBroadcast()
    661  ? parent_->GetCollectiveBcastSend(col_params_, &tensor_,  in DoBroadcast()
    663  : parent_->GetCollectiveBcastRecv(col_params_, tensor_.shape(),  in DoBroadcast()
    671  {0}, 0, tensor_.shape(), &output_tensor_ptr));  in DoBroadcast()
    674  ctx.allocate_output(0, tensor_.shape(), &output_tensor_ptr));  in DoBroadcast()
    [all …]
|
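The reducer test harnesses (collective_nccl_reducer_test, ring_reducer_test) stage a per-device tensor_, run the reduce with the same buffer as input and output (the (..., &tensor_, &tensor_) call), and then copy the kernel's output back into tensor_ with CopyFrom for verification. A very rough standalone sketch of that input-aliases-output reduction step; everything here is a stand-in, and the real tests go through OpKernelContext and the collective executor.

#include <cstddef>
#include <iostream>
#include <vector>

using Buffer = std::vector<float>;

// Stand-in for a collective reduce where the input buffer is also the output
// buffer, as in DoReduce()'s call with (&tensor_, &tensor_).
void ReduceInPlace(Buffer* inout, const std::vector<Buffer>& peers) {
  for (const Buffer& p : peers)
    for (size_t i = 0; i < inout->size(); ++i) (*inout)[i] += p[i];
}

int main() {
  Buffer tensor_ = {1, 2, 3};                  // this rank's contribution
  std::vector<Buffer> peers = {{10, 10, 10}};  // other ranks' contributions
  ReduceInPlace(&tensor_, peers);
  for (float v : tensor_) std::cout << v << " ";  // prints: 11 12 13
  std::cout << "\n";
}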
/external/tensorflow/tensorflow/contrib/layers/kernels/ |
D | sparse_feature_cross_kernel.cc |
    105  explicit DenseTensorColumn(const Tensor& tensor) : tensor_(tensor) {}  in DenseTensorColumn()
    107  int64 FeatureCount(int64 batch) const override { return tensor_.dim_size(1); }  in FeatureCount()
    114  const Tensor& tensor_;  member in tensorflow::__anon2da671160111::DenseTensorColumn
    120  if (DT_STRING == tensor_.dtype())  in Feature()
    121  return Fingerprint64(tensor_.matrix<string>()(batch, n));  in Feature()
    122  return tensor_.matrix<int64>()(batch, n);  in Feature()
    128  if (DT_STRING == tensor_.dtype()) return tensor_.matrix<string>()(batch, n);  in Feature()
    129  return std::to_string(tensor_.matrix<int64>()(batch, n));  in Feature()
    135  return tensor_.matrix<string>()(batch, n);  in Feature()
|
/external/tensorflow/tensorflow/stream_executor/ |
D | dnn.cc |
    242  tensor_.mutable_dimensions()->Resize(ndims + 2, 0);  in BatchDescriptor()
    277  tensor_ = other.tensor_;  in CloneFrom()
    376  TensorDescriptorProto ret = tensor_;  in ToProto()
    384  tensor_.mutable_dimensions()->Resize(ndims + 2, 0);  in FilterDescriptor()
    393  tensor_ = other.tensor_;  in CloneFrom()
    448  TensorDescriptorProto ret = tensor_;  in ToProto()
|
D | dnn.h |
    258  int64 count() const { return tensor_.dimensions(0); }
    259  int64 feature_map_count() const { return tensor_.dimensions(1); }
    266  DataLayout layout() const { return tensor_.data_layout(); }
    280  tensor_.set_dimensions(0, value);
    284  tensor_.set_dimensions(1, value);
    308  tensor_.set_data_layout(layout);
    348  return AsInt64Slice(tensor_.dimensions()).subspan(2);
    352  return AsInt64Slice(tensor_.mutable_dimensions()).subspan(2);
    355  TensorDescriptorProto tensor_;
    400  tensor_.set_dimensions(0, value);
    [all …]
|
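In stream_executor's BatchDescriptor everything lives in the TensorDescriptorProto tensor_: dimension 0 is the batch count, dimension 1 the feature-map count, and the spatial dimensions start at index 2, which is why the constructor resizes to ndims + 2 and the spatial accessors take subspan(2). A small standalone sketch of that fixed layout over a plain vector; BatchDescLike and its names are hypothetical.

#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for the proto-backed descriptor: [count, feature_maps, spatial...].
class BatchDescLike {
 public:
  explicit BatchDescLike(int spatial_ndims) : dims_(spatial_ndims + 2, 0) {}

  int64_t count() const { return dims_[0]; }
  int64_t feature_map_count() const { return dims_[1]; }
  void set_count(int64_t v) { dims_[0] = v; }
  void set_feature_map_count(int64_t v) { dims_[1] = v; }

  // Mirrors the subspan(2) accessors: everything past index 1 is spatial.
  std::vector<int64_t> spatial_dims() const {
    return std::vector<int64_t>(dims_.begin() + 2, dims_.end());
  }
  void set_spatial_dim(int i, int64_t v) { dims_[2 + i] = v; }

 private:
  std::vector<int64_t> dims_;
};

int main() {
  BatchDescLike d(/*spatial_ndims=*/2);  // e.g. a 2-D convolution input
  d.set_count(32);
  d.set_feature_map_count(64);
  d.set_spatial_dim(0, 28);
  d.set_spatial_dim(1, 28);
  std::cout << d.count() << "x" << d.feature_map_count() << "x"
            << d.spatial_dims()[0] << "x" << d.spatial_dims()[1] << "\n";
  // prints: 32x64x28x28
}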
/external/tensorflow/tensorflow/cc/framework/ |
D | ops.h |
    214  tensor_(init.tensor) {}
    218  tensor_(t) {}
    228  tensor_ = Initializer(init).tensor;
    241  const Tensor& tensor() const { return tensor_; }
    246  Tensor tensor_;
|
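In the C++ client API (cc/framework/ops.h), the Input type stores whatever it was constructed from in tensor_: one constructor copies an Initializer's tensor, another takes a Tensor directly, and tensor() hands the stored value back by const reference. A condensed standalone sketch of that implicit-conversion wrapper; MiniTensor, Initializer, and InputLike here are simplified stand-ins with a scalar payload.

#include <iostream>

// Stand-ins: a tensor-like value and an initializer that already holds one.
struct MiniTensor { double value; };
struct Initializer {
  MiniTensor tensor;
  Initializer(double v) : tensor{v} {}  // implicit, like the ops.h initializers
};

// Any accepted form ends up stored in tensor_.
class InputLike {
 public:
  InputLike(const Initializer& init) : tensor_(init.tensor) {}  // from an Initializer
  InputLike(const MiniTensor& t) : tensor_(t) {}                // from a tensor directly
  const MiniTensor& tensor() const { return tensor_; }

 private:
  MiniTensor tensor_;
};

void Consume(const InputLike& in) { std::cout << in.tensor().value << "\n"; }

int main() {
  Initializer init(2.5);
  Consume(init);             // converts Initializer -> InputLike, prints 2.5
  Consume(MiniTensor{7.0});  // converts MiniTensor -> InputLike, prints 7
}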
/external/tensorflow/tensorflow/compiler/tf2tensorrt/convert/ |
D | convert_nodes.h |
    189  return const_cast<char*>(tensor_.tensor_data().data());  in GetValues()
    200  return absl::Span<const T>(tensor_.flat<T>().data(), count());  in GetSpan()
    222  Tensor tensor_;  variable
    320  nvinfer1::ITensor* tensor_ = nullptr;  // Not owned.  variable
|
D | convert_nodes.cc |
    617  : shape_(dims), type_(type), tensor_(tensor) {}  in TRT_ShapedWeights()
    620  : shape_(rhs.shape_), type_(rhs.type_), tensor_(rhs.tensor_) {}  in TRT_ShapedWeights()
    702  : tensor_(tensor),  in TRT_TensorOrWeights()
    719  : tensor_(rhs.tensor_),  in TRT_TensorOrWeights()
    727  tensor_ = rhs.tensor_;  in operator =()
    737  return tensor_ == nullptr ? simple_itensor_.get() : tensor_;  in tensor()
    742  return tensor_ == nullptr ? simple_itensor_.get() : tensor_;  in tensor()
|
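TRT_TensorOrWeights holds a borrowed nvinfer1::ITensor* tensor_ (marked "Not owned") alongside an internally constructed simple_itensor_, and tensor() returns tensor_ unless it is null, in which case it falls back to simple_itensor_.get(). A bare-bones standalone sketch of that either/or accessor; FakeITensor and TensorOrWeightsLike are invented stand-ins, not TensorRT types.

#include <iostream>
#include <memory>
#include <string>

// Stand-in for the two backing representations.
struct FakeITensor { std::string name; };

class TensorOrWeightsLike {
 public:
  // Wrap a borrowed, externally owned tensor pointer.
  explicit TensorOrWeightsLike(FakeITensor* borrowed) : tensor_(borrowed) {}
  // Or own a locally constructed one.
  explicit TensorOrWeightsLike(std::unique_ptr<FakeITensor> owned)
      : simple_itensor_(std::move(owned)) {}

  // Mirrors tensor(): prefer the borrowed pointer, fall back to the owned one.
  FakeITensor* tensor() const {
    return tensor_ == nullptr ? simple_itensor_.get() : tensor_;
  }

 private:
  FakeITensor* tensor_ = nullptr;                // not owned
  std::unique_ptr<FakeITensor> simple_itensor_;  // owned fallback
};

int main() {
  FakeITensor network_tensor{"from_network"};
  TensorOrWeightsLike a(&network_tensor);
  TensorOrWeightsLike b(std::make_unique<FakeITensor>(FakeITensor{"simple"}));
  std::cout << a.tensor()->name << " " << b.tensor()->name << "\n";
  // prints: from_network simple
}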
/external/tensorflow/tensorflow/python/util/ |
D | util.cc |
    448  explicit SparseTensorValueIterator(PyObject* tensor) : tensor_(tensor) {  in SparseTensorValueIterator()
    452  Safe_PyObjectPtr next() override { return std::move(tensor_); }  in next()
    455  Safe_PyObjectPtr tensor_;  member in tensorflow::swig::__anonee0a9dce0111::SparseTensorValueIterator
|
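SparseTensorValueIterator stores the wrapped Python object in a Safe_PyObjectPtr tensor_, and next() hands the value out via std::move(tensor_), so later calls see the moved-from (empty) handle. A tiny standalone sketch of that one-shot iterator idea, with std::unique_ptr standing in for the Python reference-counting wrapper.

#include <iostream>
#include <memory>
#include <string>
#include <utility>

// Stand-in for the Python-object handle the real iterator manages.
using Handle = std::unique_ptr<std::string>;

// The first next() yields the value; every later call yields an empty handle,
// mirroring the moved-from Safe_PyObjectPtr member.
class OneShotIterator {
 public:
  explicit OneShotIterator(Handle value) : tensor_(std::move(value)) {}
  Handle next() { return std::move(tensor_); }

 private:
  Handle tensor_;
};

int main() {
  OneShotIterator it(std::make_unique<std::string>("sparse_tensor_value"));
  Handle first = it.next();
  Handle second = it.next();
  std::cout << (first ? *first : "<empty>") << " / "
            << (second ? *second : "<empty>") << "\n";
  // prints: sparse_tensor_value / <empty>
}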
/external/tensorflow/tensorflow/contrib/verbs/ |
D | rdma.cc |
   1014  void RdmaTensorResponse::Resume() { SendContent(*tensor_, *proto_, is_dead_); }  in Resume()
   1152  tensor_ = new Tensor(allocator, in.dtype(), in.shape());  in Clone()
   1153  memcpy(DMAHelper::base(tensor_), DMAHelper::base(&in), in.TotalBytes());  in Clone()
   1155  tensor_ = new Tensor(in.dtype(), in.shape());  in Clone()
   1259  if (tensor_ != nullptr) {  in Destroy()
   1260  delete tensor_;  in Destroy()
|
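RdmaTensorResponse::Clone() either allocates a new Tensor with the source's dtype and shape and memcpy's the payload via DMAHelper::base(), or just constructs an empty tensor of the same type, and Destroy() deletes the owned copy. A skeletal standalone sketch of that allocate-and-copy ownership pattern; MiniTensor and ResponseLike are stand-ins, with a raw byte buffer in place of the tensor contents.

#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

// Stand-in for the tensor payload: a dtype tag plus a flat byte buffer.
struct MiniTensor {
  int dtype;
  std::vector<unsigned char> bytes;
  size_t TotalBytes() const { return bytes.size(); }
  const void* base() const { return bytes.data(); }
  void* base() { return bytes.data(); }
};

class ResponseLike {
 public:
  // Mirrors Clone(): allocate a same-shaped tensor and memcpy the contents.
  void Clone(const MiniTensor& in) {
    tensor_ = new MiniTensor{in.dtype, std::vector<unsigned char>(in.TotalBytes())};
    std::memcpy(tensor_->base(), in.base(), in.TotalBytes());
  }

  // Mirrors Destroy(): release the owned copy if one was made.
  void Destroy() {
    delete tensor_;
    tensor_ = nullptr;
  }

  const MiniTensor* tensor() const { return tensor_; }

 private:
  MiniTensor* tensor_ = nullptr;  // owned; the real code also news/deletes it
};

int main() {
  MiniTensor src{1, {10, 20, 30}};
  ResponseLike r;
  r.Clone(src);
  std::cout << static_cast<int>(r.tensor()->bytes[2]) << "\n";  // prints: 30
  r.Destroy();
}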