/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/c/tf_tensor.h"

#include <memory>
#include <vector>

#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/coding.h"
#include "tensorflow/core/platform/casts.h"

using tensorflow::Status;
using tensorflow::Tensor;
using tensorflow::TensorBuffer;
using tensorflow::errors::FailedPrecondition;
using tensorflow::errors::InvalidArgument;

namespace tensorflow {
void* allocate_tensor(const char* operation, size_t len, Allocator* allocator) {
  void* data = allocator->AllocateRaw(EIGEN_MAX_ALIGN_BYTES, len);
  if (LogMemory::IsEnabled() && data != nullptr) {
    LogMemory::RecordRawAllocation(
        operation, LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, len, data,
        allocator);
  }
  return data;
}

void* allocate_tensor(const char* operation, size_t len) {
  return allocate_tensor(operation, len, cpu_allocator());
}

void deallocate_buffer(void* data, size_t len, void* arg) {
  Allocator* allocator = nullptr;
  if (arg == nullptr) {
    allocator = cpu_allocator();
  } else {
    allocator = reinterpret_cast<Allocator*>(arg);
  }
  if (LogMemory::IsEnabled() && data != nullptr) {
    LogMemory::RecordRawDeallocation(
        "TensorFlow C Api", LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, data,
        allocator, false);
  }
  allocator->DeallocateRaw(data);
}
}  // namespace tensorflow

namespace {
TF_Tensor* CreateTensor(TF_ManagedBuffer* buf, TF_DataType dtype,
                        const int64_t* dims, int num_dims, size_t len) {
  std::vector<tensorflow::int64> dimvec(num_dims);
  for (int i = 0; i < num_dims; ++i) {
    dimvec[i] = static_cast<tensorflow::int64>(dims[i]);
  }

  // TODO(gjn): Make the choice of interface a compile-time configuration.
  tensorflow::TensorInterface ret(
      Tensor(static_cast<tensorflow::DataType>(dtype),
             tensorflow::TensorShape(dimvec), buf));
  buf->Unref();
  size_t elem_size = TF_DataTypeSize(dtype);
  if (elem_size > 0 && len < (elem_size * ret.NumElements())) {
    return nullptr;
  }
  return new TF_Tensor{new tensorflow::TensorInterface(ret)};
}
}  // namespace

TF_Tensor* TF_AllocateTensor(TF_DataType dtype, const int64_t* dims,
                             int num_dims, size_t len) {
  void* data = tensorflow::allocate_tensor("TF_AllocateTensor", len,
                                           tensorflow::cpu_allocator());
  TF_ManagedBuffer* buf =
      new TF_ManagedBuffer(data, len, tensorflow::deallocate_buffer,
                           tensorflow::cpu_allocator(), /*owns_memory=*/true);
  return CreateTensor(buf, dtype, dims, num_dims, len);
}

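// Illustrative sketch (not part of the original file): how a client of this C
// API might allocate a 2x3 float tensor, fill it through TF_TensorData, and
// later release it with TF_DeleteTensor. The helper name ExampleAllocateAndFill
// is hypothetical. Note that TF_AllocateTensor returns nullptr (via the len
// check in CreateTensor above) when `len` is smaller than the dtype size times
// the element count.
static TF_Tensor* ExampleAllocateAndFill() {
  const int64_t dims[] = {2, 3};
  TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, dims, /*num_dims=*/2,
                                   /*len=*/6 * sizeof(float));
  if (t == nullptr) return nullptr;  // len did not cover all elements.
  float* data = static_cast<float*>(TF_TensorData(t));
  for (int i = 0; i < 6; ++i) {
    data[i] = static_cast<float>(i);  // Row-major fill of the 2x3 buffer.
  }
  return t;  // Caller releases with TF_DeleteTensor(t).
}
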
TF_Tensor* TF_NewTensor(TF_DataType dtype, const int64_t* dims, int num_dims,
                        void* data, size_t len,
                        void (*deallocator)(void* data, size_t len, void* arg),
                        void* deallocator_arg) {
  TF_ManagedBuffer* buf = nullptr;
  if (dtype != TF_STRING && dtype != TF_RESOURCE &&
      tensorflow::DataTypeCanUseMemcpy(
          static_cast<tensorflow::DataType>(dtype)) &&
      reinterpret_cast<intptr_t>(data) % std::max(1, EIGEN_MAX_ALIGN_BYTES) !=
          0) {
    // TF_STRING and TF_RESOURCE tensors have a different representation in
    // TF_Tensor than they do in tensorflow::Tensor. So a copy here is a waste
    // (any alignment requirements will be taken care of by TF_TensorToTensor
    // and TF_TensorFromTensor).
    //
    // Other types have the same representation, so copy only if it is safe to
    // do so.
    buf = new TF_ManagedBuffer(tensorflow::allocate_tensor("TF_NewTensor", len),
                               len, tensorflow::deallocate_buffer, nullptr,
                               /*owns_memory=*/true);
    std::memcpy(buf->data(), data, len);
    // Free the original buffer.
    deallocator(data, len, deallocator_arg);
  } else {
    buf = new TF_ManagedBuffer(data, len, deallocator, deallocator_arg,
                               /*owns_memory=*/false);
  }

  return CreateTensor(buf, dtype, dims, num_dims, len);
}

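// Illustrative sketch (hypothetical helper, not in the original file): wrapping
// a caller-owned heap buffer with TF_NewTensor. Per the logic above, memcpy-able
// dtypes whose data pointer is not aligned to EIGEN_MAX_ALIGN_BYTES are copied
// and the deallocator is invoked immediately; otherwise the buffer is adopted
// and the deallocator runs when the tensor is released.
static TF_Tensor* ExampleWrapBuffer() {
  const int64_t dims[] = {4};
  float* raw = new float[4]{0.0f, 1.0f, 2.0f, 3.0f};
  return TF_NewTensor(
      TF_FLOAT, dims, /*num_dims=*/1, raw, /*len=*/4 * sizeof(float),
      // Deallocator: called either right away (copy path) or at release time.
      [](void* data, size_t, void*) { delete[] static_cast<float*>(data); },
      /*deallocator_arg=*/nullptr);
}
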
TF_Tensor* TF_TensorMaybeMove(TF_Tensor* t) {
  return t->tensor->CanMove() ? t : nullptr;
}

void TF_DeleteTensor(TF_Tensor* t) {
  if (t == nullptr) {
    return;
  }

  if (t->tensor) {
    t->tensor->Release();
  }

  delete t;
}

TF_DataType TF_TensorType(const TF_Tensor* t) {
  return static_cast<TF_DataType>(t->tensor->Type());
}

int TF_NumDims(const TF_Tensor* t) { return t->tensor->NumDims(); }

int64_t TF_Dim(const TF_Tensor* t, int dim_index) {
  return t->tensor->Dim(dim_index);
}

size_t TF_TensorByteSize(const TF_Tensor* t) { return t->tensor->ByteSize(); }

void* TF_TensorData(const TF_Tensor* t) { return t->tensor->Data(); }

int64_t TF_TensorElementCount(const TF_Tensor* t) {
  int64_t result = 1;
  int rank = TF_NumDims(t);
  for (int dim = 0; dim < rank; ++dim) {
    result *= TF_Dim(t, dim);
  }
  return result;
}

void TF_TensorBitcastFrom(const TF_Tensor* from, TF_DataType type,
                          TF_Tensor* to, const int64_t* new_dims,
                          int num_new_dims, TF_Status* status) {
  TF_SetStatus(status, TF_OK, "");
  Status cc_status(
      tensorflow::down_cast<tensorflow::TensorInterface*>(to->tensor)
          ->BitcastFrom(
              *tensorflow::down_cast<const tensorflow::TensorInterface*>(
                  from->tensor),
              static_cast<tensorflow::DataType>(type), new_dims, num_new_dims));
  Set_TF_Status_from_Status(status, cc_status);
}

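// Illustrative sketch (hypothetical, not in the original file): reinterpreting
// an existing tensor's buffer under a different dtype and shape with
// TF_TensorBitcastFrom, checking the TF_Status object for failure. Assumes
// `from` and `to` were created elsewhere and that the byte sizes are
// compatible with `num_elements` int32 values.
static bool ExampleBitcastToInt32(const TF_Tensor* from, TF_Tensor* to,
                                  int64_t num_elements) {
  TF_Status* status = TF_NewStatus();
  const int64_t new_dims[] = {num_elements};
  TF_TensorBitcastFrom(from, TF_INT32, to, new_dims, /*num_new_dims=*/1,
                       status);
  const bool ok = TF_GetCode(status) == TF_OK;
  TF_DeleteStatus(status);
  return ok;
}
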
namespace tensorflow {

void TensorInterface::Release() {
  if (Type() == DT_STRING && NumElements() > 0) {
    TF_TString* data = static_cast<TF_TString*>(Data());
    if (CanMove() && data != nullptr) {
      for (int64_t i = 0; i < NumElements(); ++i) {
        TF_TString_Dealloc(&data[i]);
      }
    }
  }
  delete this;
}

bool TensorInterface::CanMove() const {
  // It is safe to move the Tensor if and only if we own the unique reference
  // to it. In that case, we might as well not delete and reallocate, but a
  // future implementation might need to do so.
  TensorBuffer* buf = tensorflow::TensorCApi::Buffer(tensor_);
  if (buf->RefCountIsOne() && buf->root_buffer()->RefCountIsOne() &&
      buf->OwnsMemory()) {
    return true;
  }
  return false;
}

std::string TensorInterface::SummarizeValue() const {
  return tensor_.SummarizeValue(/*max_entries=*/3, /*print_v2=*/true);
}

DataType TensorInterface::Type() const { return tensor_.dtype(); }

int TensorInterface::NumDims() const { return tensor_.dims(); }

int64_t TensorInterface::Dim(int dim_index) const {
  return static_cast<int64_t>(tensor_.dim_size(dim_index));
}

int64_t TensorInterface::NumElements() const {
  return static_cast<int64_t>(tensor_.NumElements());
}

size_t TensorInterface::ByteSize() const {
  return tensorflow::TensorCApi::Buffer(tensor_)->size();
}

void* TensorInterface::Data() const {
  return tensorflow::TensorCApi::Buffer(tensor_)->data();
}

Status TensorInterface::BitcastFrom(const TensorInterface& from, DataType type,
                                    const int64_t* new_dims, int num_new_dims) {
  tensorflow::TensorShape s;
  for (int i = 0; i < num_new_dims; ++i) {
    s.AddDim(new_dims[i]);
  }
  return tensor_.BitcastFrom(from.tensor_, type, s);
}

Status TensorInterface::FromProto(const tensorflow::TensorProto& from) {
  bool success = tensor_.FromProto(from);
  if (success) return Status::OK();
  return errors::InvalidArgument("Unparseable tensor proto");
}

}  // namespace tensorflow

// --------------------------------------------------------------------------

static void DeleteArray(void* data, size_t size, void* arg) {
  DCHECK_EQ(data, arg);
  delete[] reinterpret_cast<char*>(arg);
}

// Create an empty tensor of type 'dtype'. 'shape' can be arbitrary, but has to
// result in a zero-sized tensor.
static TF_Tensor* EmptyTensor(TF_DataType dtype,
                              const tensorflow::TensorShape& shape) {
  static char empty;
  int64_t nelems = 1;
  std::vector<tensorflow::int64> dims;
  for (int i = 0; i < shape.dims(); ++i) {
    dims.push_back(shape.dim_size(i));
    nelems *= shape.dim_size(i);
  }
  CHECK_EQ(nelems, 0);
  static_assert(sizeof(int64_t) == sizeof(tensorflow::int64),
                "64-bit int types should match in size");
  return TF_NewTensor(
      dtype, reinterpret_cast<const int64_t*>(dims.data()), shape.dims(),
      reinterpret_cast<void*>(&empty), 0, [](void*, size_t, void*) {}, nullptr);
}

namespace tensorflow {

// Non-static for testing.
TF_Tensor* TF_TensorFromTensor(const tensorflow::Tensor& src, Status* status) {
  *status = tensorflow::Status::OK();
  if (!src.IsInitialized()) {
    *status = FailedPrecondition(
        "attempt to use a tensor with an uninitialized value");
    return nullptr;
  }
  if (src.NumElements() == 0) {
    return EmptyTensor(static_cast<TF_DataType>(src.dtype()), src.shape());
  }
  if (src.dtype() == tensorflow::DT_RESOURCE) {
    if (src.shape().dims() != 0) {
      *status = InvalidArgument(
          "Unexpected non-scalar DT_RESOURCE tensor seen (shape: ",
          src.shape().DebugString(),
          "). Please file a bug at "
          "https://github.com/tensorflow/tensorflow/issues/new, "
          "ideally with a "
          "short code snippet that reproduces this error.");
      return nullptr;
    }
    const string str =
        src.scalar<tensorflow::ResourceHandle>()().SerializeAsString();
    TF_Tensor* t = TF_AllocateTensor(TF_RESOURCE, {}, 0, str.size());
    std::memcpy(TF_TensorData(t), str.c_str(), str.size());
    return t;
  }

  Tensor tensor;
  if (!tensor.CopyFrom(src, src.shape())) {
    return nullptr;
  }
  return new TF_Tensor{new tensorflow::TensorInterface(std::move(tensor))};
}

Status TF_TensorToTensor(const TF_Tensor* src, Tensor* dst) {
  return tensorflow::down_cast<const tensorflow::TensorInterface*>(src->tensor)
      ->ToTensor(dst);
}

Status TensorInterface::ToTensor(tensorflow::Tensor* dst) const {
  if (tensor_.dtype() == DT_RESOURCE) {
    if (tensor_.dims() != 0) {
      return InvalidArgument(
          "Malformed TF_RESOURCE tensor: expected a scalar, got a tensor with "
          "shape ",
          tensor_.shape().DebugString());
    }
    *dst = tensorflow::Tensor(tensorflow::DT_RESOURCE, tensor_.shape());
    if (!dst->scalar<tensorflow::ResourceHandle>()().ParseFromString(
            string(static_cast<const char*>(Data()), ByteSize()))) {
      return InvalidArgument(
          "Malformed TF_RESOURCE tensor: unable to parse resource handle");
    }
    return Status::OK();
  }
  *dst = tensor_;
  return Status::OK();
}

bool TensorInterface::IsAligned() const { return tensor_.IsAligned(); }

}  // namespace tensorflow

bool TF_TensorIsAligned(const TF_Tensor* t) { return t->tensor->IsAligned(); }