/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/c/tf_tensor.h"

#include <algorithm>
#include <cstring>
#include <memory>
#include <vector>

#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/coding.h"
#include "tensorflow/core/platform/casts.h"

using tensorflow::Status;
using tensorflow::Tensor;
using tensorflow::TensorBuffer;
using tensorflow::errors::FailedPrecondition;
using tensorflow::errors::InvalidArgument;

namespace tensorflow {
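// Allocates a raw, EIGEN_MAX_ALIGN_BYTES-aligned buffer of `len` bytes from
// `allocator`, recording the allocation with LogMemory when memory logging is
// enabled.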
void* allocate_tensor(const char* operation, size_t len, Allocator* allocator) {
  void* data = allocator->AllocateRaw(EIGEN_MAX_ALIGN_BYTES, len);
  if (LogMemory::IsEnabled() && data != nullptr) {
    LogMemory::RecordRawAllocation(
        operation, LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, len, data,
        allocator);
  }
  return data;
}

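// Convenience overload that allocates from the default CPU allocator.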
void* allocate_tensor(const char* operation, size_t len) {
  return allocate_tensor(operation, len, cpu_allocator());
}

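// Deallocation callback with the signature expected by TF_ManagedBuffer and
// TF_NewTensor. `arg` is interpreted as the Allocator* that owns `data`; if it
// is null, the default CPU allocator is used. The deallocation is mirrored in
// LogMemory when memory logging is enabled.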
void deallocate_buffer(void* data, size_t len, void* arg) {
  Allocator* allocator = nullptr;
  if (arg == nullptr) {
    allocator = cpu_allocator();
  } else {
    allocator = reinterpret_cast<Allocator*>(arg);
  }
  if (LogMemory::IsEnabled() && data != nullptr) {
    LogMemory::RecordRawDeallocation(
        "TensorFlow C Api", LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, data,
        allocator, false);
  }
  allocator->DeallocateRaw(data);
}
}  // namespace tensorflow

namespace {
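// Wraps `buf` in a TF_Tensor with the given dtype and shape. Takes over the
// caller's reference on `buf`. Returns nullptr if `len` is too small to hold
// the requested number of elements.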
TF_Tensor* CreateTensor(TF_ManagedBuffer* buf, TF_DataType dtype,
                        const int64_t* dims, int num_dims, size_t len) {
  std::vector<tensorflow::int64> dimvec(num_dims);
  for (int i = 0; i < num_dims; ++i) {
    dimvec[i] = static_cast<tensorflow::int64>(dims[i]);
  }

  // TODO(gjn): Make the choice of interface a compile-time configuration.
  tensorflow::TensorInterface ret(
      Tensor(static_cast<tensorflow::DataType>(dtype),
             tensorflow::TensorShape(dimvec), buf));
  buf->Unref();
  size_t elem_size = TF_DataTypeSize(dtype);
  if (elem_size > 0 && len < (elem_size * ret.NumElements())) {
    return nullptr;
  }
  return new TF_Tensor{new tensorflow::TensorInterface(ret)};
}
}  // namespace

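// Allocates a new tensor whose backing buffer is owned by the TF_Tensor
// itself. A minimal usage sketch (the shape and values below are assumptions
// for illustration, not part of this file):
//
//   int64_t dims[] = {2, 3};
//   TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, dims, 2, 6 * sizeof(float));
//   static_cast<float*>(TF_TensorData(t))[0] = 1.0f;  // fill the buffer
//   TF_DeleteTensor(t);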
TF_Tensor* TF_AllocateTensor(TF_DataType dtype, const int64_t* dims,
                             int num_dims, size_t len) {
  void* data = tensorflow::allocate_tensor("TF_AllocateTensor", len,
                                           tensorflow::cpu_allocator());
  TF_ManagedBuffer* buf =
      new TF_ManagedBuffer(data, len, tensorflow::deallocate_buffer,
                           tensorflow::cpu_allocator(), /*owns_memory=*/true);
  return CreateTensor(buf, dtype, dims, num_dims, len);
}

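// Wraps a caller-provided buffer in a TF_Tensor. If the buffer is misaligned
// (and the dtype is a plain memcpy-able type), the data is copied into an
// aligned allocation and `deallocator` is invoked immediately; otherwise the
// tensor adopts `data` and calls `deallocator` when it is released. A minimal
// sketch of adopting a caller-owned buffer (the malloc/free pairing here is an
// assumption for illustration, not part of this file):
//
//   int64_t dims[] = {4};
//   float* values = static_cast<float*>(malloc(4 * sizeof(float)));
//   TF_Tensor* t = TF_NewTensor(
//       TF_FLOAT, dims, 1, values, 4 * sizeof(float),
//       [](void* data, size_t, void*) { free(data); }, nullptr);
//   TF_DeleteTensor(t);  // runs the deallocator (unless it already ran)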
TF_Tensor* TF_NewTensor(TF_DataType dtype, const int64_t* dims, int num_dims,
                        void* data, size_t len,
                        void (*deallocator)(void* data, size_t len, void* arg),
                        void* deallocator_arg) {
  TF_ManagedBuffer* buf = nullptr;
  if (dtype != TF_STRING && dtype != TF_RESOURCE &&
      tensorflow::DataTypeCanUseMemcpy(
          static_cast<tensorflow::DataType>(dtype)) &&
      reinterpret_cast<intptr_t>(data) % std::max(1, EIGEN_MAX_ALIGN_BYTES) !=
          0) {
    // TF_STRING and TF_RESOURCE tensors have a different representation in
    // TF_Tensor than they do in tensorflow::Tensor. So a copy here is a waste
    // (any alignment requirements will be taken care of by TF_TensorToTensor
    // and TF_TensorFromTensor).
    //
    // Other types have the same representation, so copy only if it is safe to
    // do so.
    buf = new TF_ManagedBuffer(tensorflow::allocate_tensor("TF_NewTensor", len),
                               len, tensorflow::deallocate_buffer, nullptr,
                               /*owns_memory=*/true);
    std::memcpy(buf->data(), data, len);
    // Free the original buffer.
    deallocator(data, len, deallocator_arg);
  } else {
    buf = new TF_ManagedBuffer(data, len, deallocator, deallocator_arg,
                               /*owns_memory=*/false);
  }

  return CreateTensor(buf, dtype, dims, num_dims, len);
}

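// Returns `t` if its underlying buffer is uniquely referenced and owns its
// memory (see TensorInterface::CanMove below), and nullptr otherwise. The
// tensor itself is not modified.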
TF_Tensor* TF_TensorMaybeMove(TF_Tensor* t) {
  return t->tensor->CanMove() ? t : nullptr;
}

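// Releases the tensor's reference on its underlying buffer (if any) and frees
// the TF_Tensor handle. Safe to call with nullptr.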
void TF_DeleteTensor(TF_Tensor* t) {
  if (t == nullptr) {
    return;
  }

  if (t->tensor) {
    t->tensor->Release();
  }

  delete t;
}

TF_DataType TF_TensorType(const TF_Tensor* t) {
  return static_cast<TF_DataType>(t->tensor->Type());
}

int TF_NumDims(const TF_Tensor* t) { return t->tensor->NumDims(); }

int64_t TF_Dim(const TF_Tensor* t, int dim_index) {
  return t->tensor->Dim(dim_index);
}

size_t TF_TensorByteSize(const TF_Tensor* t) { return t->tensor->ByteSize(); }

void* TF_TensorData(const TF_Tensor* t) { return t->tensor->Data(); }

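// Total number of elements, i.e. the product of all dimension sizes. For
// example, a tensor of shape {2, 3} has 6 elements; a scalar (rank 0) has 1.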
int64_t TF_TensorElementCount(const TF_Tensor* t) {
  int64_t result = 1;
  int rank = TF_NumDims(t);
  for (int dim = 0; dim < rank; ++dim) {
    result *= TF_Dim(t, dim);
  }
  return result;
}

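// Reinterprets the buffer of `from` as a tensor of `type` with shape
// `new_dims`, storing the result in `to` without copying data. Errors from the
// underlying Tensor::BitcastFrom (for example, incompatible sizes) are
// reported through `status`.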
void TF_TensorBitcastFrom(const TF_Tensor* from, TF_DataType type,
                          TF_Tensor* to, const int64_t* new_dims,
                          int num_new_dims, TF_Status* status) {
  TF_SetStatus(status, TF_OK, "");
  Status cc_status(
      tensorflow::down_cast<tensorflow::TensorInterface*>(to->tensor)
          ->BitcastFrom(
              *tensorflow::down_cast<const tensorflow::TensorInterface*>(
                  from->tensor),
              static_cast<tensorflow::DataType>(type), new_dims, num_new_dims));
  Set_TF_Status_from_Status(status, cc_status);
}

namespace tensorflow {

void TensorInterface::Release() { delete this; }

bool TensorInterface::CanMove() const {
  // It is safe to move the Tensor if and only if we own the unique reference
  // to it. In that case, we might as well not delete and reallocate, but a
  // future implementation might need to do so.
  TensorBuffer* buf = tensorflow::TensorCApi::Buffer(tensor_);
  if (buf->RefCountIsOne() && buf->root_buffer()->RefCountIsOne() &&
      buf->OwnsMemory()) {
    return true;
  }
  return false;
}

DataType TensorInterface::Type() const { return tensor_.dtype(); }

int TensorInterface::NumDims() const { return tensor_.dims(); }

int64_t TensorInterface::Dim(int dim_index) const {
  return static_cast<int64_t>(tensor_.dim_size(dim_index));
}

int64_t TensorInterface::NumElements() const {
  return static_cast<int64_t>(tensor_.NumElements());
}

size_t TensorInterface::ByteSize() const {
  return tensorflow::TensorCApi::Buffer(tensor_)->size();
}

void* TensorInterface::Data() const {
  return tensorflow::TensorCApi::Buffer(tensor_)->data();
}

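// Builds a TensorShape from `new_dims` and delegates to Tensor::BitcastFrom,
// which shares the underlying buffer rather than copying it.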
Status TensorInterface::BitcastFrom(const TensorInterface& from, DataType type,
                                    const int64_t* new_dims, int num_new_dims) {
  tensorflow::TensorShape s;
  for (int i = 0; i < num_new_dims; ++i) {
    s.AddDim(new_dims[i]);
  }
  return tensor_.BitcastFrom(from.tensor_, type, s);
}

}  // namespace tensorflow

// --------------------------------------------------------------------------

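// Deallocator for buffers allocated with `new char[]`; expects `arg` to be the
// same pointer as `data`.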
static void DeleteArray(void* data, size_t size, void* arg) {
  DCHECK_EQ(data, arg);
  delete[] reinterpret_cast<char*>(arg);
}

// Create an empty tensor of type 'dtype'. 'shape' can be arbitrary, but has to
// result in a zero-sized tensor.
static TF_Tensor* EmptyTensor(TF_DataType dtype,
                              const tensorflow::TensorShape& shape) {
  static char empty;
  tensorflow::int64 nelems = 1;
  std::vector<tensorflow::int64> dims;
  for (int i = 0; i < shape.dims(); ++i) {
    dims.push_back(shape.dim_size(i));
    nelems *= shape.dim_size(i);
  }
  CHECK_EQ(nelems, 0);
  static_assert(sizeof(int64_t) == sizeof(tensorflow::int64),
                "64-bit int types should match in size");
  return TF_NewTensor(
      dtype, reinterpret_cast<const int64_t*>(dims.data()), shape.dims(),
      reinterpret_cast<void*>(&empty), 0, [](void*, size_t, void*) {}, nullptr);
}

namespace tensorflow {

// Non-static for testing.
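// Converts a tensorflow::Tensor into a TF_Tensor. Uninitialized tensors yield
// a FailedPrecondition error, zero-element tensors are backed by a static
// dummy byte, and scalar DT_RESOURCE tensors are serialized into the
// TF_RESOURCE representation; all other tensors share their buffer with the
// returned handle.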
TF_Tensor* TF_TensorFromTensor(const tensorflow::Tensor& src, Status* status) {
  *status = tensorflow::Status::OK();
  if (!src.IsInitialized()) {
    *status = FailedPrecondition(
        "attempt to use a tensor with an uninitialized value");
    return nullptr;
  }
  if (src.NumElements() == 0) {
    return EmptyTensor(static_cast<TF_DataType>(src.dtype()), src.shape());
  }
  if (src.dtype() == tensorflow::DT_RESOURCE) {
    if (src.shape().dims() != 0) {
      *status = InvalidArgument(
          "Unexpected non-scalar DT_RESOURCE tensor seen (shape: ",
          src.shape().DebugString(),
          "). Please file a bug at "
          "https://github.com/tensorflow/tensorflow/issues/new, "
          "ideally with a "
          "short code snippet that reproduces this error.");
      return nullptr;
    }
    const string str =
        src.scalar<tensorflow::ResourceHandle>()().SerializeAsString();
    TF_Tensor* t = TF_AllocateTensor(TF_RESOURCE, {}, 0, str.size());
    std::memcpy(TF_TensorData(t), str.c_str(), str.size());
    return t;
  }

  Tensor tensor;
  if (!tensor.CopyFrom(src, src.shape())) {
    return nullptr;
  }
  return new TF_Tensor{new tensorflow::TensorInterface(std::move(tensor))};
}

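// Converts a TF_Tensor back into a tensorflow::Tensor by delegating to
// TensorInterface::ToTensor below.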
Status TF_TensorToTensor(const TF_Tensor* src, Tensor* dst) {
  return tensorflow::down_cast<const tensorflow::TensorInterface*>(src->tensor)
      ->ToTensor(dst);
}

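// For DT_RESOURCE tensors the TF_Tensor payload is a serialized
// ResourceHandle, so it must be a scalar and is parsed back into a handle;
// every other dtype is a shallow copy that shares the underlying buffer.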
Status TensorInterface::ToTensor(tensorflow::Tensor* dst) const {
  if (tensor_.dtype() == DT_RESOURCE) {
    if (tensor_.dims() != 0) {
      return InvalidArgument(
          "Malformed TF_RESOURCE tensor: expected a scalar, got a tensor with "
          "shape ",
          tensor_.shape().DebugString());
    }
    *dst = tensorflow::Tensor(tensorflow::DT_RESOURCE, tensor_.shape());
    if (!dst->scalar<tensorflow::ResourceHandle>()().ParseFromString(
            string(static_cast<const char*>(Data()), ByteSize()))) {
      return InvalidArgument(
          "Malformed TF_RESOURCE tensor: unable to parse resource handle");
    }
    return Status::OK();
  }
  *dst = tensor_;
  return Status::OK();
}

bool TensorInterface::IsAligned() const { return tensor_.IsAligned(); }

}  // namespace tensorflow

bool TF_TensorIsAligned(const TF_Tensor* t) { return t->tensor->IsAligned(); }