/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/c/common.h"

#include "tensorflow/lite/c/c_api_types.h"
#ifdef TF_LITE_TENSORFLOW_PROFILER
#include "tensorflow/lite/tensorflow_profiler_logger.h"
#endif

#ifndef TF_LITE_STATIC_MEMORY
#include <stdlib.h>
#include <string.h>
#endif  // TF_LITE_STATIC_MEMORY

extern "C" {

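// Returns the number of bytes required to hold a TfLiteIntArray with `size`
// elements, including the trailing flexible-array payload.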
size_t TfLiteIntArrayGetSizeInBytes(int size) {
  static TfLiteIntArray dummy;

  size_t computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
#if defined(_MSC_VER)
  // Context for why this is needed is in http://b/189926408#comment21
  computed_size -= sizeof(dummy.data[0]);
#endif
  return computed_size;
}

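// Two TfLiteIntArrays are equal if they are the same object or have the same
// size and element values; a null pointer only equals another null pointer.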
int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) {
  if (a == b) return 1;
  if (a == nullptr || b == nullptr) return 0;
  return TfLiteIntArrayEqualsArray(a, b->size, b->data);
}

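// Compares a TfLiteIntArray against a plain C array of the given size. A null
// `a` is treated as equal to an empty array.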
int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
                              const int b_data[]) {
  if (a == nullptr) return (b_size == 0);
  if (a->size != b_size) return 0;
  int i = 0;
  for (; i < a->size; i++)
    if (a->data[i] != b_data[i]) return 0;
  return 1;
}

#ifndef TF_LITE_STATIC_MEMORY

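// Heap-allocates a TfLiteIntArray of the given size. Returns nullptr if the
// computed allocation size is zero or the allocation fails.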
TfLiteIntArray* TfLiteIntArrayCreate(int size) {
  size_t alloc_size = TfLiteIntArrayGetSizeInBytes(size);
  if (alloc_size <= 0) return nullptr;
  TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size);
  if (!ret) return ret;
  ret->size = size;
  return ret;
}

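// Returns a newly allocated deep copy of `src`, or nullptr if `src` is null or
// the allocation fails.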
TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) {
  if (!src) return nullptr;
  TfLiteIntArray* ret = TfLiteIntArrayCreate(src->size);
  if (ret) {
    memcpy(ret->data, src->data, src->size * sizeof(int));
  }
  return ret;
}

void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); }

#endif  // TF_LITE_STATIC_MEMORY

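// Returns the number of bytes required to hold a TfLiteFloatArray with `size`
// elements, mirroring TfLiteIntArrayGetSizeInBytes above.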
int TfLiteFloatArrayGetSizeInBytes(int size) {
  static TfLiteFloatArray dummy;

  int computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
#if defined(_MSC_VER)
  // Context for why this is needed is in http://b/189926408#comment21
  computed_size -= sizeof(dummy.data[0]);
#endif
  return computed_size;
}

#ifndef TF_LITE_STATIC_MEMORY

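// Heap-allocates a TfLiteFloatArray of the given size. Returns nullptr if the
// allocation fails.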
TfLiteFloatArray* TfLiteFloatArrayCreate(int size) {
  TfLiteFloatArray* ret =
      (TfLiteFloatArray*)malloc(TfLiteFloatArrayGetSizeInBytes(size));
  // Guard against allocation failure before touching the struct, matching
  // TfLiteIntArrayCreate above.
  if (!ret) return ret;
  ret->size = size;
  return ret;
}

void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); }

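// Releases heap-allocated tensor data for dynamic and persistent-read-only
// tensors and clears the data pointer; other allocation types are left alone.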
void TfLiteTensorDataFree(TfLiteTensor* t) {
  if (t->allocation_type == kTfLiteDynamic ||
      t->allocation_type == kTfLitePersistentRo) {
    if (t->data.raw) {
#ifdef TF_LITE_TENSORFLOW_PROFILER
      tflite::OnTfLiteTensorDealloc(t);
#endif
      free(t->data.raw);
    }
  }
  t->data.raw = nullptr;
}

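// Frees affine quantization parameters (the scale and zero_point arrays) and
// resets the quantization to kTfLiteNoQuantization.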
void TfLiteQuantizationFree(TfLiteQuantization* quantization) {
  if (quantization->type == kTfLiteAffineQuantization) {
    TfLiteAffineQuantization* q_params =
        (TfLiteAffineQuantization*)(quantization->params);
    if (q_params->scale) {
      TfLiteFloatArrayFree(q_params->scale);
      q_params->scale = nullptr;
    }
    if (q_params->zero_point) {
      TfLiteIntArrayFree(q_params->zero_point);
      q_params->zero_point = nullptr;
    }
    free(q_params);
  }
  quantization->params = nullptr;
  quantization->type = kTfLiteNoQuantization;
}

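// Frees a TfLiteSparsity struct and all arrays it owns, including the
// per-dimension metadata for CSR-encoded dimensions. Safe to call with null.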
void TfLiteSparsityFree(TfLiteSparsity* sparsity) {
  if (sparsity == nullptr) {
    return;
  }

  if (sparsity->traversal_order) {
    TfLiteIntArrayFree(sparsity->traversal_order);
    sparsity->traversal_order = nullptr;
  }

  if (sparsity->block_map) {
    TfLiteIntArrayFree(sparsity->block_map);
    sparsity->block_map = nullptr;
  }

  if (sparsity->dim_metadata) {
    int i = 0;
    for (; i < sparsity->dim_metadata_size; i++) {
      TfLiteDimensionMetadata metadata = sparsity->dim_metadata[i];
      if (metadata.format == kTfLiteDimSparseCSR) {
        TfLiteIntArrayFree(metadata.array_segments);
        metadata.array_segments = nullptr;
        TfLiteIntArrayFree(metadata.array_indices);
        metadata.array_indices = nullptr;
      }
    }
    free(sparsity->dim_metadata);
    sparsity->dim_metadata = nullptr;
  }

  free(sparsity);
}

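// Releases everything a tensor owns: its data buffer, dims, dims_signature,
// quantization parameters, and sparsity metadata.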
void TfLiteTensorFree(TfLiteTensor* t) {
  TfLiteTensorDataFree(t);
  if (t->dims) TfLiteIntArrayFree(t->dims);
  t->dims = nullptr;

  if (t->dims_signature) {
    TfLiteIntArrayFree((TfLiteIntArray*)t->dims_signature);
  }
  t->dims_signature = nullptr;

  TfLiteQuantizationFree(&t->quantization);
  TfLiteSparsityFree(t->sparsity);
  t->sparsity = nullptr;
}

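// Frees any resources held by `tensor` and reinitializes it with the given
// type, name, shape, buffer, and allocation metadata.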
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, bool is_variable,
                       TfLiteTensor* tensor) {
  TfLiteTensorFree(tensor);
  tensor->type = type;
  tensor->name = name;
  tensor->dims = dims;
  tensor->params = quantization;
  tensor->data.raw = buffer;
  tensor->bytes = size;
  tensor->allocation_type = allocation_type;
  tensor->allocation = allocation;
  tensor->is_variable = is_variable;

  tensor->quantization.type = kTfLiteNoQuantization;
  tensor->quantization.params = nullptr;
}

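// Copies the type, shape, data, and delegate-related fields from `src` to
// `dst`. The two tensors must already have the same byte size.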
TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst) {
  if (!src || !dst) return kTfLiteOk;
  if (src->bytes != dst->bytes) return kTfLiteError;
  if (src == dst) return kTfLiteOk;

  dst->type = src->type;
  if (dst->dims) TfLiteIntArrayFree(dst->dims);
  dst->dims = TfLiteIntArrayCopy(src->dims);
  memcpy(dst->data.raw, src->data.raw, src->bytes);
  dst->buffer_handle = src->buffer_handle;
  dst->data_is_stale = src->data_is_stale;
  dst->delegate = src->delegate;

  return kTfLiteOk;
}

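// Resizes the data buffer of a dynamic or persistent-read-only tensor to
// `num_bytes`, growing it with realloc when needed; other allocation types
// are ignored.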
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
  if (tensor->allocation_type != kTfLiteDynamic &&
      tensor->allocation_type != kTfLitePersistentRo) {
    return;
  }
  // TODO(b/145340303): Tensor data should be aligned.
  if (!tensor->data.raw) {
    tensor->data.raw = (char*)malloc(num_bytes);
#ifdef TF_LITE_TENSORFLOW_PROFILER
    tflite::OnTfLiteTensorAlloc(tensor, num_bytes);
#endif
  } else if (num_bytes > tensor->bytes) {
#ifdef TF_LITE_TENSORFLOW_PROFILER
    tflite::OnTfLiteTensorDealloc(tensor);
#endif
    tensor->data.raw = (char*)realloc(tensor->data.raw, num_bytes);
#ifdef TF_LITE_TENSORFLOW_PROFILER
    tflite::OnTfLiteTensorAlloc(tensor, num_bytes);
#endif
  }
  tensor->bytes = num_bytes;
}
#endif  // TF_LITE_STATIC_MEMORY

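// Maps a TfLiteType enum value to its human-readable name.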
const char* TfLiteTypeGetName(TfLiteType type) {
  switch (type) {
    case kTfLiteNoType:
      return "NOTYPE";
    case kTfLiteFloat32:
      return "FLOAT32";
    case kTfLiteUInt16:
      return "UINT16";
    case kTfLiteInt16:
      return "INT16";
    case kTfLiteInt32:
      return "INT32";
    case kTfLiteUInt32:
      return "UINT32";
    case kTfLiteUInt8:
      return "UINT8";
    case kTfLiteInt8:
      return "INT8";
    case kTfLiteInt64:
      return "INT64";
    case kTfLiteUInt64:
      return "UINT64";
    case kTfLiteBool:
      return "BOOL";
    case kTfLiteComplex64:
      return "COMPLEX64";
    case kTfLiteComplex128:
      return "COMPLEX128";
    case kTfLiteString:
      return "STRING";
    case kTfLiteFloat16:
      return "FLOAT16";
    case kTfLiteFloat64:
      return "FLOAT64";
    case kTfLiteResource:
      return "RESOURCE";
    case kTfLiteVariant:
      return "VARIANT";
  }
  return "Unknown type";
}

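// Returns a value-initialized TfLiteDelegate with all fields zeroed.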
TfLiteDelegate TfLiteDelegateCreate() { return TfLiteDelegate{}; }

}  // extern "C"