/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/memory_helpers.h"

#include <cstddef>
#include <cstdint>

#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

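// Returns the next pointer at or above `data` that is aligned to `alignment`
// bytes.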
uint8_t* AlignPointerUp(uint8_t* data, size_t alignment) {
  std::uintptr_t data_as_uintptr_t = reinterpret_cast<std::uintptr_t>(data);
  uint8_t* aligned_result = reinterpret_cast<uint8_t*>(
      ((data_as_uintptr_t + (alignment - 1)) / alignment) * alignment);
  return aligned_result;
}

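// Returns the nearest pointer at or below `data` that is aligned to
// `alignment` bytes.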
uint8_t* AlignPointerDown(uint8_t* data, size_t alignment) {
  std::uintptr_t data_as_uintptr_t = reinterpret_cast<std::uintptr_t>(data);
  uint8_t* aligned_result =
      reinterpret_cast<uint8_t*>((data_as_uintptr_t / alignment) * alignment);
  return aligned_result;
}

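// Rounds `size` up to the nearest multiple of `alignment`, e.g.
// AlignSizeUp(12, 4) == 12 and AlignSizeUp(13, 4) == 16.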
size_t AlignSizeUp(size_t size, size_t alignment) {
  size_t aligned_size = (((size + (alignment - 1)) / alignment) * alignment);
  return aligned_size;
}

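// Writes the per-element size in bytes of `type` to `size`, or returns
// kTfLiteError for unsupported types.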
TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) {
  switch (type) {
    case kTfLiteFloat16:
      // TfLiteFloat16 is a 16-bit storage type; C++ has no built-in half
      // precision type, so report the size of a 16-bit integer.
      *size = sizeof(int16_t);
      break;
    case kTfLiteFloat32:
      *size = sizeof(float);
      break;
    case kTfLiteFloat64:
      *size = sizeof(double);
      break;
    case kTfLiteInt16:
      *size = sizeof(int16_t);
      break;
    case kTfLiteInt32:
      *size = sizeof(int32_t);
      break;
    case kTfLiteUInt32:
      *size = sizeof(uint32_t);
      break;
    case kTfLiteUInt8:
      *size = sizeof(uint8_t);
      break;
    case kTfLiteInt8:
      *size = sizeof(int8_t);
      break;
    case kTfLiteInt64:
      *size = sizeof(int64_t);
      break;
    case kTfLiteUInt64:
      *size = sizeof(uint64_t);
      break;
    case kTfLiteBool:
      *size = sizeof(bool);
      break;
    case kTfLiteComplex64:
      *size = sizeof(float) * 2;
      break;
    case kTfLiteComplex128:
      *size = sizeof(double) * 2;
      break;
    default:
      return kTfLiteError;
  }
  return kTfLiteOk;
}

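// Computes the total byte requirement of `flatbuffer_tensor` from its shape
// and element type. Also reports the per-element size through `type_size`.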
TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor,
                                    size_t* bytes, size_t* type_size,
                                    ErrorReporter* error_reporter) {
  int element_count = 1;
  // If flatbuffer_tensor.shape == nullptr, then flatbuffer_tensor is a scalar
  // so has 1 element.
  if (flatbuffer_tensor.shape() != nullptr) {
    for (size_t n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) {
      element_count *= flatbuffer_tensor.shape()->Get(n);
    }
  }

  TfLiteType tf_lite_type;
  TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(),
                                          &tf_lite_type, error_reporter));
  TF_LITE_ENSURE_STATUS(TfLiteTypeSizeOf(tf_lite_type, type_size));
  *bytes = element_count * (*type_size);
  return kTfLiteOk;
}

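// Computes the byte length of `eval_tensor` from its dims and element type.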
TfLiteStatus TfLiteEvalTensorByteLength(const TfLiteEvalTensor* eval_tensor,
                                        size_t* out_bytes) {
  TFLITE_DCHECK(out_bytes != nullptr);

  int element_count = 1;
  // If eval_tensor->dims == nullptr, then tensor is a scalar so has 1 element.
  if (eval_tensor->dims != nullptr) {
    for (int n = 0; n < eval_tensor->dims->size; ++n) {
      element_count *= eval_tensor->dims->data[n];
    }
  }
  size_t type_size;
  TF_LITE_ENSURE_STATUS(TfLiteTypeSizeOf(eval_tensor->type, &type_size));
  *out_bytes = element_count * type_size;
  return kTfLiteOk;
}

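// Sizes `output` to match whichever of the two inputs has the larger number
// of dimensions: computes output->bytes from that input's type and shape, and
// copies its dims into a persistently allocated TfLiteIntArray.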
TfLiteStatus AllocateOutputDimensionsFromInput(TfLiteContext* context,
                                               const TfLiteTensor* input1,
                                               const TfLiteTensor* input2,
                                               TfLiteTensor* output) {
  const TfLiteTensor* input = nullptr;

  TF_LITE_ENSURE(context, input1->dims != nullptr);
  TF_LITE_ENSURE(context, input2->dims != nullptr);
  TF_LITE_ENSURE(context, output->dims->size == 0);

  input = input1->dims->size > input2->dims->size ? input1 : input2;
  TF_LITE_ENSURE(context, output->type == input->type);

  size_t size = 0;
  TfLiteTypeSizeOf(input->type, &size);
  const int dimensions_count = tflite::GetTensorShape(input).DimensionsCount();
  for (int i = 0; i < dimensions_count; i++) {
    size *= input->dims->data[i];
  }

  output->bytes = size;

  // Allocate the output dims array from persistent memory, sized to hold the
  // input's dimension count.
  output->dims =
      reinterpret_cast<TfLiteIntArray*>(context->AllocatePersistentBuffer(
          context, TfLiteIntArrayGetSizeInBytes(input->dims->size)));

  output->dims->size = input->dims->size;
  for (int i = 0; i < dimensions_count; i++) {
    output->dims->data[i] = input->dims->data[i];
  }

  return kTfLiteOk;
}

}  // namespace tflite