/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace micro {
namespace split_v {

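// Copies the input into the node's outputs along `axis_value`. For each slice
// of the leading (pre-axis) dimensions, every output receives a contiguous
// chunk whose size is its own extent along the axis times the product of the
// trailing (post-axis) dimensions.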
template <typename T>
TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node,
                       const TfLiteEvalTensor* input, int axis_value) {
  const TfLiteIntArray* input_dims = input->dims;
  const TfLiteEvalTensor* output0 =
      tflite::micro::GetEvalOutput(context, node, 0);

  const int split_dimensions = input_dims->size;

  TFLITE_DCHECK_LT(axis_value, split_dimensions);
  TFLITE_DCHECK_EQ(output0->dims->size, split_dimensions);

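  // The outputs' extents along the split axis must add up to the input's
  // extent along that axis.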
  int64_t split_size = 0;
  const int output_count = NumOutputs(node);
  for (int i = 0; i < output_count; i++) {
    split_size +=
        tflite::micro::GetEvalOutput(context, node, i)->dims->data[axis_value];
  }
  TFLITE_DCHECK_EQ(split_size, input_dims->data[axis_value]);
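  // Number of slices that precede the split axis (product of leading dims).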
  int64_t outer_size = 1;
  for (int i = 0; i < axis_value; ++i) {
    outer_size *= input_dims->data[i];
  }

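  // Number of elements in one unit slice past the split axis (product of
  // trailing dims).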
  int64_t base_inner_size = 1;
  for (int i = axis_value + 1; i < split_dimensions; ++i) {
    base_inner_size *= input_dims->data[i];
  }

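  // Walk the input once; for every outer slice, copy each output's chunk in
  // order and advance the input pointer past it.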
  const T* input_ptr = tflite::micro::GetTensorData<T>(input);
  for (int k = 0; k < outer_size; ++k) {
    for (int i = 0; i < output_count; ++i) {
      TfLiteEvalTensor* output_tensor =
          tflite::micro::GetEvalOutput(context, node, i);
      T* output_data = tflite::micro::GetTensorData<T>(output_tensor);
      const int copy_size =
          output_tensor->dims->data[axis_value] * base_inner_size;
      T* output_ptr = output_data + k * copy_size;
      for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j];
      input_ptr += copy_size;
    }
  }

  return kTfLiteOk;
}

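// SPLIT_V takes three inputs: the tensor to split (0), the per-output sizes
// along the split axis (1), and a scalar axis tensor (2).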
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);

  // Dynamic output tensors are needed if axis tensor is not constant.
  // But Micro doesn't support dynamic memory allocation, so we only support
  // constant axis tensor for now.
  const TfLiteTensor* axis = GetInput(context, node, 2);
  TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis),
                     "Non constant axis tensor not supported");

  return kTfLiteOk;
}

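// Reads the scalar axis, normalizes a negative value to a valid dimension
// index, and dispatches SplitImpl on the input's element type.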
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
  const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 2);

  int axis_value = tflite::micro::GetTensorData<int32_t>(axis)[0];
  if (axis_value < 0) {
    axis_value += input->dims->size;
  }

  TF_LITE_ENSURE(context, axis_value >= 0);
  TF_LITE_ENSURE(context, axis_value < input->dims->size);

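  // Only these element types are handled; anything else is reported as a
  // runtime error.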
  switch (input->type) {
    case kTfLiteFloat32: {
      return SplitImpl<float>(context, node, input, axis_value);
    }
    case kTfLiteInt8: {
      return SplitImpl<int8_t>(context, node, input, axis_value);
    }
    case kTfLiteInt16: {
      return SplitImpl<int16_t>(context, node, input, axis_value);
    }
    case kTfLiteInt32: {
      return SplitImpl<int32_t>(context, node, input, axis_value);
    }
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}

}  // namespace split_v

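// Returns the registration consumed by the op resolver; init/free are left
// null because this kernel keeps no per-instance state.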
TfLiteRegistration Register_SPLIT_V() {
  return {/*init=*/nullptr,
          /*free=*/nullptr,
          /*prepare=*/split_v::Prepare,
          /*invoke=*/split_v::Eval,
          /*profiling_string=*/nullptr,
          /*builtin_code=*/0,
          /*custom_name=*/nullptr,
          /*version=*/0};
}

}  // namespace micro
}  // namespace ops
}  // namespace tflite