/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/xnnpack/reshape_tester.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

34 namespace tflite {
35 namespace xnnpack {
36 
Test(TfLiteDelegate * delegate) const37 void ReshapeTester::Test(TfLiteDelegate* delegate) const {
38   std::random_device random_device;
39   auto rng = std::mt19937(random_device());
40   auto f32rng =
41       std::bind(std::uniform_real_distribution<float>(), std::ref(rng));
42 
43   ASSERT_EQ(InputSize(), OutputSize());
44 
45   std::vector<char> buffer = CreateTfLiteModel();
46   const Model* model = GetModel(buffer.data());
47 
48   std::unique_ptr<Interpreter> delegate_interpreter;
49   ASSERT_EQ(
50       InterpreterBuilder(
51           model,
52           ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
53           &delegate_interpreter),
54       kTfLiteOk);
55   std::unique_ptr<Interpreter> default_interpreter;
56   ASSERT_EQ(
57       InterpreterBuilder(
58           model,
59           ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
60           &default_interpreter),
61       kTfLiteOk);
62 
63   ASSERT_TRUE(delegate_interpreter);
64   ASSERT_TRUE(default_interpreter);
65 
66   ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
67   ASSERT_EQ(default_interpreter->inputs().size(), 1);
68 
69   ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
70   ASSERT_EQ(default_interpreter->outputs().size(), 1);
71 
72   ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
73   ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);
74 
75   ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);
76 
77   float* default_input_data = default_interpreter->typed_tensor<float>(
78       default_interpreter->inputs()[0]);
79   std::generate(default_input_data, default_input_data + InputSize(),
80                 std::ref(f32rng));
81 
82   float* delegate_input_data = delegate_interpreter->typed_tensor<float>(
83       delegate_interpreter->inputs()[0]);
84   std::copy(default_input_data, default_input_data + InputSize(),
85             delegate_input_data);
86 
87   ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
88   ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);
89 
90   float* default_output_data = default_interpreter->typed_tensor<float>(
91       default_interpreter->outputs()[0]);
92   float* delegate_output_data = delegate_interpreter->typed_tensor<float>(
93       delegate_interpreter->outputs()[0]);
94 
95   for (size_t i = 0; i < OutputSize(); i++) {
96     ASSERT_EQ(delegate_output_data[i], default_output_data[i]);
97   }
98 }
99 
CreateTfLiteModel() const100 std::vector<char> ReshapeTester::CreateTfLiteModel() const {
101   flatbuffers::FlatBufferBuilder builder;
102   flatbuffers::Offset<OperatorCode> operator_code =
103       CreateOperatorCode(builder, BuiltinOperator_RESHAPE, 0);
104 
105   std::vector<flatbuffers::Offset<Buffer>> buffers{{
106       CreateBuffer(builder, builder.CreateVector({})),
107   }};
108   if (OutputShapeAsInput()) {
109     buffers.emplace_back(CreateBuffer(
110         builder, builder.CreateVector(
111                      reinterpret_cast<const uint8_t*>(OutputShape().data()),
112                      OutputShape().size() * sizeof(int32_t))));
113   }
114 
115   std::vector<flatbuffers::Offset<Tensor>> tensors{{
116       CreateTensor(builder,
117                    builder.CreateVector<int32_t>(InputShape().data(),
118                                                  InputShape().size()),
119                    TensorType_FLOAT32),
120       CreateTensor(builder,
121                    builder.CreateVector<int32_t>(OutputShape().data(),
122                                                  OutputShape().size()),
123                    TensorType_FLOAT32),
124   }};
125 
126   if (OutputShapeAsInput()) {
127     const std::array<int32_t, 1> reshape_shape{
128         {static_cast<int32_t>(InputShape().size())}};
129     tensors.insert(tensors.begin() + 1,
130                    CreateTensor(builder,
131                                 builder.CreateVector<int32_t>(
132                                     reshape_shape.data(), reshape_shape.size()),
133                                 TensorType_INT32, /*buffer=*/1));
134   }
135 
136   std::vector<int32_t> op_inputs({0});
137   if (OutputShapeAsInput()) {
138     op_inputs.push_back(1);
139   }
140   const std::array<int32_t, 1> op_outputs{{OutputShapeAsInput() ? 2 : 1}};
141 
142   BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE;
143   flatbuffers::Offset<void> builtin_options = 0;
144   if (!OutputShapeAsInput()) {
145     builtin_options_type = tflite::BuiltinOptions_ReshapeOptions;
146     builtin_options =
147         CreateReshapeOptions(
148             builder, builder.CreateVector<int32_t>(OutputShape().data(),
149                                                    OutputShape().size()))
150             .Union();
151   }
152 
153   const flatbuffers::Offset<Operator> op = CreateOperator(
154       builder, /*opcode_index=*/0,
155       builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
156       builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
157       builtin_options_type, builtin_options);
158 
159   const std::array<int32_t, 1> subgraph_inputs{{op_inputs.front()}};
160   const std::array<int32_t, 1> subgraph_outputs{{op_outputs.front()}};
161   flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
162       builder, builder.CreateVector(tensors.data(), tensors.size()),
163       builder.CreateVector<int32_t>(subgraph_inputs.data(),
164                                     subgraph_inputs.size()),
165       builder.CreateVector<int32_t>(subgraph_outputs.data(),
166                                     subgraph_outputs.size()),
167       builder.CreateVector(&op, 1));
168 
169   const flatbuffers::Offset<Model> model_buffer = CreateModel(
170       builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
171       builder.CreateVector(&subgraph, 1), builder.CreateString("Reshape model"),
172       builder.CreateVector(buffers.data(), buffers.size()));
173 
174   builder.Finish(model_buffer);
175 
176   return std::vector<char>(builder.GetBufferPointer(),
177                            builder.GetBufferPointer() + builder.GetSize());
178 }
179 
ComputeSize(const std::vector<int32_t> & shape)180 int32_t ReshapeTester::ComputeSize(const std::vector<int32_t>& shape) {
181   return std::accumulate(shape.cbegin(), shape.cend(), 1,
182                          std::multiplies<int32_t>());
183 }
184 
185 }  // namespace xnnpack
186 }  // namespace tflite
187