/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/xnnpack/pad_tester.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

namespace tflite {
namespace xnnpack {

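// Computes the expected output shape of the PAD operator: each input
// dimension is widened by its pre- and post-padding; dimensions without a
// corresponding padding entry pass through unchanged.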
std::vector<int32_t> PadTester::OutputShape() const {
  std::vector<int32_t> output_shape;
  output_shape.reserve(InputShape().size());
  for (size_t i = 0; i < InputShape().size(); i++) {
    int32_t output_dim = InputShape()[i];
    if (i < InputPrePaddings().size()) {
      output_dim += InputPrePaddings()[i];
    }
    if (i < InputPostPaddings().size()) {
      output_dim += InputPostPaddings()[i];
    }
    output_shape.push_back(output_dim);
  }
  return output_shape;
}

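// Builds a PAD model, runs it on the same random input with and without the
// delegate, and verifies that both interpreters produce identical outputs.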
void PadTester::Test(TfLiteDelegate* delegate) const {
  ASSERT_EQ(InputPrePaddings().size(), InputPostPaddings().size());
  ASSERT_LE(InputPrePaddings().size(), InputShape().size());

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto input_rng =
      std::bind(std::uniform_real_distribution<float>(), std::ref(rng));

  std::vector<char> buffer = CreateTfLiteModel();
  const Model* model = GetModel(buffer.data());

  std::unique_ptr<Interpreter> delegate_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &delegate_interpreter),
      kTfLiteOk);
  std::unique_ptr<Interpreter> default_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &default_interpreter),
      kTfLiteOk);

  ASSERT_TRUE(delegate_interpreter);
  ASSERT_TRUE(default_interpreter);

  ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
  ASSERT_EQ(default_interpreter->inputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
  ASSERT_EQ(default_interpreter->outputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);

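  // Only the delegate interpreter gets the delegate applied; the default
  // interpreter serves as the reference implementation.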
  ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);

  float* default_input_data = default_interpreter->typed_tensor<float>(
      default_interpreter->inputs()[0]);
  std::generate(default_input_data,
                default_input_data + ComputeSize(InputShape()),
                std::ref(input_rng));

  float* delegate_input_data = delegate_interpreter->typed_tensor<float>(
      delegate_interpreter->inputs()[0]);
  std::copy(default_input_data, default_input_data + ComputeSize(InputShape()),
            delegate_input_data);

  ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
  ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);

  float* default_output_data = default_interpreter->typed_tensor<float>(
      default_interpreter->outputs()[0]);
  float* delegate_output_data = delegate_interpreter->typed_tensor<float>(
      delegate_interpreter->outputs()[0]);

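  // PAD only copies input values and fills the padded region with zeros, so
  // the delegated output must match the reference output exactly.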
  for (size_t i = 0; i < ComputeSize(OutputShape()); i++) {
    ASSERT_EQ(default_output_data[i], delegate_output_data[i]);
  }
}

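// Serializes a single-subgraph TFLite model that contains one PAD operator.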
std::vector<char> PadTester::CreateTfLiteModel() const {
  flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<OperatorCode> operator_code =
      CreateOperatorCode(builder, BuiltinOperator_PAD);

  std::vector<int32_t> paddings(InputPrePaddings().size() +
                                InputPostPaddings().size());
  for (size_t i = 0; i < InputPrePaddings().size(); i++) {
    paddings[i * 2] = InputPrePaddings()[i];
    paddings[i * 2 + 1] = InputPostPaddings()[i];
  }
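  // Buffer 0 is the conventional empty sentinel buffer; buffer 1 stores the
  // interleaved {pre, post} paddings for the static paddings tensor.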
  const std::array<flatbuffers::Offset<Buffer>, 2> buffers{{
      CreateBuffer(builder, builder.CreateVector({})),
      CreateBuffer(builder,
                   builder.CreateVector(
                       reinterpret_cast<const uint8_t*>(paddings.data()),
                       sizeof(int32_t) * paddings.size())),
  }};

  const std::vector<int32_t> output_shape = OutputShape();
  const std::array<int32_t, 2> paddings_shape{
      {static_cast<int32_t>(InputPrePaddings().size()), 2}};
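  // Tensor 0: float input; tensor 1: int32 paddings of shape
  // [number of padded dimensions, 2], backed by buffer 1; tensor 2: float
  // output.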
  const std::array<flatbuffers::Offset<Tensor>, 3> tensors{{
      CreateTensor(builder,
                   builder.CreateVector<int32_t>(InputShape().data(),
                                                 InputShape().size()),
                   TensorType_FLOAT32),
      CreateTensor(builder,
                   builder.CreateVector<int32_t>(paddings_shape.data(),
                                                 paddings_shape.size()),
                   TensorType_INT32, /*buffer=*/1),
      CreateTensor(builder,
                   builder.CreateVector<int32_t>(output_shape.data(),
                                                 output_shape.size()),
                   TensorType_FLOAT32),
  }};

  const std::array<int32_t, 2> op_inputs{{0, 1}};
  const std::array<int32_t, 1> op_outputs{{2}};
  flatbuffers::Offset<Operator> op = CreateOperator(
      builder, /*opcode_index=*/0,
      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()));

  const std::array<int32_t, 1> subgraph_inputs{{0}};
  const std::array<int32_t, 1> subgraph_outputs{{2}};
  flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
      builder, builder.CreateVector(tensors.data(), tensors.size()),
      builder.CreateVector<int32_t>(subgraph_inputs.data(),
                                    subgraph_inputs.size()),
      builder.CreateVector<int32_t>(subgraph_outputs.data(),
                                    subgraph_outputs.size()),
      builder.CreateVector(&op, 1));

  flatbuffers::Offset<flatbuffers::String> description =
      builder.CreateString("Pad model");

  flatbuffers::Offset<Model> model_buffer = CreateModel(
      builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
      builder.CreateVector(&subgraph, 1), description,
      builder.CreateVector(buffers.data(), buffers.size()));

  builder.Finish(model_buffer);

  return std::vector<char>(builder.GetBufferPointer(),
                           builder.GetBufferPointer() + builder.GetSize());
}

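// Returns the number of elements in a tensor with the given shape.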
int32_t PadTester::ComputeSize(const std::vector<int32_t>& shape) {
  return std::accumulate(shape.cbegin(), shape.cend(), 1,
                         std::multiplies<int32_t>());
}

}  // namespace xnnpack
}  // namespace tflite
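
// Usage sketch (illustration only, not part of the tester implementation): a
// delegate test typically drives this class roughly as follows. The chained
// setters (InputShape, InputPrePaddings, InputPostPaddings) are assumed to
// match the declarations in pad_tester.h, and the delegate entry points come
// from tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h.
//
//   std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
//       xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
//                        TfLiteXNNPackDelegateDelete);
//   PadTester()
//       .InputShape({1, 8, 8, 3})
//       .InputPrePaddings({0, 1, 1, 0})
//       .InputPostPaddings({0, 1, 1, 0})
//       .Test(xnnpack_delegate.get());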