/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/xnnpack/softmax_tester.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

namespace tflite {
namespace xnnpack {

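// Runs the serialized SOFTMAX model on two interpreters, one with the
// delegate under test applied and one using the default (reference) kernels,
// and checks that their outputs match element-wise within a relative
// tolerance.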
void SoftmaxTester::Test(TfLiteDelegate* delegate) const {
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto input_rng = std::bind(
      std::uniform_real_distribution<float>(-15.0f, 15.0f), std::ref(rng));

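  // Both interpreters are built from the same serialized model, so the
  // delegated and reference executions see identical graphs.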
  std::vector<char> buffer = CreateTfLiteModel();
  const Model* model = GetModel(buffer.data());

  std::unique_ptr<Interpreter> delegate_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &delegate_interpreter),
      kTfLiteOk);
  std::unique_ptr<Interpreter> default_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &default_interpreter),
      kTfLiteOk);

  ASSERT_TRUE(delegate_interpreter);
  ASSERT_TRUE(default_interpreter);

  ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
  ASSERT_EQ(default_interpreter->inputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
  ASSERT_EQ(default_interpreter->outputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);

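  // Apply the delegate under test to one interpreter only; the other keeps
  // the built-in reference implementation.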
  ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);

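  // Generate random inputs in the reference interpreter, then mirror exactly
  // the same values into the delegated interpreter's input tensor.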
  float* default_input_data = default_interpreter->typed_input_tensor<float>(0);
  std::generate(default_input_data, default_input_data + Size(),
                std::ref(input_rng));

  float* delegate_input_data =
      delegate_interpreter->typed_input_tensor<float>(0);
  std::copy(default_input_data, default_input_data + Size(),
            delegate_input_data);

  ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
  ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);

  float* default_output_data =
      default_interpreter->typed_output_tensor<float>(0);
  float* delegate_output_data =
      delegate_interpreter->typed_output_tensor<float>(0);

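  // Tolerance scales with the magnitude of the reference output:
  // |default - delegate| <= eps * max(10 * |default|, 1), i.e. roughly
  // 10 ulps for large outputs, with an absolute floor of eps near zero.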
  for (size_t i = 0; i < Size(); i++) {
    ASSERT_NEAR(default_output_data[i], delegate_output_data[i],
                std::numeric_limits<float>::epsilon() *
                    std::max(std::abs(default_output_data[i]) * 10.0f, 1.0f));
  }
}

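// Serializes a single-operator model (one SOFTMAX op with one FLOAT32 input
// and one FLOAT32 output of the same shape) into a FlatBuffer.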
std::vector<char> SoftmaxTester::CreateTfLiteModel() const {
  flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<OperatorCode> operator_code =
      CreateOperatorCode(builder, BuiltinOperator_SOFTMAX);

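  // A single empty buffer: the TFLite schema requires buffer 0 to be an empty
  // sentinel, and this model has no constant tensors needing data.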
  const std::array<flatbuffers::Offset<Buffer>, 1> buffers{{
      CreateBuffer(builder, builder.CreateVector({})),
  }};

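  // Tensor 0 is the input and tensor 1 is the output; both use the same
  // shape and FLOAT32 type, since softmax preserves its input shape.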
  const std::array<flatbuffers::Offset<Tensor>, 2> tensors{{
      CreateTensor(
          builder,
          builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
          TensorType_FLOAT32),
      CreateTensor(
          builder,
          builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
          TensorType_FLOAT32),
  }};

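  // Beta scales the logits before normalization:
  // softmax(x)[i] = exp(beta * x[i]) / sum_j exp(beta * x[j]).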
  flatbuffers::Offset<SoftmaxOptions> softmax_options =
      CreateSoftmaxOptions(builder, Beta());

  const std::array<int32_t, 1> op_inputs{{0}};
  const std::array<int32_t, 1> op_outputs{{1}};
  flatbuffers::Offset<Operator> op = CreateOperator(
      builder, /*opcode_index=*/0,
      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
      BuiltinOptions_SoftmaxOptions, softmax_options.Union());

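  // The subgraph exposes tensor 0 as the model input and tensor 1 as the
  // model output, matching the operator's own input/output wiring.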
  const std::array<int32_t, 1> subgraph_inputs{{0}};
  const std::array<int32_t, 1> subgraph_outputs{{1}};
  flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
      builder, builder.CreateVector(tensors.data(), tensors.size()),
      builder.CreateVector<int32_t>(subgraph_inputs.data(),
                                    subgraph_inputs.size()),
      builder.CreateVector<int32_t>(subgraph_outputs.data(),
                                    subgraph_outputs.size()),
      builder.CreateVector(&op, 1));

  flatbuffers::Offset<flatbuffers::String> description =
      builder.CreateString("Softmax model");

  flatbuffers::Offset<Model> model_buffer = CreateModel(
      builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
      builder.CreateVector(&subgraph, 1), description,
      builder.CreateVector(buffers.data(), buffers.size()));

  builder.Finish(model_buffer);

  return std::vector<char>(builder.GetBufferPointer(),
                           builder.GetBufferPointer() + builder.GetSize());
}

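// Returns the total number of elements in a tensor of the given shape, i.e.
// the product of its dimensions (1 for an empty shape).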
int32_t SoftmaxTester::ComputeSize(const std::vector<int32_t>& shape) {
  return std::accumulate(shape.cbegin(), shape.cend(), 1,
                         std::multiplies<int32_t>());
}
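
// Example usage (a sketch, not part of this file): the tester is typically
// driven from a softmax delegate test with an XNNPACK delegate, along the
// lines of
//
//   std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
//       delegate(TfLiteXNNPackDelegateCreate(nullptr),
//                TfLiteXNNPackDelegateDelete);
//   SoftmaxTester().Shape({2, 3, 4, 5}).Beta(0.5f).Test(delegate.get());
//
// The fluent Shape() and Beta() setters are assumed here, mirroring the
// accessor pattern of the other XNNPACK delegate testers.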

}  // namespace xnnpack
}  // namespace tflite
162