/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <sys/mman.h>

#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_mock_test.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"

namespace tflite {
namespace {

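// Wraps SingleOpModel to attach a StatefulNnApiDelegate built on top of the
// provided NnApi instance, so tests can inspect the delegate's state.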
class SingleOpModelWithNNAPI : public SingleOpModel {
 public:
  explicit SingleOpModelWithNNAPI(const NnApi* nnapi) {
    options_.disallow_nnapi_cpu = false;
    stateful_delegate_.reset(new StatefulNnApiDelegate(nnapi, options_));
    this->SetDelegate(stateful_delegate_.get());
  }

  StatefulNnApiDelegate* GetDelegate() { return stateful_delegate_.get(); }

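  // Registers a buffer handle for the tensor at the given index with the
  // interpreter, attributing it to the stateful NNAPI delegate.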
  void SetBufferHandle(int index, TfLiteBufferHandle handle) {
    interpreter_->SetBufferHandle(index, handle, stateful_delegate_.get());
  }

 private:
  std::unique_ptr<StatefulNnApiDelegate> stateful_delegate_;
  StatefulNnApiDelegate::Options options_;
};

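// Model containing a single float ADD operation, used to drive the NNAPI
// delegate in the errno tests below.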
class FloatAddOpModel : public SingleOpModelWithNNAPI {
 public:
  FloatAddOpModel(const NnApi* nnapi, const TensorData& input1,
                  const TensorData& input2, const TensorData& output,
                  ActivationFunctionType activation_type,
                  bool allow_fp32_relax_to_fp16 = false)
      : SingleOpModelWithNNAPI(nnapi) {
    Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16);
  }

  int input1() { return input1_; }
  int input2() { return input2_; }

  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }

 protected:
  int input1_;
  int input2_;
  int output_;

 private:
  // Performs initialization logic shared across all constructors.
  void Init(const TensorData& input1, const TensorData& input2,
            const TensorData& output, ActivationFunctionType activation_type,
            bool allow_fp32_relax_to_fp16 = false) {
    input1_ = AddInput(input1);
    input2_ = AddInput(input2);
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
                 CreateAddOptions(builder_, activation_type).Union());
    BuildInterpreter({GetShape(input1_), GetShape(input2_)}, /*num_threads=*/-1,
                     allow_fp32_relax_to_fp16, /*apply_delegate=*/true);
  }
};

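// Fixture providing a mocked NNAPI implementation (nnapi_mock_) whose
// individual NNAPI calls can be programmed to return specific error codes.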
struct NnApiErrnoTest : ::tflite::delegate::nnapi::NnApiDelegateMockTest {};

TEST_F(NnApiErrnoTest, IsZeroWhenNoErrorOccurs) {
  FloatAddOpModel m(nnapi_mock_->GetNnApi(), {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
  m.Invoke();

  EXPECT_EQ(m.GetDelegate()->GetNnApiErrno(), 0);
}

TEST_F(NnApiErrnoTest, HasTheStatusOfTheNnApiCallFailedCallingInit) {
  nnapi_mock_->ExecutionCreateReturns<8>();

  FloatAddOpModel m(nnapi_mock_->GetNnApi(), {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);

  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});

  EXPECT_EQ(m.InvokeUnchecked(), kTfLiteError);
  EXPECT_EQ(m.GetDelegate()->GetNnApiErrno(), 8);
}

TEST_F(NnApiErrnoTest, HasTheStatusOfTheNnApiCallFailedCallingInvoke) {
  nnapi_mock_->ModelFinishReturns<-4>();

  FloatAddOpModel m(nnapi_mock_->GetNnApi(), {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);

  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});

  // The failure is detected and the delegate is disabled; execution falls
  // back to running without it and succeeds.
  EXPECT_EQ(m.InvokeUnchecked(), kTfLiteOk);
  // The delegate should still record the error code of the failure.
  EXPECT_EQ(m.GetDelegate()->GetNnApiErrno(), -4);
}

TEST_F(NnApiErrnoTest, ErrnoIsResetWhenRestoringDelegateForModel) {
  nnapi_mock_->ModelFinishReturns<-4>();

  FloatAddOpModel m(nnapi_mock_->GetNnApi(), {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);

  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});

  // The failure is detected and the delegate is disabled; execution falls
  // back to running without it and succeeds.
  EXPECT_EQ(m.InvokeUnchecked(), kTfLiteOk);
  // The delegate should still record the error code of the failure.
  EXPECT_EQ(m.GetDelegate()->GetNnApiErrno(), -4);

  nnapi_mock_->ModelFinishReturns<0>();

  // The delegate has to be re-applied since it was disabled by the previous
  // failure.
  m.ApplyDelegate();
  EXPECT_EQ(m.InvokeUnchecked(), kTfLiteOk);

  // The errno is reset once the delegate is successfully restored.
  EXPECT_EQ(m.GetDelegate()->GetNnApiErrno(), 0);
}

TEST_F(NnApiErrnoTest, ErrnoIsUpdatedInCaseOfAnotherFailure) {
  nnapi_mock_->ModelFinishReturns<-4>();

  FloatAddOpModel m(nnapi_mock_->GetNnApi(), {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);

  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});

  // The failure is detected and the delegate is disabled; execution falls
  // back to running without it and succeeds.
  EXPECT_EQ(m.InvokeUnchecked(), kTfLiteOk);
  // The delegate should still record the error code of the failure.
  EXPECT_EQ(m.GetDelegate()->GetNnApiErrno(), -4);

  nnapi_mock_->ModelFinishReturns<-5>();

  // The delegate has to be re-applied since it was disabled by the previous
  // failure.
  m.ApplyDelegate();
  EXPECT_EQ(m.InvokeUnchecked(), kTfLiteOk);

  // The errno now reflects the most recent failure.
  EXPECT_EQ(m.GetDelegate()->GetNnApiErrno(), -5);
}

}  // namespace
}  // namespace tflite