/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <algorithm>
#include <cmath>
#include <vector>

#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/test_util.h"
#include "tensorflow/lite/kernels/internal/types.h"

#define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_float.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h"

namespace tflite {
namespace {

// Runs the DepthwiseConv and compares against the reference implementation.
void TestOneDepthwiseConv(
    const DepthwiseParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& filter_shape,
    const float* filter_data, const RuntimeShape& bias_shape,
    const float* bias_data, const RuntimeShape& output_shape) {
  const int output_buffer_size = output_shape.FlatSize();
  std::vector<float> output_data(output_buffer_size);
  std::vector<float> reference_output_data(output_buffer_size);
  reference_ops::DepthwiseConv(params, input_shape, input_data, filter_shape,
                               filter_data, bias_shape, bias_data, output_shape,
                               reference_output_data.data());
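  // Run the optimized implementation single-threaded: the row range
  // [0, output height) along thread_dim 1 covers the entire output.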
  optimized_ops::DepthwiseConvImpl(
      params, input_shape, input_data, filter_shape, filter_data, bias_shape,
      bias_data, output_shape, output_data.data(), CpuFlags(),
      /*thread_start=*/0,
      /*thread_end=*/output_shape.Dims(1), /*thread_dim=*/1);

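  // Compare against the reference output via the mean absolute difference,
  // normalized by the largest reference magnitude.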
  double sum_abs_diff = 0;
  float max_abs_val = 0;
  for (int i = 0; i < output_buffer_size; i++) {
    sum_abs_diff += std::abs(output_data[i] - reference_output_data[i]);
    max_abs_val = std::max(max_abs_val, std::abs(reference_output_data[i]));
  }
  if (sum_abs_diff != 0.f) {
    const float mean_diff =
        static_cast<float>(sum_abs_diff / output_buffer_size);
    const float relative_error = std::abs(mean_diff) / max_abs_val;
    ASSERT_LT(relative_error, 1e-5f);
  }
}

// This function picks some random DepthwiseConv params, which may or may not
// be legal. If they're not legal, it returns false. If they're legal,
// it runs the DepthwiseConv test and returns true. This allows the caller
// to loop until a test has been run.
bool TryTestOneDepthwiseConv() {
  // We pick many positive values here. We are particularly interested in
  // small values, both because they are the most likely to hit special cases
  // in optimized implementations and because they keep individual tests fast,
  // which lets us run more tests and get more coverage.
  const int batch = UniformRandomInt(1, 2);
  const int input_depth = ExponentialRandomPositiveInt(0.9f, 6, 50);
  const int input_width = ExponentialRandomPositiveInt(0.9f, 20, 200);
  const int input_height = ExponentialRandomPositiveInt(0.9f, 20, 200);
  const int filter_width = ExponentialRandomPositiveInt(0.9f, 4, 10);
  const int filter_height = ExponentialRandomPositiveInt(0.9f, 4, 10);
  const int depth_multiplier = ExponentialRandomPositiveInt(0.8f, 6, 50);
  const int stride = ExponentialRandomPositiveInt(0.9f, 3, 8);
  const int output_depth = input_depth * depth_multiplier;
  const int dilation_width_factor = RandomElement(std::vector<int>({1, 2, 4}));
  const int dilation_height_factor = RandomElement(std::vector<int>({1, 2, 4}));
  float output_activation_min, output_activation_max;
  FusedActivationFunctionType ac =
      RandomElement(std::vector<FusedActivationFunctionType>(
          {FusedActivationFunctionType::kNone,
           FusedActivationFunctionType::kRelu,
           FusedActivationFunctionType::kRelu1,
           FusedActivationFunctionType::kRelu6}));
  GetActivationMinMax(ac, &output_activation_min, &output_activation_max);
  // The optimized DepthwiseConv implementation currently uses a fixed-size
  // accumulator buffer on the stack (kMaxSupportedOutputDepth entries), so it
  // does not support larger output depths. It CHECKs for this, so it is safe
  // in the sense that a larger output depth would fail explicitly rather than
  // silently. We just need to keep our testing within that constraint.
  const int kMaxSupportedOutputDepth = 1024;
  if (output_depth > kMaxSupportedOutputDepth) {
    return false;
  }
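  // All shapes below follow TFLite's NHWC convention:
  // {batch, height, width, depth}.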
  RuntimeShape input_shape_inference(
      {batch, input_height, input_width, input_depth});
  RuntimeShape output_shape_inference;
  int pad_width, pad_height;
  const auto padding_type =
      UniformRandomInt(0, 1) ? PaddingType::kSame : PaddingType::kValid;
  if (!ComputeConvSizes(input_shape_inference, output_depth, filter_width,
                        filter_height, stride, dilation_width_factor,
                        dilation_height_factor, padding_type,
                        &output_shape_inference, &pad_width, &pad_height)) {
    return false;
  }
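  // The depthwise filter is laid out as 1 x filter_height x filter_width x
  // output_depth, where output_depth = input_depth * depth_multiplier.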
  RuntimeShape filter_shape_inference(
      {1, filter_height, filter_width, output_depth});
  RuntimeShape bias_shape_inference({1, 1, 1, output_depth});
  const int input_buffer_size = input_shape_inference.FlatSize();
  const int filter_buffer_size = filter_shape_inference.FlatSize();
  std::vector<float> input_data(input_buffer_size);
  std::vector<float> filter_data(filter_buffer_size);
  std::vector<float> bias_data(output_depth);
  const float input_amplitude = 1.f;
  const float filter_amplitude = 1.f;
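  // Draw the bias on the same scale as the largest possible accumulation of
  // input * filter products for a single output value.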
  const float bias_amplitude =
      filter_width * filter_height * input_amplitude * filter_amplitude;
  FillRandom(&input_data, -input_amplitude, input_amplitude);
  FillRandom(&filter_data, -filter_amplitude, filter_amplitude);
  FillRandom(&bias_data, -bias_amplitude, bias_amplitude);
  DepthwiseParams op_params;
  op_params.padding_type = padding_type;
  op_params.padding_values.width = pad_width;
  op_params.padding_values.height = pad_height;
  op_params.stride_width = stride;
  op_params.stride_height = stride;
  op_params.dilation_width_factor = dilation_width_factor;
  op_params.dilation_height_factor = dilation_height_factor;
  op_params.depth_multiplier = depth_multiplier;
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;
  TestOneDepthwiseConv(op_params, input_shape_inference, input_data.data(),
                       filter_shape_inference, filter_data.data(),
                       bias_shape_inference, bias_data.data(),
                       output_shape_inference);
  return true;
}

void TestOneDepthwiseConv() {
  while (!TryTestOneDepthwiseConv()) {
  }
}

TEST(TestDepthwiseConv, TestDepthwiseConv) {
  const int kTestsToRun = 10 * 1000;
  for (int i = 0; i < kTestsToRun; i++) {
    TestOneDepthwiseConv();
  }
}

}  // namespace
}  // namespace tflite