/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15
16 #define EIGEN_USE_THREADS
17
18 #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
19 #include "tensorflow/core/framework/fake_input.h"
20 #include "tensorflow/core/framework/node_def_builder.h"
21 #include "tensorflow/core/framework/tensor.h"
22 #include "tensorflow/core/framework/tensor_testutil.h"
23 #include "tensorflow/core/framework/types.h"
24 #include "tensorflow/core/framework/types.pb.h"
25 #include "tensorflow/core/kernels/batch_norm_op.h"
26 #include "tensorflow/core/kernels/ops_testutil.h"
27 #include "tensorflow/core/kernels/quantization_utils.h"
28 #include "tensorflow/core/lib/core/status_test_util.h"
29 #include "tensorflow/core/lib/core/threadpool.h"
30 #include "tensorflow/core/platform/test.h"
31
32 namespace tensorflow {
33
34 using QuantizedBatchNormOpTest = OpsTestBase;
35
// Runs QuantizedBatchNormWithGlobalNormalization on a small 1x1x6x2 input and
// checks the dequantized output against hand-computed expected values.
TEST_F(QuantizedBatchNormOpTest, Simple) {
  // The op takes five (tensor, min, max) quantized triples:
  // input, mean, variance, beta, gamma.
  TF_EXPECT_OK(NodeDefBuilder("quantized_batch_norm_op",
                              "QuantizedBatchNormWithGlobalNormalization")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("scale_after_normalization", false)
                   .Attr("variance_epsilon", 0.001)
                   .Attr("Tinput", DT_QUINT8)
                   .Attr("out_type", DT_QINT32)
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  // Build float inputs, then quantize them to quint8 over known ranges so the
  // op's quantized math can be compared to float expectations.
  const float input_min = -128.0f;
  const float input_max = 127.0f;
  const int input_batch = 1;
  const int input_height = 1;
  const int input_width = 6;
  const int input_depth = 2;
  Tensor input_float(DT_FLOAT,
                     {input_batch, input_height, input_width, input_depth});
  test::FillValues<float>(&input_float,
                          {1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6});
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  const float mean_min = 0.0f;
  const float mean_max = 20.0f;
  Tensor mean_float(DT_FLOAT, {input_depth});
  test::FillValues<float>(&mean_float, {10, 20});
  Tensor mean_quantized =
      FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max);
  const float variance_min = 0.0f;
  const float variance_max = 1.0f;
  Tensor variance_float(DT_FLOAT, {input_depth});
  test::FillValues<float>(&variance_float, {0.25, 0.5});
  Tensor variance_quantized = FloatTensorToQuantized<quint8>(
      variance_float, variance_min, variance_max);
  const float beta_min = 0.0f;
  const float beta_max = 1.0f;
  Tensor beta_float(DT_FLOAT, {input_depth});
  test::FillValues<float>(&beta_float, {0.1, 0.6});
  Tensor beta_quantized =
      FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max);
  const float gamma_min = 0.0f;
  const float gamma_max = 1.0f;
  Tensor gamma_float(DT_FLOAT, {input_depth});
  // gamma is all-zero; with scale_after_normalization=false it is unused by
  // the normalization formula.
  test::FillValues<float>(&gamma_float, {0.0, 0.0});
  Tensor gamma_quantized =
      FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max);

  // Feed the inputs in the order declared above; each quantized tensor is
  // followed by its scalar min/max range tensors.
  AddInputFromArray<quint8>(input_quantized.shape(),
                            input_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {input_min});
  AddInputFromArray<float>(TensorShape({1}), {input_max});
  AddInputFromArray<quint8>(mean_quantized.shape(),
                            mean_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {mean_min});
  AddInputFromArray<float>(TensorShape({1}), {mean_max});
  AddInputFromArray<quint8>(variance_quantized.shape(),
                            variance_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {variance_min});
  AddInputFromArray<float>(TensorShape({1}), {variance_max});
  AddInputFromArray<quint8>(beta_quantized.shape(),
                            beta_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {beta_min});
  AddInputFromArray<float>(TensorShape({1}), {beta_max});
  AddInputFromArray<quint8>(gamma_quantized.shape(),
                            gamma_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {gamma_min});
  AddInputFromArray<float>(TensorShape({1}), {gamma_max});
  TF_ASSERT_OK(RunOpKernel());

  // Hand-computed expected results of (x - mean) / sqrt(var + eps) + beta.
  Tensor expected_float(
      allocator(), DT_FLOAT,
      TensorShape({input_batch, input_height, input_width, input_depth}));
  test::FillValues<float>(
      &expected_float, {-17.86, -22.00, -15.87, -20.59, -13.87, -19.18, -21.86,
                        -33.31, -23.85, -34.72, -25.85, -36.13});
  // Dequantize the qint32 output using the min/max emitted on outputs 1 and 2,
  // then compare in float space with a tolerance that absorbs quantization
  // error.
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
  test::ExpectTensorNear<float>(expected_float, output_float, 0.1);
}
132
// Runs the same quantized op as the Simple test, but computes the expected
// result with the float BatchNorm functor instead of hard-coded values, so the
// quantized kernel is checked directly against the float reference
// implementation.
TEST_F(QuantizedBatchNormOpTest, SameAsFloat) {
  // Five (tensor, min, max) quantized input triples:
  // input, mean, variance, beta, gamma.
  TF_EXPECT_OK(NodeDefBuilder("quantized_batch_norm_op",
                              "QuantizedBatchNormWithGlobalNormalization")
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_QUINT8))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("scale_after_normalization", false)
                   .Attr("variance_epsilon", 0.001)
                   .Attr("Tinput", DT_QUINT8)
                   .Attr("out_type", DT_QINT32)
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  // Same inputs and quantization ranges as the Simple test.
  const float input_min = -128.0f;
  const float input_max = 127.0f;
  const int input_batch = 1;
  const int input_height = 1;
  const int input_width = 6;
  const int input_depth = 2;
  Tensor input_float(DT_FLOAT,
                     {input_batch, input_height, input_width, input_depth});
  test::FillValues<float>(&input_float,
                          {1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6});
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  const float mean_min = 0.0f;
  const float mean_max = 20.0f;
  Tensor mean_float(DT_FLOAT, {input_depth});
  test::FillValues<float>(&mean_float, {10, 20});
  Tensor mean_quantized =
      FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max);
  const float variance_min = 0.0f;
  const float variance_max = 1.0f;
  Tensor variance_float(DT_FLOAT, {input_depth});
  test::FillValues<float>(&variance_float, {0.25, 0.5});
  Tensor variance_quantized = FloatTensorToQuantized<quint8>(
      variance_float, variance_min, variance_max);
  const float beta_min = 0.0f;
  const float beta_max = 1.0f;
  Tensor beta_float(DT_FLOAT, {input_depth});
  test::FillValues<float>(&beta_float, {0.1, 0.6});
  Tensor beta_quantized =
      FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max);
  const float gamma_min = 0.0f;
  const float gamma_max = 1.0f;
  Tensor gamma_float(DT_FLOAT, {input_depth});
  // gamma is all-zero; with scale_after_normalization=false it is unused by
  // the normalization formula.
  test::FillValues<float>(&gamma_float, {0.0, 0.0});
  Tensor gamma_quantized =
      FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max);

  // Each quantized tensor is followed by its scalar min/max range tensors.
  AddInputFromArray<quint8>(input_quantized.shape(),
                            input_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {input_min});
  AddInputFromArray<float>(TensorShape({1}), {input_max});
  AddInputFromArray<quint8>(mean_quantized.shape(),
                            mean_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {mean_min});
  AddInputFromArray<float>(TensorShape({1}), {mean_max});
  AddInputFromArray<quint8>(variance_quantized.shape(),
                            variance_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {variance_min});
  AddInputFromArray<float>(TensorShape({1}), {variance_max});
  AddInputFromArray<quint8>(beta_quantized.shape(),
                            beta_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {beta_min});
  AddInputFromArray<float>(TensorShape({1}), {beta_max});
  AddInputFromArray<quint8>(gamma_quantized.shape(),
                            gamma_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({1}), {gamma_min});
  AddInputFromArray<float>(TensorShape({1}), {gamma_max});
  TF_ASSERT_OK(RunOpKernel());

  // Compute the reference result with the float BatchNorm functor on a
  // single-threaded Eigen device, using the same epsilon (0.001) and
  // scale_after_normalization (false) as the quantized op above.
  Tensor expected_float(
      allocator(), DT_FLOAT,
      TensorShape({input_batch, input_height, input_width, input_depth}));
  thread::ThreadPool threadpool(Env::Default(), "test", 1);
  Eigen::ThreadPoolDevice eigen_cpu_device(threadpool.AsEigenThreadPool(), 1);
  const Tensor& const_input_float = input_float;
  const Tensor& const_mean_float = mean_float;
  const Tensor& const_variance_float = variance_float;
  const Tensor& const_beta_float = beta_float;
  const Tensor& const_gamma_float = gamma_float;
  functor::BatchNorm<Eigen::ThreadPoolDevice, float>()(
      eigen_cpu_device, const_input_float.tensor<float, 4>(),
      const_mean_float.vec<float>(), const_variance_float.vec<float>(),
      const_beta_float.vec<float>(), const_gamma_float.vec<float>(), 0.001,
      false, expected_float.tensor<float, 4>());

  // Dequantize the qint32 output using the min/max emitted on outputs 1 and 2,
  // then compare against the float reference with a tolerance that absorbs
  // quantization error.
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
  test::ExpectTensorNear<float>(expected_float, output_float, 0.1);
}
239
240 } // namespace tensorflow
241