# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for binary_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function


def make_binary_op_tests(options,
                         binary_operator,
                         allow_fully_quantize=False,
                         expected_tf_failures=0,
                         test_parameters=None):
  """Make a set of tests to do binary ops with and without broadcast."""
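  # Each parameter dict below is expanded into the cartesian product of its
  # listed values; every dtype/shape/flag combination becomes one test case.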

  if test_parameters is None:
    test_parameters = []

  test_parameters = test_parameters + [
      # Avoid creating all combinations to keep the test size small.
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [True],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False, True],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32, tf.int32, tf.int64],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [True, False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [True, False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[0]],
          "input_shape_2": [[1]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
  ]

  # float64 types are supported via flex only.
  if options.run_with_flex and options.use_experimental_converter:
    test_parameters = test_parameters + [
        {
            "dtype": [tf.float64],
            "input_shape_1": [[7]],
            "input_shape_2": [[7]],
            "activation": [False],
            "fully_quantize": [False],
            "dynamic_range_quantize": [False],
        },
    ]

  # High-dimension broadcasting is only supported by the MLIR converter.
  if options.use_experimental_converter:
    test_parameters = test_parameters + [
        {
            "dtype": [tf.float32],
            "input_shape_1": [[8, 7, 6, 5, 4, 3, 2, 1]],
            "input_shape_2": [[4, 3, 2, 1]],
            "activation": [False],
            "fully_quantize": [False],
            "dynamic_range_quantize": [False],
        },
    ]

  # Keep the fully_quantize=True cases only when allow_fully_quantize is True.
  if not allow_fully_quantize:
    test_parameters = [
        test_parameter for test_parameter in test_parameters
        if True not in test_parameter["fully_quantize"]
    ]

  def build_graph(parameters):
    """Builds the graph given the current parameters."""
    input1 = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input1",
        shape=parameters["input_shape_1"])
    input2 = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input2",
        shape=parameters["input_shape_2"])
    out = binary_operator(input1, input2)
    # TODO(karimnosseir): Update condition after moving to new converter.
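    # With the experimental (MLIR) converter, the fused activation is skipped
    # for integer dtypes; with the old converter it is applied whenever
    # "activation" is requested.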
    if parameters["activation"] and (not options.use_experimental_converter or
                                     (parameters["dtype"] != tf.int32 and
                                      parameters["dtype"] != tf.int64)):
      out = tf.nn.relu(out)
    return [input1, input2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Builds operand inputs for op."""
    if allow_fully_quantize:
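      # Bound the values to [-1, 1] so the fully quantized variants calibrate
      # over a small, known range.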
      input1 = create_tensor_data(
          parameters["dtype"],
          parameters["input_shape_1"],
          min_value=-1,
          max_value=1)
      input2 = create_tensor_data(
          parameters["dtype"],
          parameters["input_shape_2"],
          min_value=-1,
          max_value=1)
    else:
      input1 = create_tensor_data(parameters["dtype"],
                                  parameters["input_shape_1"])
      input2 = create_tensor_data(parameters["dtype"],
                                  parameters["input_shape_2"])
    return [input1, input2], sess.run(
        outputs, feed_dict={
            inputs[0]: input1,
            inputs[1]: input2
        })

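  # make_zip_of_tests builds one model per parameter combination, converts it,
  # and archives the models together with golden input/output examples.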
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=expected_tf_failures)


def make_binary_op_tests_func(binary_operator):
  """Return a function that makes binary-op tests for the given operator."""
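  # For example, make_binary_op_tests_func(tf.minimum) returns a callable
  # that, given the zip-test options, generates the standard binary-op test
  # suite for tf.minimum (an illustrative choice of elementwise operator).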
  return lambda options: make_binary_op_tests(options, binary_operator)


@register_make_test_function()
def make_add_tests(options):
  make_binary_op_tests(options, tf.add, allow_fully_quantize=True)


@register_make_test_function()
def make_div_tests(options):
  """Make zip tests for div op with 5D case."""
  test_parameters = [
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 3, 3, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False, True],
      },
  ]
  make_binary_op_tests(
      options, tf.compat.v1.div, test_parameters=test_parameters)


@register_make_test_function()
def make_sub_tests(options):
  """Make zip tests for sub op with additional cases."""
  test_parameters = [
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 3, 3, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False, True],
      },
  ]
  make_binary_op_tests(
      options,
      tf.subtract,
      allow_fully_quantize=True,
      test_parameters=test_parameters)


@register_make_test_function()
def make_mul_tests(options):
  make_binary_op_tests(options, tf.multiply, allow_fully_quantize=True)


@register_make_test_function()
def make_pow_tests(options):
  make_binary_op_tests(options, tf.pow, expected_tf_failures=7)


@register_make_test_function()
def make_floor_div_tests(options):
  make_binary_op_tests(options, tf.math.floordiv)


@register_make_test_function()
def make_floor_mod_tests(options):
  make_binary_op_tests(options, tf.math.floormod)


@register_make_test_function()
def make_squared_difference_tests(options):
  make_binary_op_tests(options, tf.math.squared_difference,
                       allow_fully_quantize=True)