1# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15"""Test configs for binary_op."""
16from __future__ import absolute_import
17from __future__ import division
18from __future__ import print_function
19
20import tensorflow.compat.v1 as tf
21from tensorflow.lite.testing.zip_test_utils import create_tensor_data
22from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
23from tensorflow.lite.testing.zip_test_utils import register_make_test_function
24
25
def make_binary_op_tests(options,
                         binary_operator,
                         allow_fully_quantize=False,
                         expected_tf_failures=0,
                         test_parameters=None):
  """Make a set of tests to do binary ops with and without broadcast.

  Args:
    options: zip-test options object; its `run_with_flex` and
      `use_experimental_converter` attributes gate the extra parameter
      sets added below, and it is forwarded to `make_zip_of_tests`.
    binary_operator: callable taking two tensors and returning one
      (e.g. `tf.add`); used to build the graph under test.
    allow_fully_quantize: when False, every parameter set whose
      "fully_quantize" list contains True is filtered out before tests
      are generated.
    expected_tf_failures: number of TF-side failures expected and
      tolerated by `make_zip_of_tests`.
    test_parameters: optional list of extra parameter dicts; these are
      prepended to the default parameter sets defined here.
  """

  if test_parameters is None:
    test_parameters = []

  test_parameters = test_parameters + [
      # Avoid creating all combinations to keep the test size small.
      # --- Unquantized float/int cases, with and without broadcast. ---
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [True],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False, True],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32, tf.int32, tf.int64],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [True, False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [True, False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      # Scalar (rank-0) operands.
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      # Zero-size dimensions on either side.
      {
          "dtype": [tf.float32],
          "input_shape_1": [[0]],
          "input_shape_2": [[1]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1]],
          "input_shape_2": [[0]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      # --- Fully-quantized (fully_quantize=True) variants of the shapes
      # above; removed later unless allow_fully_quantize is True. ---
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      # --- Dynamic-range-quantized variants of the same shapes. ---
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
  ]

  # float64 types are supported via flex only.
  if options.run_with_flex and options.use_experimental_converter:
    test_parameters = test_parameters + [
        {
            "dtype": [tf.float64],
            "input_shape_1": [[7]],
            "input_shape_2": [[7]],
            "activation": [False],
            "fully_quantize": [False],
            "dynamic_range_quantize": [False],
        },
    ]

  if options.use_experimental_converter:
    test_parameters = test_parameters + [
        # High dimension broadcasting support in MLIR converter.
        # A None dim is replaced with "dynamic_size_value" at input-build
        # time by populate_dynamic_shape below.
        {
            "dtype": [tf.float32],
            "input_shape_1": [[8, 7, 6, 5, 4, 3, 2, 1],
                              [8, 7, 6, 5, None, 3, 2, 1], [2, None]],
            "input_shape_2": [[4, 3, 2, 1], [None, 3, 2, 1]],
            "activation": [False],
            "fully_quantize": [False],
            "dynamic_range_quantize": [False],
            "dynamic_size_value": [4, 1],
        },
        # Zero in input shape.
        {
            "dtype": [tf.float32],
            "input_shape_1": [[1, 0], [1, None]],
            "input_shape_2": [[4, 3, 2, 1], [4, None, 2, 1]],
            "activation": [False],
            "fully_quantize": [False],
            "dynamic_range_quantize": [False],
            "dynamic_size_value": [0],
        },
    ]

  # test_parameters include fully_quantize option only when
  # allow_fully_quantize is True.
  if not allow_fully_quantize:
    test_parameters = [
        test_parameter for test_parameter in test_parameters
        if True not in test_parameter["fully_quantize"]
    ]

  def populate_dynamic_shape(parameters, input_shape):
    """Replaces each None dim with parameters["dynamic_size_value"].

    The dict lookup only happens for None dims (conditional expression is
    lazy), so parameter sets without "dynamic_size_value" are safe as long
    as their shapes contain no None.
    """
    return [
        parameters["dynamic_size_value"] if x is None else x
        for x in input_shape
    ]

  def build_graph(parameters):
    """Builds the graph given the current parameters."""
    # NOTE(review): `tf` is already tensorflow.compat.v1, so the extra
    # `.compat.v1` here is redundant (it resolves, but tf.placeholder
    # would suffice).
    input1 = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input1",
        shape=parameters["input_shape_1"])
    input2 = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input2",
        shape=parameters["input_shape_2"])
    out = binary_operator(input1, input2)
    # TODO(karimnosseir): Update condition after moving to new converter.
    # relu is skipped for integer dtypes under the experimental converter.
    if parameters["activation"] and (not options.use_experimental_converter or
                                     (parameters["dtype"] != tf.int32 and
                                      parameters["dtype"] != tf.int64)):
      out = tf.nn.relu(out)
    return [input1, input2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Builds operand inputs for op."""
    input_shape_1 = populate_dynamic_shape(parameters,
                                           parameters["input_shape_1"])
    input_shape_2 = populate_dynamic_shape(parameters,
                                           parameters["input_shape_2"])
    if allow_fully_quantize:
      # Constrain values to [-1, 1] so quantized tests get a stable range.
      input1 = create_tensor_data(
          parameters["dtype"], input_shape_1, min_value=-1, max_value=1)
      input2 = create_tensor_data(
          parameters["dtype"], input_shape_2, min_value=-1, max_value=1)
    else:
      input1 = create_tensor_data(parameters["dtype"], input_shape_1)
      input2 = create_tensor_data(parameters["dtype"], input_shape_2)
    return [input1, input2], sess.run(
        outputs, feed_dict={
            inputs[0]: input1,
            inputs[1]: input2
        })

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=expected_tf_failures)
272
273
def make_binary_op_tests_func(binary_operator):
  """Return a function that does a test on a binary operator."""

  def _run(options):
    # Delegates to make_binary_op_tests with default settings.
    return make_binary_op_tests(options, binary_operator)

  return _run
277
278
@register_make_test_function()
def make_add_tests(options):
  """Make zip tests for the add op, with fully-quantized variants enabled."""
  make_binary_op_tests(options, binary_operator=tf.add, allow_fully_quantize=True)
282
283
@register_make_test_function()
def make_div_tests(options):
  """Make zip tests for div op with 5D case."""
  # One extra 5-D broadcasting case on top of the defaults.
  five_d_params = {
      "dtype": [tf.float32],
      "input_shape_1": [[1, 3, 3, 3, 3]],
      "input_shape_2": [[3]],
      "activation": [False],
      "fully_quantize": [False],
      "dynamic_range_quantize": [False, True],
  }
  make_binary_op_tests(
      options, tf.compat.v1.div, test_parameters=[five_d_params])
299
300
@register_make_test_function()
def make_sub_tests(options):
  """Make zip tests for sub op with additional cases."""
  # One extra 5-D broadcasting case on top of the defaults.
  five_d_params = {
      "dtype": [tf.float32],
      "input_shape_1": [[1, 3, 3, 3, 3]],
      "input_shape_2": [[3]],
      "activation": [False],
      "fully_quantize": [False],
      "dynamic_range_quantize": [False, True],
  }
  make_binary_op_tests(
      options,
      tf.subtract,
      allow_fully_quantize=True,
      test_parameters=[five_d_params])
319
320
@register_make_test_function()
def make_mul_tests(options):
  """Make zip tests for the mul op, with fully-quantized variants enabled."""
  make_binary_op_tests(
      options, binary_operator=tf.multiply, allow_fully_quantize=True)
324
325
@register_make_test_function()
def make_pow_tests(options):
  """Make zip tests for the pow op; 7 TF-side failures are expected."""
  make_binary_op_tests(
      options, binary_operator=tf.pow, expected_tf_failures=7)
329
330
@register_make_test_function()
def make_floor_div_tests(options):
  """Make zip tests for the floor_div op with the default parameters."""
  make_binary_op_tests(options, binary_operator=tf.math.floordiv)
334
335
@register_make_test_function()
def make_floor_mod_tests(options):
  """Make zip tests for the floor_mod op with the default parameters."""
  make_binary_op_tests(options, binary_operator=tf.math.floormod)
339
340
@register_make_test_function()
def make_squared_difference_tests(options):
  """Make zip tests for squared_difference, with full quantization enabled."""
  make_binary_op_tests(
      options,
      binary_operator=tf.math.squared_difference,
      allow_fully_quantize=True)
345