# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for binary_op."""
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function


def make_binary_op_tests(options,
                         binary_operator,
                         allow_fully_quantize=False,
                         expected_tf_failures=0,
                         test_parameters=None):
  """Make a set of tests to do binary ops with and without broadcast.

  Args:
    options: Zip-test options forwarded to `make_zip_of_tests`.
    binary_operator: The TensorFlow binary operator under test, e.g. `tf.add`.
    allow_fully_quantize: Whether to keep parameter sets that request full
      quantization.
    expected_tf_failures: Number of generated combinations expected to fail
      when run with TensorFlow.
    test_parameters: Optional extra parameter dictionaries to test in addition
      to the defaults below.
  """

  if test_parameters is None:
    test_parameters = []

  test_parameters = test_parameters + [
      # Avoid creating all combinations to keep the test size small.
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [True],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False, True],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32, tf.int32, tf.int64],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [True, False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [True, False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [True],
          "dynamic_range_quantize": [False],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [True],
      },
  ]
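  # make_zip_of_tests expands each dictionary above into the cartesian product
  # of its value lists, so a single entry can yield many test combinations.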

  # float64 types are supported via flex only.
  if options.run_with_flex:
    test_parameters = test_parameters + [
        {
            "dtype": [tf.float64],
            "input_shape_1": [[7]],
            "input_shape_2": [[7]],
            "activation": [False],
            "fully_quantize": [False],
            "dynamic_range_quantize": [False],
        },
    ]

  if not options.skip_high_dimension_inputs:
    test_parameters = test_parameters + [
        # High dimension broadcasting support in MLIR converter.
        # Note(b/204360746): The XNNPack delegate doesn't support
        # high-dimensional inputs.
        {
            "dtype": [tf.float32],
            "input_shape_1": [[8, 7, 6, 5, 4, 3, 2, 1],
                              [8, 7, 6, 5, None, 3, 2, 1], [2, None]],
            "input_shape_2": [[4, 3, 2, 1], [None, 3, 2, 1]],
            "activation": [False],
            "fully_quantize": [False],
            "dynamic_range_quantize": [False],
            "dynamic_size_value": [4, 1],
        }
    ]

  # test_parameters includes the fully_quantize option only when
  # allow_fully_quantize is True.
  if not allow_fully_quantize:
    test_parameters = [
        test_parameter for test_parameter in test_parameters
        if True not in test_parameter["fully_quantize"]
    ]

  def populate_dynamic_shape(parameters, input_shape):
    """Replaces each `None` dimension with the test's dynamic size value."""
    return [
        parameters["dynamic_size_value"] if x is None else x
        for x in input_shape
    ]
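  # For example, populate_dynamic_shape(parameters, [2, None]) with a
  # "dynamic_size_value" of 4 yields [2, 4].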

  def build_graph(parameters):
    """Builds the graph given the current parameters."""
    input1 = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input1",
        shape=parameters["input_shape_1"])
    input2 = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input2",
        shape=parameters["input_shape_2"])
    out = binary_operator(input1, input2)
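    # Fused activations are only exercised for floating-point inputs; relu is
    # skipped for the integer dtypes below.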
    if parameters["activation"] and parameters["dtype"] not in (tf.int32,
                                                                tf.int64):
      out = tf.nn.relu(out)
    return [input1, input2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Builds operand inputs for op."""
    input_shape_1 = populate_dynamic_shape(parameters,
                                           parameters["input_shape_1"])
    input_shape_2 = populate_dynamic_shape(parameters,
                                           parameters["input_shape_2"])
    if allow_fully_quantize:
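      # Bound values to [-1, 1] so fully-quantized tests calibrate over a
      # known, fixed range.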
      input1 = create_tensor_data(
          parameters["dtype"], input_shape_1, min_value=-1, max_value=1)
      input2 = create_tensor_data(
          parameters["dtype"], input_shape_2, min_value=-1, max_value=1)
    else:
      input1 = create_tensor_data(parameters["dtype"], input_shape_1)
      input2 = create_tensor_data(parameters["dtype"], input_shape_2)
    return [input1, input2], sess.run(
        outputs, feed_dict={
            inputs[0]: input1,
            inputs[1]: input2
        })

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=expected_tf_failures)


def make_binary_op_tests_func(binary_operator):
  """Return a function that does a test on a binary operator."""
  return lambda options: make_binary_op_tests(options, binary_operator)
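# For instance, make_binary_op_tests_func(tf.add) returns a callable taking
# only `options`, matching the signature of the make_*_tests functions below.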


@register_make_test_function()
def make_add_tests(options):
  """Make zip tests for add op with uint32 case."""
  test_parameters = [
      {
          "dtype": [tf.uint32],
          "input_shape_1": [[1, 3, 3, 3], [1], [3, 3]],
          "input_shape_2": [[3], [1]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
  ]
  make_binary_op_tests(
      options,
      tf.add,
      allow_fully_quantize=True,
      test_parameters=test_parameters)


@register_make_test_function()
def make_div_tests(options):
  """Make zip tests for div op with 5D case."""
  test_parameters = [
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 3, 3, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False, True],
      },
  ]
  make_binary_op_tests(
      options, tf.compat.v1.div, test_parameters=test_parameters)


@register_make_test_function()
def make_sub_tests(options):
  """Make zip tests for sub op with additional cases."""
  test_parameters = [
      {
          "dtype": [tf.float32],
          "input_shape_1": [[1, 3, 3, 3, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False, True],
      },
  ]
  make_binary_op_tests(
      options,
      tf.subtract,
      allow_fully_quantize=True,
      test_parameters=test_parameters)


@register_make_test_function()
def make_mul_tests(options):
  """Make zip tests for mul op with additional complex cases."""
  test_parameters = [
      {
          "dtype": [tf.complex64],
          "input_shape_1": [[1, 3, 3, 3, 3]],
          "input_shape_2": [[3]],
          "activation": [False],
          "fully_quantize": [False],
          "dynamic_range_quantize": [False],
      },
  ]
  make_binary_op_tests(
      options,
      tf.multiply,
      allow_fully_quantize=True,
      test_parameters=test_parameters)


@register_make_test_function()
def make_pow_tests(options):
  """Make zip tests for pow op."""
  make_binary_op_tests(options, tf.pow, expected_tf_failures=7)


@register_make_test_function()
def make_floor_div_tests(options):
  """Make zip tests for floor_div op."""
  make_binary_op_tests(options, tf.math.floordiv)


@register_make_test_function()
def make_floor_mod_tests(options):
  """Make zip tests for floor_mod op."""
  make_binary_op_tests(options, tf.math.floormod)


@register_make_test_function()
def make_squared_difference_tests(options):
  """Make zip tests for squared_difference op."""
  make_binary_op_tests(
      options, tf.math.squared_difference, allow_fully_quantize=True)

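
# A minimal sketch (hypothetical; not part of this suite) of how another
# binary operator could be registered using the same pattern:
#
# @register_make_test_function()
# def make_maximum_tests(options):
#   make_binary_op_tests(options, tf.math.maximum)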