# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for conv."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
26
@register_make_test_function()
def make_conv_tests(options):
  """Make a set of tests to do convolution."""

  # Each dict below is a cartesian-product grid of parameters. Separate grids
  # are used per quantization mode (float, int8 full-quant, 16x8 quant,
  # dynamic-range quant) so each mode only runs on configurations it supports.
  test_parameters = [
      {
          "input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
          "filter_shape": [[1, 1], [2, 3], [3, 3]],
          "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
          "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
          "padding": ["SAME", "VALID"],
          "data_format": ["NHWC"],  # TODO(aselle): NCHW  would be good
          "constant_filter": [True, False],
          "channel_multiplier": [1, 2],
          "fully_quantize": [False],
          "quant_16x8": [False],
          "dynamic_range_quantize": [False]
      },
      {
          "input_shape": [[1, 3, 4, 3]],
          "filter_shape": [[1, 1], [2, 3]],
          "strides": [[1, 1, 1, 1]],
          "dilations": [[1, 1, 1, 1]],
          "padding": ["SAME"],
          "data_format": ["NHWC"],
          "constant_filter": [True],
          "channel_multiplier": [1, 2],
          "fully_quantize": [True],
          "quant_16x8": [True],
          "dynamic_range_quantize": [False],
      },
      # TODO(b/134702301): The fully_quantize param is just ignored by the MLIR
      # testing path now, resulting in duplicate tests. Either ignore these
      # tests or handle it properly in the mlir_convert() function.
      {
          "input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
          "filter_shape": [[1, 1], [2, 3], [3, 3]],
          "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
          "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
          "padding": ["SAME", "VALID"],
          "data_format": ["NHWC"],  # TODO(aselle): NCHW  would be good
          "constant_filter": [True],
          "channel_multiplier": [1, 2],
          "fully_quantize": [True],
          "quant_16x8": [False],
          "dynamic_range_quantize": [False]
      },
      {
          "input_shape": [[1, 3, 4, 3]],
          "filter_shape": [[1, 1]],
          "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
          "dilations": [[1, 1, 1, 1]],
          "padding": ["SAME", "VALID"],
          "data_format": ["NHWC"],
          "constant_filter": [True],
          "channel_multiplier": [2],
          "fully_quantize": [False],
          "quant_16x8": [False],
          "dynamic_range_quantize": [True]
      },
  ]

  def get_tensor_shapes(parameters):
    """Return [input_shape, filter_shape] for one parameter combination.

    The HWIO filter shape is the spatial "filter_shape" extended with the
    input depth (input_shape[3]) and the channel multiplier, so the conv's
    output depth is input_depth * channel_multiplier.
    """
    input_shape = parameters["input_shape"]
    filter_size = parameters["filter_shape"]
    filter_shape = filter_size + [
        input_shape[3], parameters["channel_multiplier"]
    ]
    return [input_shape, filter_shape]

  def build_graph(parameters):
    """Build a conv graph given `parameters`.

    Returns (input_tensors, [out]) where input_tensors lists only the
    placeholders that the test harness must feed.
    """
    input_shape, filter_shape = get_tensor_shapes(parameters)
    # `tf` is already tensorflow.compat.v1 (see module imports), so calling
    # tf.placeholder directly avoids the redundant `.compat.v1` hop the rest
    # of this function (tf.float32, tf.nn.conv2d) never used.
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=input_shape)

    # Get filter input either as a placeholder or constants. Also get a list of
    # the input tensors that are represented as placeholders.
    if parameters["constant_filter"]:
      filter_input = create_tensor_data(
          np.float32, filter_shape, min_value=-10, max_value=10)
      input_tensors = [input_tensor]
    else:
      filter_input = tf.placeholder(
          dtype=tf.float32, name="filter", shape=filter_shape)
      input_tensors = [input_tensor, filter_input]

    out = tf.nn.conv2d(
        input_tensor,
        filter_input,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data and run the graph, returning (values, outputs)."""
    # Build list of input values either containing 1 tensor (input) or 2 tensors
    # (input, filter) based on whether filter is constant or variable input.
    input_shape, filter_shape = get_tensor_shapes(parameters)
    values = [
        create_tensor_data(np.float32, input_shape, min_value=-1, max_value=1)
    ]
    if not parameters["constant_filter"]:
      values.append(create_tensor_data(np.float32, filter_shape))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      # Some stride/dilation combinations are rejected by TF itself;
      # this many graph-construction failures are expected.
      expected_tf_failures=60)
140