# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
15"""Test configs for conv with activations."""
16from __future__ import absolute_import
17from __future__ import division
18from __future__ import print_function
19
20import numpy as np
21import tensorflow as tf
22from tensorflow.lite.testing.zip_test_utils import create_tensor_data
23from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
24from tensorflow.lite.testing.zip_test_utils import register_make_test_function
25
26
def make_conv_activation_tests(activation_op):
  """Make a set of tests to do convolution with activation.

  Args:
    activation_op: a callable that maps the conv2d output tensor to the
      activated output tensor (e.g. `tf.nn.relu`).

  Returns:
    A function of zip-test `options` that generates the test examples.
  """

  def f(options):
    """Actual function that generates examples."""
    test_parameters = [
        {
            "input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
            "filter_shape": [[1, 1], [2, 3], [3, 3]],
            "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
            "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
            "padding": ["SAME", "VALID"],
            "data_format": ["NHWC"],  # TODO(aselle): NCHW  would be good
            "constant_filter": [True, False],
            "channel_multiplier": [1, 2],
            "fully_quantize": [False],
        },
        # TODO(b/134702301): The fully_quantize param is just ignored by the
        # MLIR testing path now, resulting in duplicate tests. Either ignore
        # these tests or handle it properly in the mlir_convert() function.
        {
            "input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
            "filter_shape": [[1, 1], [2, 3], [3, 3]],
            "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
            "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
            "padding": ["SAME", "VALID"],
            "data_format": ["NHWC"],  # TODO(aselle): NCHW  would be good
            "constant_filter": [True],
            "channel_multiplier": [1, 2],
            "fully_quantize": [True],
        }
    ]

    def get_tensor_shapes(parameters):
      """Return [input_shape, filter_shape] for one parameter combination."""
      input_shape = parameters["input_shape"]
      filter_size = parameters["filter_shape"]
      # Filter is HWIO: [height, width, in_channels, channel_multiplier];
      # in_channels must match the last (channel) dim of the NHWC input.
      filter_shape = filter_size + [
          input_shape[3], parameters["channel_multiplier"]
      ]
      return [input_shape, filter_shape]

    def build_graph(parameters):
      """Build a conv graph given `parameters`."""
      input_shape, filter_shape = get_tensor_shapes(parameters)
      input_tensor = tf.compat.v1.placeholder(
          dtype=tf.float32, name="input", shape=input_shape)

      # Get filter input either as a placeholder or constants. Also get a list
      # of the input tensors that are represented as placeholders.
      if parameters["constant_filter"]:
        filter_input = create_tensor_data(
            np.float32, filter_shape, min_value=-10, max_value=10)
        input_tensors = [input_tensor]
      else:
        filter_input = tf.compat.v1.placeholder(
            dtype=tf.float32, name="filter", shape=filter_shape)
        input_tensors = [input_tensor, filter_input]

      out = tf.nn.conv2d(
          input_tensor,
          filter_input,
          strides=parameters["strides"],
          dilations=parameters["dilations"],
          padding=parameters["padding"],
          data_format=parameters["data_format"])
      # Apply the activation under test on top of the convolution.
      out = activation_op(out)
      return input_tensors, [out]

    def build_inputs(parameters, sess, inputs, outputs):
      """Build inputs for conv with activation."""

      input_shape, filter_shape = get_tensor_shapes(parameters)
      values = [
          create_tensor_data(
              np.float32, input_shape, min_value=-1, max_value=1)
      ]
      # Only feed the filter when it is a placeholder (non-constant).
      if not parameters["constant_filter"]:
        values.append(create_tensor_data(np.float32, filter_shape))
      return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

    make_zip_of_tests(
        options,
        test_parameters,
        build_graph,
        build_inputs,
        expected_tf_failures=60)

  return f


@register_make_test_function()
def make_conv_relu6_tests(options):
  """Make a set of tests to do conv_relu6."""
  return make_conv_activation_tests(tf.nn.relu6)(options)


@register_make_test_function()
def make_conv_relu_tests(options):
  """Make a set of tests to do conv_relu."""
  return make_conv_activation_tests(tf.nn.relu)(options)


def relu1(input_tensor):
  """Clamp `input_tensor` elementwise to the range [-1.0, 1.0] (relu1)."""
  # Note that the following is not supported:
  #   out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))
  out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))
  return out


@register_make_test_function()
def make_conv_relu1_tests(options):
  """Make a set of tests to do conv_relu1."""
  return make_conv_activation_tests(relu1)(options)