# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
15"""Test configs for fused_batch_norm."""
16from __future__ import absolute_import
17from __future__ import division
18from __future__ import print_function
19
20import numpy as np
21
22import tensorflow.compat.v1 as tf
23from tensorflow.lite.testing.zip_test_utils import create_tensor_data
24from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
25from tensorflow.lite.testing.zip_test_utils import register_make_test_function
26
27
28@register_make_test_function()
29def make_fused_batch_norm_tests(options):
30  """Make a set of tests to do fused_batch_norm."""
31
32  test_parameters = [{
33      "dtype": [tf.float32],
34      "input_shape": [[1, 1, 6, 2]],
35      "epsilon": [0.001, 0.1],
36      "is_training": [False],
37  }]
38
  # Training mode and the dynamic-shape case below are only exercised with the
  # experimental (MLIR-based) converter, which supports training-time
  # fused_batch_norm.
  if options.use_experimental_converter:
    test_parameters = test_parameters + [
        {
            "dtype": [tf.float32],
            "input_shape": [[1, 1, 6, 2]],
            "epsilon": [0.001, 0.1],
            "is_training": [True],
        },
        {
            "dtype": [tf.float32],
            "input_shape": [[1, None, 6, 2]],
            "epsilon": [0.001, 0.1],
            "is_training": [True, False],
        },
    ]
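
  # Note: make_zip_of_tests (called at the bottom of this function) expands
  # each parameter dict into the Cartesian product of its value lists, so the
  # base dict above alone yields two test cases, one per epsilon value.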

  def build_graph(parameters):
    """Build the testing graph for fused batch normalization."""
    input_shape = parameters["input_shape"]
    scale_shape = input_shape[3]

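    # Per-channel batch-norm parameters: random constants of length
    # input_shape[3] (the channel dimension for NHWC). They are embedded in
    # the graph as constants rather than fed as placeholders.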
    scale = create_tensor_data(parameters["dtype"], scale_shape)
    offset = create_tensor_data(parameters["dtype"], scale_shape)
    mean = create_tensor_data(parameters["dtype"], scale_shape)
    variance = create_tensor_data(parameters["dtype"], scale_shape)

    x = tf.compat.v1.placeholder(
        dtype=parameters["dtype"], name="x", shape=parameters["input_shape"])
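    # With is_training=False, fused_batch_norm normalizes per channel as
    #   x_norm = (x - mean) / sqrt(variance + epsilon) * scale + offset
    # using the frozen mean/variance above; with is_training=True it uses
    # batch statistics computed from x instead.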
    [x_norm, _, _] = tf.compat.v1.nn.fused_batch_norm(
        x,
        scale,
        offset,
        mean,
        variance,
        parameters["epsilon"],
        data_format="NHWC",
        is_training=parameters["is_training"])

    input_tensor = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    out = tf.add(input_tensor, x_norm)
    return [x, input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Replace any dynamic (None) dimension with a random concrete size.
    input_shape = parameters["input_shape"]
    input_shape = [
        np.random.randint(1, 10) if v is None else v for v in input_shape
    ]
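    # For the dynamic-shape configuration above ([1, None, 6, 2]) this might
    # yield e.g. [1, 7, 6, 2]; the same concrete shape is fed to both the "x"
    # and "input" placeholders.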

    input_values = [
        create_tensor_data(parameters["dtype"], input_shape),
        create_tensor_data(parameters["dtype"], input_shape)
    ]

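    # The session outputs computed here become the reference (golden) values
    # that the converted TFLite model is compared against.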
    return input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)