# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for fused_batch_norm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function


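# The decorator below registers this generator with the TFLite zip-test
# harness so it can be discovered and run by the test tooling.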
@register_make_test_function()
def make_fused_batch_norm_tests(options):
  """Make a set of tests to do fused_batch_norm."""

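  # Parameter grid: every combination of the values below becomes one
  # generated test case. Inference mode (is_training=False) is the default.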
  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 1, 6, 2]],
      "epsilon": [0.001, 0.1],
      "is_training": [False],
  }]

  # Training mode (is_training=True) is only exercised with the experimental
  # MLIR-based converter, which supports it.
  if options.use_experimental_converter:
    test_parameters = test_parameters + [{
        "dtype": [tf.float32],
        "input_shape": [[1, 1, 6, 2]],
        "epsilon": [0.001, 0.1],
        "is_training": [True],
    }]

  def build_graph(parameters):
    """Build the testing graph for fused batch normalization."""
    input_shape = parameters["input_shape"]
    scale_shape = input_shape[3]

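    # The per-channel batch-norm parameters (scale, offset, mean, variance)
    # are generated once here and baked into the graph as constants rather
    # than fed as runtime inputs.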
    scale = create_tensor_data(parameters["dtype"], scale_shape)
    offset = create_tensor_data(parameters["dtype"], scale_shape)
    mean = create_tensor_data(parameters["dtype"], scale_shape)
    variance = create_tensor_data(parameters["dtype"], scale_shape)

    x = tf.compat.v1.placeholder(
        dtype=parameters["dtype"], name="x", shape=parameters["input_shape"])
    [x_norm, _, _] = tf.compat.v1.nn.fused_batch_norm(
        x,
        scale,
        offset,
        mean,
        variance,
        parameters["epsilon"],
        data_format="NHWC",
        is_training=parameters["is_training"])

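    # A second placeholder is combined with the normalized result so the
    # generated model has two runtime inputs.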
    input_tensor = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    out = tf.add(input_tensor, x_norm)
    return [x, input_tensor], [out]

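  # Feed random data for both placeholders and compute reference outputs with
  # the TF session; these serve as the expected values for each test case.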
  def build_inputs(parameters, sess, inputs, outputs):
    input_values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"]),
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]

    return input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))

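  # Build a graph for each parameter combination and package the resulting
  # test cases into a zip archive via the shared harness.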
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)