# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
15"""Test configs for conv_with_shared_weights."""
16from __future__ import absolute_import
17from __future__ import division
18from __future__ import print_function
19
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function


@register_make_test_function()
def make_conv_with_shared_weights_tests(options):
  """Make a test where 2 Conv ops share the same constant weight tensor."""

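  # A single parameter combination, converted both as plain float and with
  # dynamic-range quantization.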
  test_parameters = [{
      "input_shape": [[1, 10, 10, 3]],
      "filter_shape": [[3, 3]],
      "strides": [[1, 1, 1, 1]],
      "dilations": [[1, 1, 1, 1]],
      "padding": ["SAME"],
      "data_format": ["NHWC"],
      "channel_multiplier": [1],
      "dynamic_range_quantize": [False, True],
  }]

  def get_tensor_shapes(parameters):
    """Compute the input and filter shapes for one parameter combination."""
    input_shape = parameters["input_shape"]
    filter_size = parameters["filter_shape"]
    filter_shape = filter_size + [
        input_shape[3], parameters["channel_multiplier"]
    ]
    return [input_shape, filter_shape]

  def build_graph(parameters):
    """Build a conv graph given `parameters`."""
    input_shape, filter_shape = get_tensor_shapes(parameters)
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=input_shape)
    input_tensors = [input_tensor]

    # Construct a constant weights tensor that is shared by both Conv2D ops.
    filter_tensor = tf.constant(
        create_tensor_data(np.float32, filter_shape), dtype=tf.float32)

    # Ensure that FuseBinaryIntoFollowingAffine works with an input which
    # is shared by multiple affine ops.
    conv_input = input_tensor + 0.1

    # Construct 2 Conv2D operations which use exactly the same input and
    # weights.
    result1 = tf.nn.conv2d(
        conv_input,
        filter_tensor,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    result2 = tf.nn.conv2d(
        conv_input,
        filter_tensor,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    # Add MUL ops after Conv2D ops. These MUL ops should be fused into the
    # weights of Conv2D.
    result1 = result1 * 2
    result2 = result2 * 3
    # Add the 2 results up.
    out = result1 + result2
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # The filter is always a constant in this test, so only the input tensor
    # needs a feed value.
    input_shape, unused_filter_shape = get_tensor_shapes(parameters)
    values = [create_tensor_data(np.float32, input_shape)]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

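  # Generate the zipped test cases for every combination in `test_parameters`.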
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)