# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
15"""Tests for convolution related functionality in tensorflow.ops.nn."""
16
17import numpy as np
18
19from tensorflow.python.framework import constant_op
20from tensorflow.python.framework import dtypes
21from tensorflow.python.framework import test_util
22from tensorflow.python.ops import array_ops
23from tensorflow.python.ops import gradient_checker
24from tensorflow.python.ops import nn_impl
25from tensorflow.python.ops import nn_ops
26import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
27from tensorflow.python.platform import test
28
29
30def _upsample_filters(filters, rate):
31  """Upsamples the filters by a factor of rate along the spatial dimensions.
32
33  Args:
34    filters: [h, w, in_depth, out_depth]. Original filters.
35    rate: An int, specifying the upsampling rate.
36
37  Returns:
38    filters_up: [h_up, w_up, in_depth, out_depth]. Upsampled filters with
39      h_up = h + (h - 1) * (rate - 1)
40      w_up = w + (w - 1) * (rate - 1)
41      containing (rate - 1) zeros between consecutive filter values along
42      the filters' spatial dimensions.
43  """
44  if rate == 1:
45    return filters
46  # [h, w, in_depth, out_depth] -> [in_depth, out_depth, h, w]
47  filters_up = np.transpose(filters, [2, 3, 0, 1])
48  ker = np.zeros([rate, rate], dtype=np.float32)
49  ker[0, 0] = 1
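  # np.kron places each filter value at the top-left corner of a rate x rate
  # block of zeros; the trailing slices drop the extra zeros after the last
  # value, so a row [a, b, c] at rate=2 becomes [a, 0, b, 0, c].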
  filters_up = np.kron(filters_up, ker)[:, :, :-(rate - 1), :-(rate - 1)]
  # [in_depth, out_depth, h_up, w_up] -> [h_up, w_up, in_depth, out_depth]
  filters_up = np.transpose(filters_up, [2, 3, 0, 1])
  return filters_up


class AtrousConv2DTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testAtrousConv2DForward(self):
    with self.session():
      # Input: [batch, height, width, input_depth]
      height = 9
      for width in [9, 10]:  # Test both odd and even width.
        x_shape = [2, height, width, 2]
        x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)

        # Filter: [kernel_height, kernel_width, input_depth, output_depth]
        for kernel_height in range(1, 4):
          for kernel_width in range(1, 4):
            f_shape = [kernel_height, kernel_width, 2, 2]
            f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)

            for rate in range(1, 4):
              f_up = _upsample_filters(f, rate)

              for padding in ["SAME", "VALID"]:
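                # atrous_conv2d with `rate` should match a stride-1 conv2d
                # applied with the zero-upsampled filters.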
                y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
                y2 = nn_ops.conv2d(
                    x, f_up, strides=[1, 1, 1, 1], padding=padding)
                self.assertAllClose(y1, y2, rtol=1e-3, atol=1e-3)

  @test_util.run_deprecated_v1
  def testAtrousSequence(self):
    """Tests optimization of a sequence of atrous convolutions.

    Verifies that a sequence of `atrous_conv2d` operations with identical `rate`
    parameters, 'SAME' `padding`, and `filters` with odd heights/widths:

        net = atrous_conv2d(net, filters1, rate, padding="SAME")
        net = atrous_conv2d(net, filters2, rate, padding="SAME")
        ...
        net = atrous_conv2d(net, filtersK, rate, padding="SAME")

    is equivalent to:

        pad = ...  # padding so that the input dims are multiples of rate
        net = space_to_batch(net, paddings=pad, block_size=rate)
        net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
        net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
        ...
        net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
        net = batch_to_space(net, crops=pad, block_size=rate)
    """
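    # Sketch of why this holds: with "SAME" padding and odd filter sizes, each
    # atrous_conv2d lowers to space_to_batch -> conv2d -> batch_to_space with
    # block_size=rate, and the intermediate batch_to_space/space_to_batch pairs
    # cancel, leaving the single pair used for y2 below.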
    padding = "SAME"  # The padding needs to be "SAME"
    np.random.seed(1)  # Make it reproducible.

    with self.session():
      # Input: [batch, height, width, input_depth]
      for height in range(15, 17):
        for width in range(15, 17):
          x_shape = [3, height, width, 2]
          x = np.random.random_sample(x_shape).astype(np.float32)

          for kernel in [1, 3, 5]:  # The kernel size needs to be odd.
            # Filter: [kernel_height, kernel_width, input_depth, output_depth]
            f_shape = [kernel, kernel, 2, 2]
            f = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)

            for rate in range(2, 4):
              # y1: three atrous_conv2d in a row.
              y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
              y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
              y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
              # y2: space_to_batch, three conv2d in a row, batch_to_space
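              # Pad so that height and width become multiples of rate
              # (e.g. height=15 with rate=2 needs pad_bottom=1).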
              pad_bottom = 0 if height % rate == 0 else rate - height % rate
              pad_right = 0 if width % rate == 0 else rate - width % rate
              pad = [[0, pad_bottom], [0, pad_right]]
              y2 = array_ops.space_to_batch(x, paddings=pad, block_size=rate)
              y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
              y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
              y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
              y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
              self.assertAllClose(y1, y2, rtol=1e-2, atol=1e-2)

  @test_util.run_deprecated_v1
  def testGradient(self):
    with self.session():
      # Input: [batch, height, width, input_depth]
      x_shape = [2, 5, 6, 2]
      # Filter: [kernel_height, kernel_width, input_depth, output_depth]
      f_shape = [3, 3, 2, 2]
      # Output: [batch, height, width, output_depth]
      y_shape = [2, 5, 6, 2]

      np.random.seed(1)  # Make it reproducible.
      x_val = np.random.random_sample(x_shape).astype(np.float32)
      f_val = np.random.random_sample(f_shape).astype(np.float32)
      x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
      f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)

      for rate in range(1, 4):
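        # compute_gradient_error compares the analytically derived Jacobian
        # with a numerically estimated one and returns the maximum elementwise
        # error.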
        output = nn_ops.atrous_conv2d(x, f, rate=rate, padding="SAME")
        err = gradient_checker.compute_gradient_error([x, f],
                                                      [x_shape, f_shape],
                                                      output, y_shape)
        print("atrous_conv2d gradient err = %g " % err)
        err_tolerance = 4e-3 if test_util.is_xla_enabled() else 1e-3
        self.assertLess(err, err_tolerance)


class AtrousConv2DTransposeTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testAtrousConv2DTransposeForward(self):
    with self.session():
      # Input: [batch, height, width, input_depth]
      height = 9
      for width in [9, 10]:  # Test both odd and even width.
        x_shape = [2, height, width, 2]
        x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)

        # Filter: [kernel_height, kernel_width, input_depth, output_depth]
        for kernel_height in range(1, 4):
          for kernel_width in range(1, 4):
            f_shape = [kernel_height, kernel_width, 2, 2]
            f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)

            for rate in range(1, 4):
              f_up = _upsample_filters(f, rate)
              kernel_height_up = (kernel_height + (kernel_height - 1) *
                                  (rate - 1))
              kernel_width_up = kernel_width + (kernel_width - 1) * (rate - 1)

              for padding in ["SAME", "VALID"]:
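                # Expected output shape: "SAME" preserves the input spatial
                # size, while "VALID" grows each spatial dimension by
                # (upsampled kernel size - 1), the inverse of a VALID forward
                # convolution.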
                if padding == "SAME":
                  y_shape = [2, height, width, 2]
                else:
                  y_shape = [
                      2, height + kernel_height_up - 1,
                      width + kernel_width_up - 1, 2
                  ]

                y1 = nn_ops.atrous_conv2d_transpose(x, f, y_shape, rate,
                                                    padding)
                y2 = nn_ops.conv2d_transpose(
                    x, f_up, y_shape, strides=[1, 1, 1, 1], padding=padding)
                self.assertAllClose(y1, y2, rtol=1e-3, atol=1e-3)


class AtrousDepthwiseConv2DTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testAtrousDepthwiseConv2DForward(self):
    strides = [1, 1, 1, 1]
    with self.session():
      # Input: [batch, height, width, input_depth]
      height = 9
      for width in [9, 10]:  # Test both odd and even width.
        x_shape = [2, height, width, 2]
        x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)

        # Filter: [kernel_height, kernel_width, input_depth, channel_multiplier]
        for kernel_height in range(1, 4):
          for kernel_width in range(1, 4):
            f_shape = [kernel_height, kernel_width, 2, 2]
            f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)

            for rate in range(1, 4):
              f_up = _upsample_filters(f, rate)

              for padding in ["SAME", "VALID"]:
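                # depthwise_conv2d with rate=[rate, rate] should match a plain
                # depthwise_conv2d applied with the zero-upsampled filters.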
                y1 = nn_impl.depthwise_conv2d(
                    x, f, strides, padding, rate=[rate, rate])
                y2 = nn_impl.depthwise_conv2d(x, f_up, strides, padding)
                self.assertAllClose(y1, y2, rtol=1e-3, atol=1e-3)


if __name__ == "__main__":
  test.main()