# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for local response normalization."""

import copy

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test


class LRNOpTest(test.TestCase):

  def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
           beta=0.5):
    """Compute expected result."""
    output = copy.deepcopy(input_image)
    batch_size = input_image.shape[0]
    rows = input_image.shape[1]
    cols = input_image.shape[2]
    depth = input_image.shape[3]
    for b in range(batch_size):
      for r in range(rows):
        for c in range(cols):
          for d in range(depth):
            begin = max(0, d - lrn_depth_radius)
            end = min(depth, d + lrn_depth_radius + 1)
            patch = input_image[b, r, c, begin:end]
            output[b, r, c, d] /= (
                np.power(bias + alpha * np.sum(patch * patch), beta))
    return output

  def _RunAndVerify(self, dtype):
    with self.cached_session():
      # random shape
      shape = np.random.randint(1, 16, size=4)
      # Make depth at least 2 to make it meaningful
      shape[3] += 1
      p = array_ops.placeholder(dtype, shape=shape)
      # random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
      # be in [1, 7].
      lrn_depth_radius = np.random.randint(1, min(8, shape[3]))

      bias = 1.0 + np.random.rand()
      alpha = 2.0 * np.random.rand()
      # cuDNN requires beta >= 0.01.
      beta = 0.01 + 2.0 * np.random.rand()
      lrn_t = nn.local_response_normalization(
          p,
          name="lrn",
          depth_radius=lrn_depth_radius,
          bias=bias,
          alpha=alpha,
          beta=beta)
      params = {p: np.random.rand(*shape).astype("f")}
      result = lrn_t.eval(feed_dict=params)
    expected = self._LRN(
        params[p],
        lrn_depth_radius=lrn_depth_radius,
        bias=bias,
        alpha=alpha,
        beta=beta)
    err = np.amax(np.abs(result - expected))
    print("LRN error for bias", bias, "alpha", alpha, "beta", beta, "is", err)
    if dtype == dtypes.float32:
      self.assertLess(err, 1e-4)
    else:
      self.assertLess(err, 1e-2)
    self.assertShapeEqual(expected, lrn_t)

  @test_util.run_deprecated_v1
  def testCompute(self):
    for _ in range(2):
      self._RunAndVerify(dtypes.float32)
      # Enable when LRN supports tf.float16 on GPU.
      if not test.is_gpu_available():
        self._RunAndVerify(dtypes.float16)

  @test_util.run_deprecated_v1
  def testGradientsZeroInput(self):
    with self.session():
      shape = [4, 4, 4, 4]
      p = array_ops.placeholder(dtypes.float32, shape=shape)
      inp_array = np.zeros(shape).astype("f")
      lrn_op = nn.local_response_normalization(p, 2, 1.0, 0.0, 1.0, name="lrn")
      grad = gradients_impl.gradients([lrn_op], [p])[0]
      params = {p: inp_array}
      r = grad.eval(feed_dict=params)
    expected = np.ones(shape).astype("f")
    self.assertAllClose(r, expected)
    self.assertShapeEqual(expected, grad)

  @test_util.run_in_graph_and_eager_modes
  def testIncompatibleInputAndOutputImageShapes(self):
    depth_radius = 1
    bias = 1.59018219
    alpha = 0.117728651
    beta = 0.404427052
    input_grads = random_ops.random_uniform(
        shape=[4, 4, 4, 4],
        minval=-10000,
        maxval=10000,
        dtype=dtypes.float32,
        seed=-2033)
    input_image = random_ops.random_uniform(
        shape=[4, 4, 4, 4],
        minval=-10000,
        maxval=10000,
        dtype=dtypes.float32,
        seed=-2033)
    invalid_output_image = random_ops.random_uniform(
        shape=[4, 4, 4, 4, 4, 4],
        minval=-10000,
        maxval=10000,
        dtype=dtypes.float32,
        seed=-2033)
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      self.evaluate(
          nn.lrn_grad(
              input_grads=input_grads,
              input_image=input_image,
              output_image=invalid_output_image,
              depth_radius=depth_radius,
              bias=bias,
              alpha=alpha,
              beta=beta))

  def _RunAndVerifyGradients(self, dtype):
    with self.cached_session():
      # random shape
      shape = np.random.randint(1, 5, size=4)
      # Make depth at least 2 to make it meaningful
      shape[3] += 1
      # random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
      # be in [1, 7].
      lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
      bias = 1.0 + np.random.rand()
      alpha = 1.0 * np.random.rand()
      # cuDNN requires beta >= 0.01.
      beta = 0.01 + 1.0 * np.random.rand()
      if dtype == dtypes.float32:
        inp_array = np.random.rand(*shape).astype(np.float32)
      else:
        inp_array = np.random.rand(*shape).astype(np.float16)

      inp = constant_op.constant(
          list(inp_array.ravel(order="C")), shape=shape, dtype=dtype)
      lrn_op = nn.local_response_normalization(
          inp,
          name="lrn",
          depth_radius=lrn_depth_radius,
          bias=bias,
          alpha=alpha,
          beta=beta)
      err = gradient_checker.compute_gradient_error(inp, shape, lrn_op, shape)
    print("LRN Gradient error for bias", bias, "alpha", alpha, "beta", beta,
          "is", err)
    if dtype == dtypes.float32:
      self.assertLess(err, 1e-4)
    else:
      self.assertLess(err, 1.0)

  @test_util.run_deprecated_v1
  def testGradients(self):
    for _ in range(2):
      self._RunAndVerifyGradients(dtypes.float32)
      # Enable when LRN supports tf.float16 on GPU.
      if not test.is_gpu_available():
        self._RunAndVerifyGradients(dtypes.float16)


if __name__ == "__main__":
  test.main()