# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for the betainc op (regularized incomplete beta function)."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


class BetaincTest(test.TestCase):

  def _testBetaInc(self, a_s, b_s, x_s, dtype):
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      np_dt = dtype.as_numpy_dtype

      # Test random values
      a_s = a_s.astype(np_dt)  # in (0, infty)
      b_s = b_s.astype(np_dt)  # in (0, infty)
      x_s = x_s.astype(np_dt)  # in (0, 1)
      tf_a_s = constant_op.constant(a_s, dtype=dtype)
      tf_b_s = constant_op.constant(b_s, dtype=dtype)
      tf_x_s = constant_op.constant(x_s, dtype=dtype)
      tf_out_t = math_ops.betainc(tf_a_s, tf_b_s, tf_x_s)
      with self.test_session():
        tf_out = tf_out_t.eval()
      scipy_out = special.betainc(a_s, b_s, x_s).astype(np_dt)

      # The scipy version of betainc uses a double-only implementation.
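      # Comparing float32 results against that double-precision reference
      # is one reason float32 gets the looser tolerance below.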
      # TODO(ebrevdo): identify reasons for (sometime) precision loss
      # with doubles
      tol = 1e-4 if dtype == dtypes.float32 else 5e-5
      self.assertAllCloseAccordingToType(scipy_out, tf_out, rtol=tol, atol=0)

      # Test out-of-range values (most should return nan output)
      combinations = list(itertools.product([-1, 0, 0.5, 1.0, 1.5], repeat=3))
      a_comb, b_comb, x_comb = np.asarray(list(zip(*combinations)),
                                          dtype=np_dt)
      with self.test_session():
        tf_comb = math_ops.betainc(a_comb, b_comb, x_comb).eval()
      scipy_comb = special.betainc(a_comb, b_comb, x_comb).astype(np_dt)
      self.assertAllCloseAccordingToType(scipy_comb, tf_comb)

      # Test broadcasting between scalars and other shapes
      with self.test_session():
        self.assertAllCloseAccordingToType(
            special.betainc(0.1, b_s, x_s).astype(np_dt),
            math_ops.betainc(0.1, b_s, x_s).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(a_s, 0.1, x_s).astype(np_dt),
            math_ops.betainc(a_s, 0.1, x_s).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(a_s, b_s, 0.1).astype(np_dt),
            math_ops.betainc(a_s, b_s, 0.1).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(0.1, b_s, 0.1).astype(np_dt),
            math_ops.betainc(0.1, b_s, 0.1).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(0.1, 0.1, 0.1).astype(np_dt),
            math_ops.betainc(0.1, 0.1, 0.1).eval(),
            rtol=tol,
            atol=0)

      with self.assertRaisesRegexp(ValueError, "must be equal"):
        math_ops.betainc(0.5, [0.5], [[0.5]])

      with self.test_session():
        with self.assertRaisesOpError("Shapes of .* are inconsistent"):
          a_p = array_ops.placeholder(dtype)
          b_p = array_ops.placeholder(dtype)
          x_p = array_ops.placeholder(dtype)
          math_ops.betainc(a_p, b_p, x_p).eval(
              feed_dict={a_p: 0.5,
                         b_p: [0.5],
                         x_p: [[0.5]]})

    except ImportError as e:
      tf_logging.warn("Cannot test special functions: %s" % str(e))

  def testBetaIncFloat(self):
    a_s = np.abs(np.random.randn(10, 10) * 30)  # in (0, infty)
    b_s = np.abs(np.random.randn(10, 10) * 30)  # in (0, infty)
    x_s = np.random.rand(10, 10)  # in (0, 1)
    self._testBetaInc(a_s, b_s, x_s, dtypes.float32)

  def testBetaIncDouble(self):
    a_s = np.abs(np.random.randn(10, 10) * 30)  # in (0, infty)
    b_s = np.abs(np.random.randn(10, 10) * 30)  # in (0, infty)
    x_s = np.random.rand(10, 10)  # in (0, 1)
    self._testBetaInc(a_s, b_s, x_s, dtypes.float64)

  def testBetaIncDoubleVeryLargeValues(self):
    a_s = np.abs(np.random.randn(10, 10) * 1e15)  # in (0, infty)
    b_s = np.abs(np.random.randn(10, 10) * 1e15)  # in (0, infty)
    x_s = np.random.rand(10, 10)  # in (0, 1)
    self._testBetaInc(a_s, b_s, x_s, dtypes.float64)

  def testBetaIncDoubleVerySmallValues(self):
    a_s = np.abs(np.random.randn(10, 10) * 1e-16)  # in (0, infty)
    b_s = np.abs(np.random.randn(10, 10) * 1e-16)  # in (0, infty)
    x_s = np.random.rand(10, 10)  # in (0, 1)
    self._testBetaInc(a_s, b_s, x_s, dtypes.float64)

  def testBetaIncFloatVerySmallValues(self):
    a_s = np.abs(np.random.randn(10, 10) * 1e-8)  # in (0, infty)
    b_s = np.abs(np.random.randn(10, 10) * 1e-8)  # in (0, infty)
    x_s = np.random.rand(10, 10)  # in (0, 1)
    self._testBetaInc(a_s, b_s, x_s, dtypes.float32)

  def testBetaIncFpropAndBpropAreNeverNAN(self):
    with self.test_session() as sess:
      space = np.logspace(-8, 5).tolist()
      space_x = np.linspace(1e-16, 1 - 1e-16).tolist()
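      # itertools.product over these grids yields 50 * 50 * 50 = 125,000
      # (a, b, x) triples; np.logspace and np.linspace default to 50 samples.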
      ga_s, gb_s, gx_s = zip(*list(itertools.product(space, space, space_x)))
      # Test grads are never nan
      ga_s_t = constant_op.constant(ga_s, dtype=dtypes.float32)
      gb_s_t = constant_op.constant(gb_s, dtype=dtypes.float32)
      gx_s_t = constant_op.constant(gx_s, dtype=dtypes.float32)
      tf_gout_t = math_ops.betainc(ga_s_t, gb_s_t, gx_s_t)
      tf_gout, grads_x = sess.run(
          [tf_gout_t,
           gradients_impl.gradients(tf_gout_t, [ga_s_t, gb_s_t, gx_s_t])[2]])

      # Equivalent to `assertAllFalse` (if it existed).
      self.assertAllEqual(np.zeros_like(grads_x).astype(np.bool_),
                          np.isnan(tf_gout))
      self.assertAllEqual(np.zeros_like(grads_x).astype(np.bool_),
                          np.isnan(grads_x))

  def testBetaIncGrads(self):
    err_tolerance = 1e-3
    with self.test_session():
      # Test gradient
      ga_s = np.abs(np.random.randn(2, 2) * 30)  # in (0, infty)
      gb_s = np.abs(np.random.randn(2, 2) * 30)  # in (0, infty)
      gx_s = np.random.rand(2, 2)  # in (0, 1)
      tf_ga_s = constant_op.constant(ga_s, dtype=dtypes.float64)
      tf_gb_s = constant_op.constant(gb_s, dtype=dtypes.float64)
      tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
      tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
      err = gradient_checker.compute_gradient_error(
          [tf_gx_s], [gx_s.shape], tf_gout_t, gx_s.shape)
      print("betainc gradient err = %g " % err)
      self.assertLess(err, err_tolerance)

      # Test broadcast gradient
      gx_s = np.random.rand()  # in (0, 1)
      tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
      tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
      err = gradient_checker.compute_gradient_error(
          [tf_gx_s], [()], tf_gout_t, ga_s.shape)
      print("betainc gradient err = %g " % err)
      self.assertLess(err, err_tolerance)


if __name__ == "__main__":
  test.main()