# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations."""

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test

_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y

_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y

_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x


# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  x[x < thresh] = 0

  non_zero = np.where(x)
  x_indices = np.vstack(non_zero).astype(index_dtype).T
  x_values = x[non_zero]
  x_shape = x.shape

  return sparse_tensor.SparseTensor(
      indices=x_indices, values=x_values, dense_shape=x_shape), x_values


def _default_tolerance(dtype):
  """Returns a sensible default tolerance for comparing results of a given type.

  Args:
    dtype: A datatype.
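
  Returns:
    A comparison tolerance: 5e-3 for float16, 1e-3 for float32/complex64,
    1e-5 for float64/complex128, and None for any other type so that callers
    fail fast on unexpected dtypes.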
  """
  if dtype == np.float16:
    return 5e-3
  elif dtype in (np.float32, np.complex64):
    return 1e-3
  elif dtype in (np.float64, np.complex128):
    return 1e-5
  else:
    return None  # Fail fast for unexpected types


class ComparisonOpTest(test.TestCase):

  def _compareScalar(self, func, x, y, dtype):
    with test_util.use_gpu():
      out = func(
          ops.convert_to_tensor(np.array([x]).astype(dtype)),
          ops.convert_to_tensor(np.array([y]).astype(dtype)))
      ret = self.evaluate(out)
    return ret[0]

  def testScalarCompareScalar(self):
    dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
    data = [-1, 0, 1]
    for t in dtypes:
      for x in data:
        for y in data:
          with self.subTest(t=t, x=x, y=y):
            self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
            self.assertEqual(
                self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
            self.assertEqual(
                self._compareScalar(math_ops.greater, x, y, t), x > y)
            self.assertEqual(
                self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
            self.assertEqual(
                self._compareScalar(math_ops.equal, x, y, t), x == y)
            self.assertEqual(
                self._compareScalar(math_ops.not_equal, x, y, t), x != y)
    data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
    for t in [np.complex64, np.complex128]:
      for x in data:
        for y in data:
          with self.subTest(t=t, x=x, y=y):
            self.assertEqual(
                self._compareScalar(math_ops.equal, x, y, t), x == y)
            self.assertEqual(
                self._compareScalar(math_ops.not_equal, x, y, t), x != y)

  def _compare(self, x, y, np_func, tf_func):
    np_ans = np_func(x, y)
    with test_util.use_gpu():
      out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
      tf_ans = self.evaluate(out)
    self.assertAllEqual(np_ans, tf_ans)

  def testTensorCompareTensor(self):
    x = np.linspace(-15, 15, 6).reshape(1, 3, 2)  # pylint: disable=too-many-function-args
    y = np.linspace(20, -10, 6).reshape(1, 3, 2)  # pylint: disable=too-many-function-args
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
      with self.subTest(t=t):
        xt = x.astype(t)
        yt = y.astype(t)
        self._compare(xt, yt, np.less, math_ops.less)
        self._compare(xt, yt, np.less_equal, math_ops.less_equal)
        self._compare(xt, yt, np.greater, math_ops.greater)
        self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
        self._compare(xt, yt, np.equal, math_ops.equal)
        self._compare(xt, yt, np.not_equal, math_ops.not_equal)
    # Complex types do not support ordering but do support equality tests.
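    # The values below have the form a - a*j, so both the real and imaginary
    # components must match for equal/not_equal to pass.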
    for t in [np.complex64, np.complex128]:
      with self.subTest(t=t):
        xt = x.astype(t)
        xt -= 1j * xt
        yt = y.astype(t)
        yt -= 1j * yt
        self._compare(xt, yt, np.equal, math_ops.equal)
        self._compare(xt, yt, np.not_equal, math_ops.not_equal)

  def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
    x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
    y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
    if dtype in (np.complex64, np.complex128):
      x -= 1j * x
      y -= 1j * y
    self._compare(x, y, np_func, tf_func)
    self._compare(y, x, np_func, tf_func)

  def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
    shapes = [
        ([1, 3, 2], [1]),
        ([1, 3, 2], [2]),
        ([1, 3, 2], [3, 2]),
        ([1, 3, 2], [3, 1]),
        ([1, 3, 2], [1, 3, 2]),
        ([1, 3, 2], [2, 3, 1]),
        ([1, 3, 2], [2, 1, 1]),
        ([1, 3, 2], [1, 3, 1]),
        ([2, 1, 5], [2, 3, 1]),
        ([2, 0, 5], [2, 0, 1]),
        ([2, 3, 0], [2, 3, 1]),
    ]
    dtypes = [
        np.float16,
        np.float32,
        np.float64,
        np.int32,
        np.int64,
    ]
    if include_complex:
      dtypes.extend([np.complex64, np.complex128])

    for (xs, ys) in shapes:
      for dtype in dtypes:
        with self.subTest(xs=xs, ys=ys, dtype=dtype):
          self._compareBCast(xs, ys, dtype, np_func, tf_func)

  def testBCastLess(self):
    self._testBCastByFunc(np.less, math_ops.less)

  def testBCastLessEqual(self):
    self._testBCastByFunc(np.less_equal, math_ops.less_equal)

  def testBCastGreater(self):
    self._testBCastByFunc(np.greater, math_ops.greater)

  def testBCastGreaterEqual(self):
    self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)

  def testBCastEqual(self):
    self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)

  def testBCastNotEqual(self):
    self._testBCastByFunc(
        np.not_equal, math_ops.not_equal, include_complex=True)

  def testShapeMismatch(self):
    dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
    funcs = [
        math_ops.less, math_ops.less_equal, math_ops.greater,
        math_ops.greater_equal, math_ops.equal, math_ops.not_equal
    ]
    x = np.arange(0, 10).reshape([2, 5])
    y = np.arange(0, 10).reshape([5, 2])
    for t in dtypes:
      for f in funcs:
        with self.subTest(t=t, f=f):
          with self.assertRaisesIncompatibleShapesError(
              (ValueError, errors.InvalidArgumentError)):
            f(x.astype(t), y.astype(t))


class LogicalOpTest(test.TestCase):

  def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
    np_ans = np_func(x, y)
    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_val = self.evaluate(out)
    self.assertEqual(out.dtype, dtypes_lib.bool)
    self.assertAllEqual(np_ans, tf_val)
    self.assertShapeEqual(np_ans, out)

  def _not(self, x, use_gpu=False):
    np_ans = np.logical_not(x)
    with test_util.device(use_gpu=use_gpu):
      out = math_ops.logical_not(ops.convert_to_tensor(x))
      tf_val = self.evaluate(out)
    self.assertEqual(out.dtype, dtypes_lib.bool)
    self.assertAllEqual(np_ans, tf_val)
    self.assertShapeEqual(np_ans, out)

  def testScalar(self):
    data = [np.array([True]), np.array([False])]
    for use_gpu in [True, False]:
      for x in data:
        with self.subTest(use_gpu=use_gpu, x=x):
          self._not(x, use_gpu)
      for x in data:
        for y in data:
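          # Exercises all four Boolean input combinations for each binary op.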
          with self.subTest(use_gpu=use_gpu, x=x, y=y):
            self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
                                use_gpu)
            self._compareBinary(x, y, np.logical_or, math_ops.logical_or,
                                use_gpu)
            self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
                                use_gpu)

  def testTensor(self):
    x = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)  # pylint: disable=too-many-function-args
    y = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)  # pylint: disable=too-many-function-args
    for use_gpu in [True, False]:
      with self.subTest(use_gpu=use_gpu):
        self._not(x, use_gpu)
        self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
        self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
        self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)

  def testBCast(self):
    shapes = [
        ([1, 3, 2], [1]),
        ([1, 3, 2], [2]),
        ([1, 3, 2], [3, 2]),
        ([1, 3, 2], [3, 1]),
        ([1, 3, 2], [1, 3, 2]),
        ([1, 3, 2], [2, 3, 1]),
        ([1, 3, 2], [2, 1, 1]),
        ([1, 3, 2], [1, 3, 1]),
        ([2, 1, 5], [2, 3, 1]),
        ([2, 0, 5], [2, 0, 1]),
        ([2, 3, 0], [2, 3, 1]),
    ]
    for (xs, ys) in shapes:
      x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool_).reshape(xs)
      y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool_).reshape(ys)
      for use_gpu in [True, False]:
        with self.subTest(xs=xs, ys=ys, use_gpu=use_gpu):
          self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
                              use_gpu)
          self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
          self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
                              use_gpu)

  @test_util.run_deprecated_v1
  def testShapeMismatch(self):
    x = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)  # pylint: disable=too-many-function-args
    y = np.random.randint(0, 2, 6).astype(np.bool_).reshape(3, 2, 1)  # pylint: disable=too-many-function-args
    for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:
      with self.subTest(f=f):
        with self.assertRaisesWithPredicateMatch(
            ValueError, lambda e: "Dimensions must" in str(e)):
          f(x, y)

  @test_util.run_deprecated_v1
  def testUsingAsPythonValueFails(self):
    # Ensure that we raise an error when the user attempts to treat a
    # `Tensor` as a Python `bool`.
    b = constant_op.constant(False)
    with self.assertRaises(TypeError):
      if b:
        pass

    x = constant_op.constant(3)
    y = constant_op.constant(4)
    with self.assertRaises(TypeError):
      if x > y:
        pass

    z = constant_op.constant(7)

    # The chained comparison should fail because Python computes `x <
    # y` and short-circuits the comparison with `z` if it is `False`.
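    # That is, `x < y < z` evaluates as `(x < y) and (y < z)`, and the `and`
    # forces a `bool()` conversion of the Tensor `x < y`, which raises.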
    with self.assertRaises(TypeError):
      _ = x < y < z


class SelectOpTest(test.TestCase):

  def _compare(self, fn, c, x, y, use_gpu):
    np_ans = np.where(c, x, y)
    with test_util.device(use_gpu=use_gpu):
      out = fn(c, x, y)
      tf_ans = self.evaluate(out)
    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, out)

  def _compareGradientX(self,
                        fn,
                        c,
                        x,
                        y,
                        numeric_gradient_type=None,
                        x_init_value=None):
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = fn(c, inx, iny)
      s = list(np.shape(c))
      if x_init_value is None:
        x_init_value = x
      if x.shape != y.shape:
        x_init_value = np.broadcast_to(y, x.shape)
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, s, out, s, x_init_value=x_init_value)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = fn(c, inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inxf, s, outf, s, x_init_value=xf)
        jacob_n = jacob_n.astype(x.dtype)
      if x.dtype == np.float16:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float32:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _compareGradientY(self, fn, c, x, y, numeric_gradient_type=None):
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = fn(c, inx, iny)
      s = list(np.shape(c))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, s, out, s, x_init_value=x, delta=1.0)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = fn(c, inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inyf, s, outf, s, x_init_value=yf)
        jacob_n = jacob_n.astype(x.dtype)
      if x.dtype == np.float16:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float32:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _testScalar(self, fn):
    c = True
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 3, 2) * 100
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      with self.subTest(t=t):
        xt = x.astype(t)
        yt = y.astype(t)
        self._compare(fn, c, xt, yt, use_gpu=False)
        if t in [np.float16, np.float32, np.float64]:
          self._compare(fn, c, xt, yt, use_gpu=True)

  def testScalar(self):
    self._testScalar(array_ops.where)
    self._testScalar(array_ops.where_v2)

  def _testScalarBroadcast(self, fn, c, x, y):
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      with self.subTest(t=t):
        xt = x.astype(t)
        yt = y.astype(t)
        self._compare(fn, c, xt, yt, use_gpu=False)
        if t in [np.float16, np.float32, np.float64]:
          self._compare(fn, c, xt, yt, use_gpu=True)

  def testScalarBroadcast(self):
    c = True
    # where_v2 only
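    # Each (x, y) pair below exercises a different broadcast combination
    # against a scalar condition.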
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 1, 1) * 100
    self._testScalarBroadcast(array_ops.where_v2, c, x, y)
    self._testScalarBroadcast(array_ops.where_v2, c, y, x)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 3, 1) * 100
    self._testScalarBroadcast(array_ops.where_v2, c, x, y)
    self._testScalarBroadcast(array_ops.where_v2, c, y, x)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 1, 2) * 100
    self._testScalarBroadcast(array_ops.where_v2, c, x, y)
    self._testScalarBroadcast(array_ops.where_v2, c, y, x)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 1) * 100
    self._testScalarBroadcast(array_ops.where_v2, c, x, y)
    self._testScalarBroadcast(array_ops.where_v2, c, y, x)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1) * 100
    self._testScalarBroadcast(array_ops.where_v2, c, x, y)
    self._testScalarBroadcast(array_ops.where_v2, c, y, x)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 2) * 100
    self._testScalarBroadcast(array_ops.where_v2, c, x, y)
    self._testScalarBroadcast(array_ops.where_v2, c, y, x)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(3, 2) * 100
    self._testScalarBroadcast(array_ops.where_v2, c, x, y)
    self._testScalarBroadcast(array_ops.where_v2, c, y, x)

  def _testBasic(self, fn):
    c = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)  # pylint: disable=too-many-function-args
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 3, 2) * 100
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      with self.subTest(t=t):
        xt = x.astype(t)
        yt = y.astype(t)
        self._compare(fn, c, xt, yt, use_gpu=False)
        if t in [np.float16, np.float32, np.float64]:
          self._compare(fn, c, xt, yt, use_gpu=True)

  def testBasic(self):
    self._testBasic(array_ops.where)
    self._testBasic(array_ops.where_v2)

  def _testBasicBroadcast(self, fn, c, x, y):
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      with self.subTest(t=t):
        xt = x.astype(t)
        yt = y.astype(t)
        self._compare(fn, c, xt, yt, use_gpu=False)
        if t in [np.float16, np.float32, np.float64]:
          self._compare(fn, c, xt, yt, use_gpu=True)

  def testBasicBroadcast(self):
    c0 = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)  # pylint: disable=too-many-function-args
    c1 = np.random.randint(0, 2, 2).astype(np.bool_).reshape(1, 1, 2)  # pylint: disable=too-many-function-args
    c2 = np.random.randint(0, 2, 3).astype(np.bool_).reshape(1, 3, 1)  # pylint: disable=too-many-function-args
    c3 = np.random.randint(0, 2, 1).astype(np.bool_).reshape(1, 1, 1)  # pylint: disable=too-many-function-args
    for c in [c0, c1, c2, c3]:
      # where_v2 only
      with self.subTest(c=c):
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1, 1, 1) * 100
        self._testBasicBroadcast(array_ops.where_v2, c, x, y)
        self._testBasicBroadcast(array_ops.where_v2, c, y, x)
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1, 3, 1) * 100
        self._testBasicBroadcast(array_ops.where_v2, c, x, y)
        self._testBasicBroadcast(array_ops.where_v2, c, y, x)
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1, 1, 2) * 100
        self._testBasicBroadcast(array_ops.where_v2, c, x, y)
        self._testBasicBroadcast(array_ops.where_v2, c, y, x)
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1, 1) * 100
        self._testBasicBroadcast(array_ops.where_v2, c, x, y)
        self._testBasicBroadcast(array_ops.where_v2, c, y, x)
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1) * 100
        self._testBasicBroadcast(array_ops.where_v2, c, x, y)
        self._testBasicBroadcast(array_ops.where_v2, c, y, x)
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1, 2) * 100
        self._testBasicBroadcast(array_ops.where_v2, c, x, y)
        self._testBasicBroadcast(array_ops.where_v2, c, y, x)
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(3, 2) * 100
        self._testBasicBroadcast(array_ops.where_v2, c, x, y)
        self._testBasicBroadcast(array_ops.where_v2, c, y, x)
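
  # Note: `array_ops.where_v2` broadcasts `c`, `x`, and `y` to a common shape
  # (numpy-style), whereas the v1 `array_ops.where` requires `c` to match the
  # shape of `x`/`y`, or to be a vector indexing the first dimension; that
  # vector form is covered by BatchSelectOpTest below.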
  def _testGradients(self, fn):
    c = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)  # pylint: disable=too-many-function-args
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 3, 2) * 100
    for t in [np.float16, np.float32, np.float64]:
      with self.subTest(t=t):
        xt = x.astype(t)
        yt = y.astype(t)
        if t == np.float16:
          # Compare fp16 theoretical gradients to fp32 numerical gradients,
          # since fp16 numerical gradients are too imprecise unless great
          # care is taken with choosing the inputs and the delta. This is
          # a weaker check (in particular, it does not test the op itself,
          # only its gradient), but it's much better than nothing.
          self._compareGradientX(fn, c, xt, yt, np.float64)
          self._compareGradientY(fn, c, xt, yt, np.float64)
        else:
          self._compareGradientX(fn, c, xt, yt)
          self._compareGradientY(fn, c, xt, yt)

  @test_util.run_deprecated_v1
  def testGradients(self):
    self._testGradients(array_ops.where)
    self._testGradients(array_ops.where_v2)

  @test_util.run_deprecated_v1
  def testGradientsBroadcast(self):
    c = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)  # pylint: disable=too-many-function-args
    for t in [np.float32, np.float64]:
      # where_v2 only
      with self.subTest(t=t):
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1, 1, 1) * 100
        self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1, 3, 1) * 100
        self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1, 1, 2) * 100
        self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1, 1) * 100
        self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1) * 100
        self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(1, 2) * 100
        self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
        x = np.random.rand(1, 3, 2) * 100
        y = np.random.rand(3, 2) * 100
        self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))

  def _testShapeMismatch(self, fn):
    c = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)  # pylint: disable=too-many-function-args
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(2, 5, 3) * 100
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      with self.subTest(t=t):
        xt = x.astype(t)
        yt = y.astype(t)
        with self.assertRaises(ValueError):
          fn(c, xt, yt)
  @test_util.run_deprecated_v1
  def testShapeMismatch(self):
    self._testShapeMismatch(array_ops.where)
    self._testShapeMismatch(array_ops.where_v2)

  def _testEmptyTensor(self, fn):
    c = np.random.randint(0, 3, 0).astype(np.bool_).reshape(1, 3, 0)  # pylint: disable=too-many-function-args
    x = np.random.rand(1, 3, 0) * 100
    y = np.random.rand(1, 3, 0) * 100
    z_expected = np.zeros((1, 3, 0), dtype=np.float32)
    with self.cached_session():
      xt = x.astype(np.float32)
      yt = y.astype(np.float32)
      z = fn(c, xt, yt).eval()
      self.assertAllEqual(z_expected, z)

  @test_util.run_deprecated_v1
  def testEmptyTensor(self):
    self._testEmptyTensor(array_ops.where)
    self._testEmptyTensor(array_ops.where_v2)

  def _testNan(self, fn):
    with self.cached_session():
      for c in False, True:
        for a in 7.0, np.nan:
          for b in 5.0, np.nan:
            with self.subTest(c=c, a=a, b=b):
              x = fn(c, a, b).eval()
              y = a if c else b
              self.assertEqual(np.isnan(x), np.isnan(y))

  @test_util.run_deprecated_v1
  def testNan(self):
    """Verify that nans don't propagate where they shouldn't."""
    self._testNan(array_ops.where)
    self._testNan(array_ops.where_v2)


class BatchSelectOpTest(test.TestCase):
  """Test broadcasting of Select when 'c' is a vec and 't' & 'e' are rank 2+."""

  def _compare(self, c, x, y, use_gpu):
    np_ans = np.dstack(
        [x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
            [2, 0, 1])
    with test_util.device(use_gpu=use_gpu):
      out = array_ops.where(c, x, y)
      tf_ans = self.evaluate(out)
    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, out)

  def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = array_ops.where(c, inx, iny)
      s = list(np.shape(x))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, s, out, s, x_init_value=x)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = array_ops.where(c, inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inxf, s, outf, s, x_init_value=xf)
        jacob_n = jacob_n.astype(x.dtype)
      if x.dtype == np.float16:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float32:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = array_ops.where(c, inx, iny)
      s = list(np.shape(x))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, s, out, s, x_init_value=y)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = array_ops.where(c, inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inyf, s, outf, s, x_init_value=yf)
        jacob_n = jacob_n.astype(x.dtype)
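      # Tolerances track precision: ~1e-3 for fp16/fp32, ~1e-5 for fp64.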
      if x.dtype == np.float16:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float32:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def testBasic(self):
    c = np.random.randint(0, 2, 16).astype(np.bool_)
    x = np.random.rand(16, 2, 8) * 100
    y = np.random.rand(16, 2, 8) * 100
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      with self.subTest(t=t):
        xt = x.astype(t)
        yt = y.astype(t)
        self._compare(c, xt, yt, use_gpu=False)
        if t in [np.float16, np.float32, np.float64]:
          self._compare(c, xt, yt, use_gpu=True)

  @test_util.run_deprecated_v1
  def testGradients(self):
    c = np.random.randint(0, 2, 16).astype(np.bool_)
    x = np.random.rand(16, 2, 8) * 100
    y = np.random.rand(16, 2, 8) * 100
    for t in [np.float16, np.float32, np.float64]:
      with self.subTest(t=t):
        xt = x.astype(t)
        yt = y.astype(t)
        if t == np.float16:
          # Compare fp16 theoretical gradients to fp32 numerical gradients,
          # since fp16 numerical gradients are too imprecise unless great
          # care is taken with choosing the inputs and the delta. This is
          # a weaker check (in particular, it does not test the op itself,
          # only its gradient), but it's much better than nothing.
          self._compareGradientX(c, xt, yt, np.float64)
          self._compareGradientY(c, xt, yt, np.float64)
        else:
          self._compareGradientX(c, xt, yt)
          self._compareGradientY(c, xt, yt)

  @test_util.run_deprecated_v1
  def testShapeMismatch(self):
    c = np.random.randint(0, 2, 8).astype(np.bool_)
    x = np.random.rand(16, 3, 2) * 100
    y = np.random.rand(16, 3, 2) * 100
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      with self.subTest(t=t):
        xt = x.astype(t)
        yt = y.astype(t)
        with self.assertRaises(ValueError):
          array_ops.where(c, xt, yt)


@test_util.with_eager_op_as_function
class MinMaxOpTest(test.TestCase):

  def _compare(self, x, y, use_gpu):
    np_min, np_max = np.minimum(x, y), np.maximum(x, y)
    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      omin, omax = math_ops.minimum(inx, iny), math_ops.maximum(inx, iny)
      tf_min, tf_max = self.evaluate([omin, omax])
    self.assertAllEqual(np_min, tf_min)
    self.assertAllEqual(np_max, tf_max)

  def testBasic(self):
    x = np.random.rand(1, 3, 2) * 100.
    y = np.random.rand(1, 3, 2) * 100.
    for t in [np.float16, np.float32, np.float64, np.int8, np.uint8, np.int16,
              np.uint16, np.int32, np.uint32, np.int64, np.uint64]:
      with self.subTest(t=t):
        self._compare(x.astype(t), y.astype(t), use_gpu=False)
        self._compare(x.astype(t), y.astype(t), use_gpu=True)

  # When eager_op_as_function mode is enabled xla auto-clustering kicks in.
  # By default xla enables fast min_max computations which do not propagate NaN.
  # TODO(b/205140614): remove decorators once TF and XLA behaviour are the same.
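  # For example, with fast min_max enabled, maximum(NaN, 1.0) may return 1.0
  # rather than NaN.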
  @test_util.set_xla_env_flag(flag="--xla_cpu_enable_fast_min_max=false")
  @test_util.set_xla_env_flag(flag="--xla_gpu_enable_fast_min_max=false")
  def testNaNPropagation(self):
    x = np.array([1., np.nan, 1., np.nan], dtype=np.float64)
    y = np.array([1., 1., np.nan, np.nan], dtype=np.float64)
    for t in [np.float16, np.float32, np.float64]:
      with self.subTest(t=t):
        self._compare(x.astype(t), y.astype(t), use_gpu=False)
        self._compare(x.astype(t), y.astype(t), use_gpu=True)

  def testDifferentShapes(self):
    x = np.random.rand(1, 3, 2) * 100.
    y = np.random.rand(2) * 100.  # should broadcast
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
      with self.subTest(t=t):
        self._compare(x.astype(t), y.astype(t), use_gpu=False)
        self._compare(x.astype(t), y.astype(t), use_gpu=True)

  def testScalar(self):
    x = np.random.rand(1, 3, 2) * 100.
    y = np.random.rand(1).item() * 100.  # should broadcast
    # dropped np.float64, int64 because TF automatically converts to 32 bit
    for t in [np.float32, np.int32]:
      with self.subTest(t=t):
        self._compare(x.astype(t), t(y), use_gpu=False)
        self._compare(x.astype(t), t(y), use_gpu=True)

  def _compareGradientX(self, func, x, y):
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = func(inx, iny)
      s = list(np.shape(x))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, s, out, s, x_init_value=x)
      if x.dtype == np.float16:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float32:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _compareGradientY(self, func, x, y):
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = func(inx, iny)
      s = list(np.shape(x))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, s, out, s, x_init_value=y)
      if x.dtype == np.float16:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float32:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  @test_util.run_deprecated_v1
  def testGradients(self):
    x = np.random.rand(1, 3, 2) * 100.
    # ensure x != y
    y = x + (np.random.randint(2, size=x.shape) - .5) * 2  # -1 or +1
    self._compareGradientX(math_ops.maximum, x, y)
    self._compareGradientY(math_ops.maximum, x, y)
    self._compareGradientX(math_ops.minimum, x, y)
    self._compareGradientY(math_ops.minimum, x, y)


class MathOpsOverloadTest(test.TestCase):

  def _computeTensorAndLiteral(self, x, y, dtype, func):
    with test_util.force_cpu():
      inx = ops.convert_to_tensor(x, dtype=dtype)
      z = func(inx, y)  # Should use __add__, __sub__, etc.
      return self.evaluate(z)

  def _computeLiteralAndTensor(self, x, y, dtype, func):
    with test_util.force_cpu():
      iny = ops.convert_to_tensor(y, dtype=dtype)
      z = func(x, iny)  # Should use __radd__, __rsub__, etc.
      return self.evaluate(z)
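
  # Python resolves `3 + tensor` by first trying (3).__add__(tensor), which
  # returns NotImplemented, and then falling back to Tensor.__radd__.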
  def _compareBinary(self, x, y, dtype, np_func, tf_func):
    # astype and assertAllClose do not properly handle bfloat16 values
    np_ans = np_func(x, y)
    if np_func != np.true_divide:
      # for true_divide the result is a float, even for integer args.
      np_ans = np_ans.astype(np.float32 if dtype == dtypes_lib.bfloat16
                             else dtype.as_numpy_dtype)
    rtol = 1e-2 if dtype in (dtypes_lib.bfloat16, dtypes_lib.float16) else 1e-6
    self.assertAllClose(np_ans,
                        self._computeTensorAndLiteral(x, y, dtype, tf_func),
                        rtol=rtol)
    self.assertAllClose(np_ans,
                        self._computeLiteralAndTensor(x, y, dtype, tf_func),
                        rtol=rtol)

  def _compareUnary(self, x, dtype, np_func, tf_func):
    np_ans = np_func(x).astype(dtype.as_numpy_dtype)
    with test_util.force_cpu():
      self.assertAllClose(
          np_ans, self.evaluate(tf_func(ops.convert_to_tensor(x, dtype=dtype))))

  def testOverload(self):
    dtypes = [
        dtypes_lib.float16,
        dtypes_lib.float32,
        dtypes_lib.float64,
        dtypes_lib.bfloat16,
        dtypes_lib.uint8,
        dtypes_lib.uint16,
        dtypes_lib.uint32,
        dtypes_lib.uint64,
        dtypes_lib.int8,
        dtypes_lib.int16,
        dtypes_lib.int32,
        dtypes_lib.int64,
        dtypes_lib.complex64,
        dtypes_lib.complex128,
    ]
    funcs = [
        (np.add, _ADD),
        (np.subtract, _SUB),
        (np.multiply, _MUL),
        (np.power, _POW),
        (np.true_divide, _TRUEDIV),
        (np.floor_divide, _FLOORDIV),
        (np.mod, _MOD),
    ]
    for dtype in dtypes:
      for np_func, tf_func in funcs:
        with self.subTest(dtype=dtype, np_func=np_func, tf_func=tf_func):
          if dtype in (dtypes_lib.complex64,
                       dtypes_lib.complex128) and tf_func in (_FLOORDIV, _MOD):
            continue  # floordiv makes no sense for complex
          if dtype in (dtypes_lib.uint8, dtypes_lib.uint16, dtypes_lib.uint32,
                       dtypes_lib.uint64) and tf_func == _POW:
            continue  # power not supported for unsigned types
          self._compareBinary(10, 3, dtype, np_func, tf_func)

  def testOverloadComparisons(self):
    dtypes = [
        dtypes_lib.float16,
        dtypes_lib.float32,
        dtypes_lib.float64,
        dtypes_lib.uint8,
        dtypes_lib.uint16,
        dtypes_lib.uint32,
        dtypes_lib.uint64,
        dtypes_lib.int8,
        dtypes_lib.int16,
        dtypes_lib.int32,
        dtypes_lib.int64,
    ]
    funcs = [
        (np.less, _LT),
        (np.less_equal, _LE),
        (np.greater, _GT),
        (np.greater_equal, _GE),
    ]
    for dtype in dtypes:
      for np_func, tf_func in funcs:
        with self.subTest(dtype=dtype, np_func=np_func, tf_func=tf_func):
          self._compareBinary(10, 5, dtype, np_func, tf_func)
    logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),
                     (np.logical_xor, _XOR), (np.equal, math_ops.equal),
                     (np.not_equal, math_ops.not_equal)]
    for np_func, tf_func in logical_funcs:
      with self.subTest(np_func=np_func, tf_func=tf_func):
        self._compareBinary(True, False, dtypes_lib.bool, np_func, tf_func)
        self._compareBinary(True, True, dtypes_lib.bool, np_func, tf_func)
        self._compareBinary(False, False, dtypes_lib.bool, np_func, tf_func)
        self._compareBinary(False, True, dtypes_lib.bool, np_func, tf_func)
        self._compareBinary([True, True, False, False],
                            [True, False, True, False], dtypes_lib.bool,
                            np_func, tf_func)
    self._compareUnary(True, dtypes_lib.bool, np.logical_not, _INV)
    self._compareUnary(False, dtypes_lib.bool, np.logical_not, _INV)
    self._compareUnary([True, False], dtypes_lib.bool, np.logical_not, _INV)


class IsFiniteInfNanTest(test.TestCase):

  def _compare(self, x, use_gpu):
    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(x)
      ofinite, oinf, onan = math_ops.is_finite(inx), math_ops.is_inf(
          inx), math_ops.is_nan(inx)
      tf_finite, tf_inf, tf_nan = self.evaluate([ofinite, oinf, onan])
    if x.dtype == dtypes_lib.bfloat16.as_numpy_dtype:
      # Numpy will implicitly convert bfloat16 value to float16, so we cast to
      # float32 to avoid this.
      x = x.astype(np.float32)
    np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
    self.assertAllEqual(np_inf, tf_inf)
    self.assertAllEqual(np_nan, tf_nan)
    self.assertAllEqual(np_finite, tf_finite)
    self.assertShapeEqual(np_inf, oinf)
    self.assertShapeEqual(np_nan, onan)
    self.assertShapeEqual(np_finite, ofinite)

  def _testDtype(self, dtype):
    if dtype != dtypes_lib.bfloat16.as_numpy_dtype:
      fi = np.finfo(dtype)
      data = np.array([
          0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max, -np.inf,
          np.inf, np.nan
      ]).astype(dtype)
    else:
      # np.finfo does not support bfloat16
      data = np.array([
          0, -1, 1, 0.01, -0.01, -3.3895e+38, 3.3895e+38, -np.inf, np.inf,
          np.nan
      ]).astype(dtype)
    self._compare(data, use_gpu=False)
    self._compare(data, use_gpu=True)

  def testHalf(self):
    self._testDtype(np.float16)

  def testFloat(self):
    self._testDtype(np.float32)

  def testDouble(self):
    self._testDtype(np.float64)

  def testBfloat16(self):
    self._testDtype(dtypes_lib.bfloat16.as_numpy_dtype)

  def testSqrt(self):
    for dtype in [np.float16, np.float32, np.float64]:
      fi = np.finfo(dtype)
      for size in [1, 3, 4, 7, 8, 63, 64, 65]:
        # For float32 Eigen uses Carmack's fast vectorized sqrt algorithm.
        # It is not accurate for very large arguments, so we test for
        # fi.max/100 instead of fi.max here.
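        # The values below cover negative inputs (where sqrt must return NaN),
        # zero, the smallest positive normal (fi.tiny), moderate magnitudes,
        # and large-but-representable arguments.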
        for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:
          with self.subTest(dtype=dtype, size=size, value=value):
            x = np.full((size,), value, dtype=dtype)
            np_y = np.sqrt(x)
            np_nan = np.isnan(np_y)
            with test_util.use_gpu():
              tf_y = math_ops.sqrt(x)
              tf_nan = math_ops.is_nan(tf_y)
              if value < 0:
                self.assertAllEqual(np_nan, self.evaluate(tf_nan))
              else:
                self.assertAllCloseAccordingToType(np_y, self.evaluate(tf_y))


class RoundingTest(test.TestCase):

  def _compare_values(self, x, y=None):
    y = np.rint(x) if y is None else np.asarray(y)

    tf_rint = math_ops.rint(x)
    np_rint = self.evaluate(tf_rint)

    self.assertAllEqual(y, np_rint)
    self.assertShapeEqual(y, tf_rint)

  def _compare(self, x):
    np_floor, np_ceil = np.floor(x), np.ceil(x)

    inx = ops.convert_to_tensor(x)
    ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
    tf_floor, tf_ceil = self.evaluate([ofloor, oceil])

    self.assertAllEqual(np_floor, tf_floor)
    self.assertAllEqual(np_ceil, tf_ceil)
    self.assertShapeEqual(np_floor, ofloor)
    self.assertShapeEqual(np_ceil, oceil)

  def _testDtype(self, dtype):
    data = (np.arange(-3, 3) / 4.).reshape(1, 3, 2).astype(dtype)
    self._compare(data)
    # TODO(reedwm): rint op is not supported for float16 and bfloat16
    if dtype in (np.float16, dtypes_lib.bfloat16.as_numpy_dtype):
      return
    self._compare_values(data)
    x = [0.5, 0.5000001]
    y = [0.0, 1.0]
    self._compare_values(x, y=y)

    # numpy example
    x = [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]
    y = [-2., -2., -0., 0., 2., 2., 2.]
    self._compare_values(x, y=y)

  def testTypes(self):
    for dtype in [np.float16, np.float32, np.float64,
                  dtypes_lib.bfloat16.as_numpy_dtype]:
      with self.subTest(dtype=dtype):
        self._testDtype(dtype)


class ComplexMakeRealImagTest(test.TestCase):

  def _compareMake(self, real, imag, use_gpu):
    np_ans = real + (1j) * imag

    with test_util.device(use_gpu=use_gpu):
      real = ops.convert_to_tensor(real)
      imag = ops.convert_to_tensor(imag)
      tf_ans = math_ops.complex(real, imag)
      out = self.evaluate(tf_ans)

    self.assertAllEqual(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans)

  def testMake(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    for use_gpu in [False, True]:
      with self.subTest(use_gpu=use_gpu):
        self._compareMake(real, imag, use_gpu)
        self._compareMake(real, 12.0, use_gpu)
        self._compareMake(23.0, imag, use_gpu)

  def testRealImagNumericType(self):
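    # math_ops.real/imag also accept plain Python numbers; np.real/np.imag on
    # the original value serves as the reference.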
    for use_gpu in [True, False]:
      for value in [1., 1j, 1. + 1j]:
        with self.subTest(use_gpu=use_gpu, value=value):
          np_real, np_imag = np.real(value), np.imag(value)
          with test_util.device(use_gpu=use_gpu):
            tf_real = math_ops.real(value)
            tf_imag = math_ops.imag(value)
            self.assertAllEqual(np_real, self.evaluate(tf_real))
            self.assertAllEqual(np_imag, self.evaluate(tf_imag))

  def _compareRealImag(self, cplx, use_gpu):
    np_real, np_imag = np.real(cplx), np.imag(cplx)
    np_zeros = np_real * 0

    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(cplx)
      tf_real = math_ops.real(inx)
      tf_imag = math_ops.imag(inx)
      tf_real_real = math_ops.real(tf_real)
      tf_imag_real = math_ops.imag(tf_real)
      self.assertAllEqual(np_real, self.evaluate(tf_real))
      self.assertAllEqual(np_imag, self.evaluate(tf_imag))
      self.assertAllEqual(np_real, self.evaluate(tf_real_real))
      self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))

  def testRealImag64(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    cplx = real + 1j * imag
    self._compareRealImag(cplx, use_gpu=False)
    self._compareRealImag(cplx, use_gpu=True)

  def testRealImag128(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
    cplx = real + 1j * imag
    self._compareRealImag(cplx, use_gpu=False)
    self._compareRealImag(cplx, use_gpu=True)

  def _compareAngle(self, cplx, use_gpu):
    np_angle = np.angle(cplx)

    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(cplx)
      tf_angle = math_ops.angle(inx)
      tf_angle_val = self.evaluate(tf_angle)

    self.assertAllClose(np_angle, tf_angle_val)
    self.assertShapeEqual(np_angle, tf_angle)

  def testAngle(self):
    mag = np.random.rand(10).astype(np.float32)
    angle = (2 * np.pi * np.arange(10) / 10.).astype(np.float32)
    cplx = mag * np.exp(1j * angle)
    cplx = np.append(cplx, [1., 1.j, -1., -1.j])
    self._compareAngle(cplx, use_gpu=False)
    self._compareAngle(cplx, use_gpu=True)
    real = (np.arange(-2, 2) / 2.).astype(np.float64)
    self._compareAngle(real, use_gpu=False)
    self._compareAngle(real, use_gpu=True)

  def testAngle64(self):
    mag = np.random.rand(10).astype(np.float64)
    angle = (2 * np.pi * np.arange(10) / 100.).astype(np.float64)
    cplx = mag * np.exp(1j * angle)
    cplx = np.append(cplx, [1., 1.j, -1., -1.j])
    self._compareAngle(cplx, use_gpu=False)
    self._compareAngle(cplx, use_gpu=True)
    real = (np.arange(-2, 2) / 2.).astype(np.float64)
    self._compareAngle(real, use_gpu=False)
    self._compareAngle(real, use_gpu=True)

  @test_util.run_deprecated_v1
  def testRealReal(self):
    for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,
                  dtypes_lib.float64):
      with self.subTest(dtype=dtype):
        x = array_ops.placeholder(dtype)
        y = math_ops.real(x)
        self.assertEqual(x, y)

  def _compareConj(self, cplx, use_gpu):
    np_ans = np.conj(cplx)
    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(cplx)
      tf_conj = math_ops.conj(inx)
      tf_ans = self.evaluate(tf_conj)
    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, tf_conj)

  def testConj64(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    cplx = real + 1j * imag
    self._compareConj(cplx, use_gpu=False)
    self._compareConj(cplx, use_gpu=True)

  def testConj128(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
    cplx = real + 1j * imag
    self._compareConj(cplx, use_gpu=False)
    self._compareConj(cplx, use_gpu=True)

  @test_util.run_deprecated_v1
  def testConjReal(self):
    for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float16,
                  dtypes_lib.float32, dtypes_lib.float64):
      with self.subTest(dtype=dtype):
        x = array_ops.placeholder(dtype)
        y = math_ops.conj(x)
        self.assertEqual(x, y)

  @test_util.run_deprecated_v1
  def testConjString(self):
    x = array_ops.placeholder(dtypes_lib.string)
    with self.assertRaisesRegex(TypeError,
                                r"Expected numeric or variant tensor"):
      math_ops.conj(x)

  def _compareGradient(self, x):
    # x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
    # complex numbers. Then, we extract real and imag parts and
    # compute the squared sum. This is obviously the same as sum(real
    # * real) + sum(imag * imag). We just want to make sure the
    # gradient function is checked.
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)
      real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])
      cplx = math_ops.complex(real, imag)
      cplx = math_ops.conj(cplx)
      loss = math_ops.reduce_sum(math_ops.square(
          math_ops.real(cplx))) + math_ops.reduce_sum(
              math_ops.square(math_ops.imag(cplx)))
      epsilon = 1e-3
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
      self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)

  def _compareBroadcastGradient(self, x):
    x_ = ops.convert_to_tensor(x)
    epsilon = 1e-3
    with self.cached_session():
      for args in [(x_, 0.), (0., x_)]:
        with self.subTest(args=args):
          z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))
          jacob_t, jacob_n = gradient_checker.compute_gradient(
              x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
          self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)

  @test_util.run_deprecated_v1
  def testGradient(self):
    # complex64
    data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)
    self._compareGradient(data)
    self._compareBroadcastGradient(data)
    # complex128
    data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float64)
    self._compareGradient(data)

  def _compareMulGradient(self, data):
    # data is a float matrix of shape [n, 4].  data[:, 0], data[:, 1],
    # data[:, 2], data[:, 3] are real parts of x, imaginary parts of
    # x, real parts of y and imaginary parts of y.
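    # For z = x * y: real(z) = xr*yr - xi*yi and imag(z) = xr*yi + xi*yr, so
    # the loss sum(real(z) + imag(z)) depends on all four input columns.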
    with self.cached_session():
      inp = ops.convert_to_tensor(data)
      xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)

      def vec(x):  # Reshape to a vector
        return array_ops.reshape(x, [-1])

      xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)

      def cplx(r, i):  # Combine to a complex vector
        return math_ops.complex(r, i)

      x, y = cplx(xr, xi), cplx(yr, yi)
      # z is x times y in complex plane.
      z = x * y
      # Defines the loss function as the sum of all coefficients of z.
      loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))
      epsilon = 0.005
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)
      self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)

  @test_util.run_deprecated_v1
  def testMulGradient(self):
    data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)
    self._compareMulGradient(data)


class PolyvalTest(test.TestCase):

  def _runtest(self, dtype, degree):
    x = np.random.rand(2, 2).astype(dtype)
    coeffs = [np.random.rand(2, 2).astype(dtype) for _ in range(degree + 1)]
    np_val = np.polyval(coeffs, x)
    with self.cached_session():
      tf_val = math_ops.polyval(coeffs, x)
      self.assertAllClose(np_val, self.evaluate(tf_val))

  def testSimple(self):
    for dtype in [
        np.int32, np.float32, np.float64, np.complex64, np.complex128
    ]:
      for degree in range(5):
        with self.subTest(dtype=dtype, degree=degree):
          self._runtest(dtype, degree)

  def testBroadcast(self):
    dtype = np.float32
    degree = 3
    shapes = [(1,), (2, 1), (1, 2), (2, 2)]
    for x_shape in shapes:
      for coeff_shape in shapes:
        with self.subTest(x_shape=x_shape, coeff_shape=coeff_shape):
          x = np.random.rand(*x_shape).astype(dtype)
          coeffs = [
              np.random.rand(*coeff_shape).astype(dtype)
              for _ in range(degree + 1)
          ]
          np_val = np.polyval(coeffs, x)
          with self.cached_session():
            tf_val = math_ops.polyval(coeffs, x)
            self.assertAllClose(np_val, self.evaluate(tf_val))

  def testEmpty(self):
    x = np.random.rand(2, 2).astype(np.float32)
    coeffs = []
    np_val = np.polyval(coeffs, x)
    with self.cached_session():
      tf_val = math_ops.polyval(coeffs, x)
      self.assertAllClose(np_val, self.evaluate(tf_val))

  def test_coeffs_raise(self):
    x = np.random.rand(2, 2).astype(np.float32)
    coeffs = {}
    with self.assertRaisesRegex(ValueError, "Argument coeffs must be list"):
      math_ops.polyval(coeffs, x)


if __name__ == "__main__":
  test.main()