# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Relu and ReluGrad."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent


def _elu_grad_grad(activation):
  if activation < 0:
    return np.exp(activation)
  return 0

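
# _elu_grad_grad above is the closed-form second derivative of ELU:
# elu(x) = exp(x) - 1 for x < 0 and x otherwise, so elu''(x) = exp(x) for
# x < 0 and 0 otherwise. The helper below is a hypothetical illustrative
# sketch (not used by the tests in this file): it cross-checks that formula
# with a central finite difference of the ELU first derivative, valid for
# inputs away from the kink at zero.
def _elu_grad_grad_finite_difference(activation, delta=1e-4):
  def elu_grad(x):
    return np.exp(x) if x < 0 else 1.0
  return (elu_grad(activation + delta) -
          elu_grad(activation - delta)) / (2.0 * delta)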

class ReluTest(test.TestCase):

  def _npRelu(self, np_features):
    return np.maximum(np_features, np.zeros(np_features.shape))

  def testNpRelu(self):
    self.assertAllClose(
        np.array([[0.0, 0.7, 0.0, 0.3, 0.0], [0.1, 0.0, 0.5, 0.0, 0.9]]),
        self._npRelu(
            np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
                                                     0.9]])))

  def _testRelu(self, np_features):
    np_relu = self._npRelu(np_features)
    tf_relu = nn_ops.relu(np_features)
    self.assertAllClose(np_relu, tf_relu)
    self.assertShapeEqual(np_relu, tf_relu)

  def testNumbersCPU(self):
    for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
      # Force execution on CPU even if a GPU kernel is available for the type.
      with ops.device("/device:CPU:0"):
        self._testRelu(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  def testNumbersGPU(self):
    if not test.is_gpu_available():
      self.skipTest("No GPU available")
    for t in [np.float16, np.float32, np.float64]:
      self._testRelu(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  def testReluInt8x4GoodShape(self):
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest("No GPU available")
    inputs = np.array([[-50, 7, 23, 0], [-1, -5, 6, 11]])
    np_relu = self._npRelu(inputs)
    tf_relu = nn_ops.relu(constant_op.constant(inputs, dtypes.qint8))
    self.assertAllClose(np_relu, tf_relu)
    self.assertShapeEqual(np_relu, tf_relu)

  @test_util.disable_xla("b/123338077")  # Passes with XLA
  def testReluInt8x4BadShape(self):
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest("No GPU available")
    inputs = constant_op.constant(
        np.array([[-50, 7, 23], [0, 1, -5], [6, -2, 11]]), dtypes.qint8)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Tensor size must be a multiple of 4 for Relu<qint8>. Got 9"):
      self.evaluate(nn_ops.relu(inputs))

    inputs = constant_op.constant(
        np.array([1, -2, 3, -4, 5, -6, 7, -8, 9, -8, 7, -6, 5, -4, 3, -2, 1]),
        dtypes.qint8)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Tensor size must be a multiple of 4 for Relu<qint8>. Got 17"):
      self.evaluate(nn_ops.relu(inputs))

  # The gradient test for ReLU is a bit tricky because the derivative is not
  # well defined at zero, so we keep the input values away from zero.
  def testGradientFloat32(self):
    with self.cached_session():
      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.relu, [x]))
    print("relu (float32) gradient err = ", err)
    self.assertLess(err, 1e-4)
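
  # An illustrative companion check (a hypothetical sketch, not part of the
  # original suite): away from zero the ReLU derivative has the closed form
  # relu'(x) = 1 if x > 0 else 0, so the tape gradient can be compared
  # against it directly.
  def testGradientMatchesAnalyticFormulaSketch(self):
    x = constant_op.constant([-0.9, -0.5, -0.1, 0.1, 0.5, 0.9])
    with backprop.GradientTape() as tape:
      tape.watch(x)
      y = nn_ops.relu(x)
    dy_dx = tape.gradient(y, x)
    self.assertAllClose(dy_dx, [0.0, 0.0, 0.0, 1.0, 1.0, 1.0])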

  # The gradient for fp16 is inaccurate due to its low precision. We compare
  # the fp16 analytical gradient against its fp32 counterpart.
  def testGradientFloat16(self):

    def grad(x):
      with backprop.GradientTape() as tape:
        tape.watch(x)
        y = nn_ops.l2_loss(nn_ops.relu(x))
      return tape.gradient(y, x)

    def f():
      with test_util.use_gpu():
        # Randomly construct a 1D shape from [1, 40)
        shape = random_ops.random_uniform([1],
                                          minval=1,
                                          maxval=40,
                                          dtype=dtypes.int32)
        x32 = random_ops.random_uniform(shape, minval=-1, maxval=1)
        x16 = math_ops.cast(x32, dtype=dtypes.float16)
        return grad(x32), grad(x16)

    # We're going to ensure that the fp16 and fp32 gradients
    # are "close" to each other for ~100 random values.
    #
    # In TensorFlow 1.x, invoking f() (without eager execution enabled)
    # would construct a graph. Instead of constructing a graph with O(100)
    # nodes, we construct a single graph to be executed ~100 times in a
    # Session.
    if not tf2.enabled():
      d32_tensor, d16_tensor = f()
      with self.cached_session() as sess:
        f = lambda: sess.run([d32_tensor, d16_tensor])

    # Repeat the experiment 100 times. All tensor shapes and their values
    # are randomly generated for each run.
    for _ in xrange(100):
      d32, d16 = f()
      self.assertAllClose(d32, d16, atol=3e-4)

  def testGradientFloat64(self):
    with self.cached_session():
      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.relu, [x]))
    print("relu (float64) gradient err = ", err)
    self.assertLess(err, 1e-10)

  def testGradGradFloat32(self):
    with self.cached_session():

      def f(x):
        assert x.dtype == dtypes.float32
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.relu(x)
        return tape.gradient(y, x)

      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(f, [x]))
    print("relu (float32) gradient of gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testGradGradFloat64(self):
    with self.cached_session():

      def f(x):
        assert x.dtype == dtypes.float64
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.relu(x)
        return tape.gradient(y, x)

      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(f, [x]))
    print("relu (float64) gradient of gradient err = ", err)
    self.assertLess(err, 1e-10)

  def testGradientScalar(self):
    x = variables.Variable(100.)

    def loss():
      return nn_ops.relu(x)**2

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.25)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(optimizer.minimize(loss))
    # At x = 100 the loss gradient is 2 * relu(100) = 200, so one step of
    # size 0.25 moves x from 100 to 100 - 0.25 * 200 = 50.
    self.assertAllClose(x.read_value(), 50.0)


class Relu6Test(test.TestCase):

  def _npRelu6(self, np_features):
    sixes = np.copy(np_features)
    sixes.fill(6.0)
    return np.minimum(
        np.maximum(np_features, np.zeros(np_features.shape)), sixes)
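
  # A minimal equivalence sketch (hypothetical, not part of the original
  # file): the reference _npRelu6 above is just clipping to the interval
  # [0, 6].
  def testNpRelu6MatchesClipSketch(self):
    x = np.array([[-9.0, 0.5, 3.0, 6.0, 7.5]])
    self.assertAllClose(self._npRelu6(x), np.clip(x, 0.0, 6.0))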

  def testNpRelu6(self):
    self.assertAllClose(
        np.array([[0.0, 0.7, 0.0, 0.3, 6.0], [0.1, 0.0, 6.0, 0.0, 0.9]]),
        self._npRelu6(
            np.array([[-0.9, 0.7, -0.5, 0.3, 6.0], [0.1, -0.3, 6.5, -0.7,
                                                    0.9]])))

  def _testRelu6(self, np_features):
    np_relu6 = self._npRelu6(np_features)
    tf_relu6 = nn_ops.relu6(np_features)
    self.assertAllClose(np_relu6, tf_relu6)
    self.assertShapeEqual(np_relu6, tf_relu6)

  def testNumbersCPU(self):
    for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
      # Force execution on CPU even if a GPU kernel is available for the type.
      with ops.device("/device:CPU:0"):
        self._testRelu6(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  def testNumbersGPU(self):
    if not test.is_gpu_available():
      self.skipTest("No GPU available")
    for t in [np.float16, np.float32, np.float64]:
      self._testRelu6(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  # The gradient test for ReLU6 is a bit tricky because the derivative is
  # not well defined at zero and at six, so we keep the input values away
  # from those points.
  def testGradientFloat32(self):
    with self.cached_session():
      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.relu6, [x]))
    print("relu6 (float32) gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testGradientFloat64(self):
    with self.cached_session():
      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
          dtype=np.float64,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.relu6, [x]))
    print("relu6 (float64) gradient err = ", err)
    self.assertLess(err, 1e-10)


class LeakyReluTest(test.TestCase):

  def _npLeakyRelu(self, np_features, alpha=0.1):
    return np.maximum(np_features, alpha * np_features)

  def testNpLeakyRelu(self):
    self.assertAllClose(
        np.array([[-0.09, 0.7, -0.05, 0.3, -0.01],
                  [0.1, -0.03, 0.5, -0.07, 0.9]]),
        self._npLeakyRelu(
            np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
                                                     0.9]]),
            alpha=0.1))

  def _testLeakyRelu(self, np_features, alpha):
    np_leaky_relu = self._npLeakyRelu(np_features, alpha)
    tf_leaky_relu = nn_ops.leaky_relu(np_features, alpha)
    self.assertAllClose(np_leaky_relu, tf_leaky_relu)
    self.assertShapeEqual(np_leaky_relu, tf_leaky_relu)

  def testNumbersCPU(self):
    for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
      # Force execution on CPU even if a GPU kernel is available for the type.
      with ops.device("/device:CPU:0"):
        self._testLeakyRelu(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
            alpha=0.2)

  def testNumbersGPU(self):
    if not test.is_gpu_available():
      self.skipTest("No GPU available")
    for t in [np.float16, np.float32, np.float64]:
      self._testLeakyRelu(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          alpha=0.1)

  # The gradient test for Leaky ReLU is a bit tricky because the derivative
  # is not well defined at zero, so we keep the input values away from zero.
  def testGradientFloat32(self):
    with self.cached_session():
      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
    print("leaky_relu (float32) gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testGradientFloat64(self):
    with self.cached_session():
      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
    print("leaky_relu (float64) gradient err = ", err)
    self.assertLess(err, 1e-10)

  def testGradGradFloat32(self):
    with compat.forward_compatibility_horizon(2018, 11, 2):
      with self.cached_session():

        def f(x):
          assert x.dtype == dtypes.float32
          with backprop.GradientTape() as tape:
            tape.watch(x)
            y = nn_ops.leaky_relu(x)
          return tape.gradient(y, x)

        x = np.asarray(
            [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
            dtype=np.float32,
            order="F")
        err = gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(f, [x]))
      print("leaky_relu (float32) gradient of gradient err = ", err)
      self.assertLess(err, 1e-4)

  def testGradGradFloat64(self):
    with compat.forward_compatibility_horizon(2018, 11, 2):
      with self.cached_session():

        def f(x):
          assert x.dtype == dtypes.float64
          with backprop.GradientTape() as tape:
            tape.watch(x)
            y = nn_ops.leaky_relu(x)
          return tape.gradient(y, x)

        x = np.asarray(
            [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
            dtype=np.float64,
            order="F")
        err = gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(f, [x]))
      print("leaky_relu (float64) gradient of gradient err = ", err)
      self.assertLess(err, 1e-10)

  def testGradientScalar(self):
    x = variables.Variable(-100.)

    def loss():
      return nn_ops.leaky_relu(x, 0.05)**2

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.2)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(optimizer.minimize(loss))
    # At x = -100, leaky_relu(x, 0.05) = -5 and the loss gradient is
    # 2 * (-5) * 0.05 = -0.5, so one step of size 0.2 moves x from -100 to
    # -100 - 0.2 * (-0.5) = -99.9.
    self.assertAllClose(x.read_value(), -99.9)


class EluTest(test.TestCase):

  def _npElu(self, np_features):
    return np.where(np_features < 0, np.exp(np_features) - 1, np_features)

  def testNpElu(self):
    self.assertAllClose(
        np.array([[-0.59343034025, 0.7, -0.39346934028, 0.3, -0.09516258196],
                  [0.1, -0.25918177931, 0.5, -0.5034146962, 0.9]]),
        self._npElu(
            np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
                                                     0.9]])))

  def _testElu(self, np_features):
    np_elu = self._npElu(np_features)
    tf_elu = nn_ops.elu(np_features)
    self.assertAllClose(np_elu, tf_elu)
    self.assertShapeEqual(np_elu, tf_elu)

  def testNumbersCPU(self):
    for t in [np.float16, np.float32, np.float64]:
      # Force execution on CPU even if a GPU kernel is available for the type.
      with ops.device("/device:CPU:0"):
        self._testElu(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  def testNumbersGPU(self):
    if not test.is_gpu_available():
      self.skipTest("No GPU available")
    for t in [np.float16, np.float32, np.float64]:
      self._testElu(np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  def testGradientFloat32(self):
    with self.cached_session():
      x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
      x = np.asarray(x_val, dtype=np.float32, order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.elu, [x]))
    print("elu (float32) gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testGradientFloat64(self):
    with self.cached_session():
      x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
      x = np.asarray(x_val, dtype=np.float64, order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.elu, [x]))
    print("elu (float64) gradient err = ", err)
    self.assertLess(err, 1e-6)

  def testGradGrad(self):
    with self.cached_session():

      def f(x):
        with backprop.GradientTape(persistent=True) as tape:
          tape.watch(x)
          y = nn_ops.elu(x)
          dy = tape.gradient(y, x)
        return tape.gradient(dy, x)

      for x in [-1., -0.5, 0.5, 1.]:
        got = self.evaluate(f(constant_op.constant(x)))
        want = _elu_grad_grad(x)
        err = np.abs(got - want)
        self.assertLess(err, 1e-4)

  def testGradGradFloat32(self):
    with self.cached_session():

      def f(x):
        assert x.dtype == dtypes.float32
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.elu(x)
        return tape.gradient(y, x)

      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(f, [x]))
    print("elu (float32) gradient of gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testGradGradFloat64(self):
    with self.cached_session():

      def f(x):
        assert x.dtype == dtypes.float64
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.elu(x)
        return tape.gradient(y, x)

      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(f, [x]))
    print("elu (float64) gradient of gradient err = ", err)
    self.assertLess(err, 1e-6)


class SeluTest(test.TestCase):

  def _npSelu(self, np_features):
    scale = 1.0507009873554804934193349852946
    scale_alpha = 1.7580993408473768599402175208123
    return np.where(np_features < 0, scale_alpha * (np.exp(np_features) - 1),
                    scale * np_features)
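
  # Illustrative relation (a hypothetical sketch, not part of the original
  # tests): SELU is a scaled ELU, selu(x) = scale * elu_alpha(x) with
  # alpha = scale_alpha / scale (~1.6733), which is why the two constants
  # above appear where they do.
  def testNpSeluMatchesScaledEluSketch(self):
    scale = 1.0507009873554804934193349852946
    scale_alpha = 1.7580993408473768599402175208123
    alpha = scale_alpha / scale
    x = np.array([[-0.9, 0.7, -0.5, 0.3, -0.1]])
    np_elu = np.where(x < 0, alpha * (np.exp(x) - 1), x)
    self.assertAllClose(self._npSelu(x), scale * np_elu)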

  def testNpSelu(self):
    self.assertAllClose(
        np.array([[-1.0433095, 0.73549069, -0.6917582, 0.3152103, -0.16730527],
                  [0.1050701, -0.45566732, 0.5253505, -0.88505305, 0.9456309]]),
        self._npSelu(
            np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
                                                     0.9]])))

  def _testSelu(self, np_features):
    np_selu = self._npSelu(np_features)
    tf_selu = nn_ops.selu(np_features)
    self.assertAllClose(np_selu, tf_selu)
    self.assertShapeEqual(np_selu, tf_selu)

  def testNumbers(self):
    for t in [np.float16, np.float32, np.float64]:
      self._testSelu(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
      # Force execution on CPU even if a GPU kernel is available for the type.
      with ops.device("/device:CPU:0"):
        self._testSelu(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  def testGradientFloat32(self):
    with self.cached_session():
      x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
      x = np.asarray(x_val, dtype=np.float32, order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.selu, [x]))
    print("selu (float32) gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testGradientFloat64(self):
    with self.cached_session():
      x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
      x = np.asarray(x_val, dtype=np.float64, order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.selu, [x]))
    print("selu (float64) gradient err = ", err)
    self.assertLess(err, 1e-6)

  def testGradGradFloat32(self):
    with self.cached_session():

      def f(x):
        assert x.dtype == dtypes.float32
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.selu(x)
        return tape.gradient(y, x)

      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(f, [x]))
    print("selu (float32) gradient of gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testGradGradFloat64(self):
    with self.cached_session():

      def f(x):
        assert x.dtype == dtypes.float64
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.selu(x)
        return tape.gradient(y, x)

      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(f, [x]))
    print("selu (float64) gradient of gradient err = ", err)
    self.assertLess(err, 1e-6)


class CreluTest(test.TestCase):

  def testCreluShape(self):
    f = random_ops.random_normal([50, 5, 7, 10])
    t = nn_ops.crelu(f)
    self.assertEqual([50, 5, 7, 20], t.get_shape())
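
  # A tiny concrete example (a hypothetical sketch, not from the original
  # suite): crelu concatenates relu(x) and relu(-x) along the last axis, so
  # a length-2 vector becomes a length-4 vector.
  def testCreluSmallExampleSketch(self):
    tf_crelu = nn_ops.crelu(np.array([-1.0, 2.0]))
    self.assertAllClose([0.0, 2.0, 1.0, 0.0], tf_crelu)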

  def _testCrelu(self, np_features):
    np_relu = np.maximum(np_features, np.zeros_like(np_features))
    np_neg_relu = np.maximum(-np_features, np.zeros_like(np_features))
    np_crelu = np.concatenate((np_relu, np_neg_relu),
                              len(np_features.shape) - 1)

    tf_crelu = nn_ops.crelu(np_features)

    self.assertAllClose(np_crelu, tf_crelu)
    self.assertShapeEqual(np_crelu, tf_crelu)

  def testNumbersCPU(self):
    for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
      # Force execution on CPU even if a GPU kernel is available for the type.
      with ops.device("/device:CPU:0"):
        self._testCrelu(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  def testNumbersGPU(self):
    if not test.is_gpu_available():
      self.skipTest("No GPU available")
    for t in [np.float16, np.float32, np.float64]:
      self._testCrelu(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  def testNumbersWithAxis0(self):
    tf_crelu = nn_ops.crelu(
        np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]), axis=0)
    np_crelu = np.array([[0, 7, 0, 3, 0], [1, 0, 5, 0, 9], [9, 0, 5, 0, 1],
                         [0, 3, 0, 7, 0]])
    self.assertAllEqual(np_crelu, tf_crelu)

  def testNumbersWithAxis1(self):
    tf_crelu = nn_ops.crelu(
        np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]), axis=1)
    np_crelu = np.array([[0, 7, 0, 3, 0, 9, 0, 5, 0, 1],
                         [1, 0, 5, 0, 9, 0, 3, 0, 7, 0]])
    self.assertAllEqual(np_crelu, tf_crelu)


if __name__ == "__main__":
  test.main()