# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for miscellaneous functionality in tensorflow.ops.nn."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

from absl.testing import parameterized
import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.ops.nn_impl import _compute_sampled_logits
from tensorflow.python.platform import test as test_lib


class ZeroFractionTest(test_lib.TestCase):

  def _ZeroFraction(self, x):
    assert x.shape
    total_elements = np.prod(x.shape)
    nonzeros = np.count_nonzero(x.flatten())
    return 1.0 - nonzeros / total_elements

  @test_util.run_deprecated_v1
  def testZeroFraction(self):
    x_shape = [5, 17]
    x_np = np.random.randint(0, 2, size=x_shape).astype(np.float32)
    y_np = self._ZeroFraction(x_np)

    x_tf = constant_op.constant(x_np)
    x_tf.set_shape(x_shape)
    y_tf = nn_impl.zero_fraction(x_tf)
    y_tf_np = self.evaluate(y_tf)

    eps = 1e-8
    self.assertAllClose(y_tf_np, y_np, eps)

  @test_util.run_deprecated_v1
  def testZeroFractionEmpty(self):
    x = np.zeros(0)
    y = self.evaluate(nn_impl.zero_fraction(x))
    self.assertTrue(np.isnan(y))

  @test_util.run_deprecated_v1
  def testZeroFraction2_27Zeros(self):
    sparsity = nn_impl.zero_fraction(
        array_ops.zeros([int(2**27 * 1.01)], dtype=dtypes.int8))
    self.assertAllClose(1.0, self.evaluate(sparsity))

  @test_util.run_deprecated_v1
  def testZeroFraction2_27Ones(self):
    sparsity = nn_impl.zero_fraction(
        array_ops.ones([int(2**27 * 1.01)], dtype=dtypes.int8))
    self.assertAllClose(0.0, self.evaluate(sparsity))

  @test_util.run_deprecated_v1
  def testUnknownSize(self):
    value = array_ops.placeholder(dtype=dtypes.float32)
    sparsity = nn_impl.zero_fraction(value)
    with self.cached_session() as sess:
      self.assertAllClose(
          0.25,
          sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]}))


class SoftmaxTest(test_lib.TestCase, parameterized.TestCase):

  def _softmax(self, x):
    assert len(x.shape) == 2
    m = x.max(1)[:, np.newaxis]
    u = np.exp(x - m)
    z = u.sum(1)[:, np.newaxis]
    return u / z

  @test_util.run_in_graph_and_eager_modes
  def testSoftmax(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    y_np = self._softmax(x_np)
    x_tf = constant_op.constant(x_np)
    y_tf = nn_ops.softmax_v2(x_tf)
    y_tf_last_dim = nn_ops.softmax_v2(x_tf, 1)
    y_tf_np = self.evaluate(y_tf)
    y_tf_last_dim_np = self.evaluate(y_tf_last_dim)
    eps = 1e-3
    self.assertAllClose(y_tf_np, y_np, eps)
    self.assertAllClose(y_tf_last_dim_np, y_np, eps)

  def testSoftmaxAxes(self):
    arr = np.linspace(0., 1, 12).reshape(3, 4)
    x_neg_axis = nn_ops.softmax_v2(arr, axis=-2)
    y_pos_axis = nn_ops.softmax_v2(arr, axis=0)
    z_gt_axis = nn_ops.softmax_v2(arr, axis=0)
    x_neg_axis_tf = self.evaluate(x_neg_axis)
    y_pos_axis_tf = self.evaluate(y_pos_axis)
    z_gt_axis_tf = self.evaluate(z_gt_axis)
    eps = 1e-3
    self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps)
    self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps)

  @parameterized.parameters(((5, 10),), ((2, 3, 4),))
  @test_util.run_deprecated_v1
  def testGradient(self, x_shape):
    x_np = np.random.randn(*x_shape).astype(np.float64)
    with self.cached_session():
      x_tf = constant_op.constant(x_np)
      y_tf = nn_ops.softmax_v2(x_tf)
      err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
                                                    x_shape)
    eps = 2e-8
    self.assertLess(err, eps)


class LogPoissonLossTest(test_lib.TestCase):

  def _log_poisson_loss(self, x, z, compute_full_loss=False):
    lpl = np.exp(x) - z * x
    if compute_full_loss:
      stirling_approx = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)
      lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.)
    return lpl

  @test_util.run_in_graph_and_eager_modes
  def testLogPoissonLoss(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    z_np = np.random.randint(0, 5, size=x_shape).astype(np.float32)
    y_np = self._log_poisson_loss(x_np, z_np, compute_full_loss=False)
    y_np_stirling = self._log_poisson_loss(x_np, z_np, compute_full_loss=True)
    y_tf = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=False)
    y_tf_stirling = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=True)
    y_tf_np = self.evaluate(y_tf)
    y_tf_np_stirling = self.evaluate(y_tf_stirling)
    eps = 1e-3
    self.assertAllClose(y_tf_np, y_np, eps)
    self.assertAllClose(y_tf_np_stirling, y_np_stirling, eps)

  @test_util.run_deprecated_v1
  def testGradient(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float64)
    z_np = np.random.randint(0, 5, size=x_shape).astype(np.float64)
    with self.cached_session():
      x_tf = constant_op.constant(x_np)
      y_tf = nn_impl.log_poisson_loss(z_np, x_tf, compute_full_loss=False)
      y_tf_stirling = nn_impl.log_poisson_loss(
          z_np, x_tf, compute_full_loss=True)
      err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
                                                    x_shape)
      err_stirling = gradient_checker.compute_gradient_error(
          x_tf, x_shape, y_tf_stirling, x_shape)
    eps = 1e-6
    self.assertLess(err, eps)
    self.assertLess(err_stirling, eps)


class LogSoftmaxTest(test_lib.TestCase, parameterized.TestCase):

  def _log_softmax(self, x):
    assert len(x.shape) == 2
    m = x.max(1)[:, np.newaxis]
    u = x - m
    return u - np.log(np.sum(np.exp(u), 1, keepdims=True))

  @test_util.run_in_graph_and_eager_modes
  def testLogSoftmax(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    y_np = self._log_softmax(x_np)
    x_tf = constant_op.constant(x_np)
    y_tf = nn_ops.log_softmax_v2(x_tf)
    y_tf_np = self.evaluate(y_tf)
    eps = 1e-3
    self.assertAllClose(y_tf_np, y_np, eps)

  def testLogSoftmaxAxes(self):
    arr = np.linspace(0., 1, 12).reshape(3, 4)
    x_neg_axis = nn_ops.log_softmax_v2(arr, axis=-2)
    y_pos_axis = nn_ops.log_softmax_v2(arr, axis=0)
    z_gt_axis = nn_ops.log_softmax_v2(arr, axis=0)
    x_neg_axis_tf = self.evaluate(x_neg_axis)
    y_pos_axis_tf = self.evaluate(y_pos_axis)
    z_gt_axis_tf = self.evaluate(z_gt_axis)
    eps = 1e-3
    self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps)
    self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps)

  @parameterized.parameters(((5, 10),), ((2, 3, 4),))
  @test_util.run_deprecated_v1
  def testGradient(self, x_shape):
    x_np = np.random.randn(*x_shape).astype(np.float64)
    with self.cached_session():
      x_tf = constant_op.constant(x_np)
      y_tf = nn_ops.log_softmax_v2(x_tf)
      err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
                                                    x_shape)
    eps = 1e-7
    self.assertLess(err, eps)


class L2LossTest(test_lib.TestCase):

  @test_util.run_in_graph_and_eager_modes
  def testL2Loss(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      x = constant_op.constant(
          [1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype)
      l2loss = nn_ops.l2_loss(x)
      value = self.evaluate(l2loss)
      self.assertAllClose(7.0, value)

  @test_util.run_deprecated_v1
  def testGradient(self):
    x_shape = [20, 7, 3]
    np.random.seed(1)  # Make it reproducible.
    x_val = np.random.random_sample(x_shape).astype(np.float64)
    with self.cached_session():
      x = constant_op.constant(x_val, name="x")
      output = nn_ops.l2_loss(x)
      err = gradient_checker.compute_gradient_error(x, x_shape, output, [1])
    print("L2Loss gradient err = %g " % err)
    err_tolerance = 1e-10
    self.assertLess(err, err_tolerance)


class L2NormalizeTest(test_lib.TestCase):

  def _l2Normalize(self, x, dim):
    if isinstance(dim, list):
      norm = np.linalg.norm(x, axis=tuple(dim))
      for d in dim:
        norm = np.expand_dims(norm, d)
      return x / norm
    else:
      norm = np.apply_along_axis(np.linalg.norm, dim, x)
      return x / np.expand_dims(norm, dim)

  @test_util.run_in_graph_and_eager_modes
  def testL2Normalize(self):
    x_shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(x_shape).astype(np.float32)
    for dim in range(len(x_shape)):
      y_np = self._l2Normalize(x_np, dim)
      x_tf = constant_op.constant(x_np, name="x")
      y_tf = nn_impl.l2_normalize_v2(x_tf, dim)
      self.assertAllClose(y_np, self.evaluate(y_tf))

  @test_util.run_in_graph_and_eager_modes
  def testL2NormalizeDimArray(self):
    x_shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(x_shape).astype(np.float32)
    dim = [1, 2]
    y_np = self._l2Normalize(x_np, dim)
    x_tf = constant_op.constant(x_np, name="x")
    y_tf = nn_impl.l2_normalize_v2(x_tf, dim)
    self.assertAllClose(y_np, self.evaluate(y_tf))

  @test_util.run_deprecated_v1
  def testL2NormalizeGradient(self):
    x_shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(x_shape).astype(np.float64)
    for dim in range(len(x_shape)):
      with self.cached_session():
        x_tf = constant_op.constant(x_np, name="x")
        y_tf = nn_impl.l2_normalize_v2(x_tf, dim)
        err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
                                                      x_shape)
      print("L2Normalize gradient err = %g " % err)
      self.assertLess(err, 1e-4)


class DropoutTest(test_lib.TestCase):

  def testDropout(self):
    # Runs dropout on a 0-1 tensor 10 times, sums the number of ones, and
    # validates that it produces approximately the right number of ones over
    # a large number of samples, based on the keep probability.
    x_dim = 40
    y_dim = 30
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
      dropout = nn_ops.dropout(t, keep_prob)
      final_count = 0
      self.assertEqual([x_dim, y_dim], dropout.get_shape())
      for _ in xrange(0, num_iter):
        value = self.evaluate(dropout)
        final_count += np.count_nonzero(value)
        # Verifies that there are only two values: 0 and 1/keep_prob.
        sorted_value = np.unique(np.sort(value))
        self.assertEqual(0, sorted_value[0])
        self.assertAllClose(1 / keep_prob, sorted_value[1])

      # Check that we are in the 15% error range
      expected_count = x_dim * y_dim * keep_prob * num_iter
      rel_error = math.fabs(final_count - expected_count) / expected_count
      print(rel_error)
      self.assertTrue(rel_error < 0.15)

  def testShapedDropout(self):
    # Runs dropout on a 0-1 tensor 10 times, sums the number of ones, and
    # validates that it produces approximately the right number of ones over
    # a large number of samples, based on the keep probability. This time with
    # shaped noise.
    x_dim = 40 * 30
    y_dim = 3
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
      dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
      self.assertEqual([x_dim, y_dim], dropout.get_shape())
      final_count = 0
      for _ in xrange(0, num_iter):
        value = self.evaluate(dropout)
        final_count += np.count_nonzero(value)
        # Verifies that there are only two values: 0 and 1/keep_prob.
        sorted_value = np.unique(np.sort(value))
        self.assertEqual(0, sorted_value[0])
        self.assertAllClose(1 / keep_prob, sorted_value[1])

      # Check that we are in the 15% error range
      expected_count = x_dim * y_dim * keep_prob * num_iter
      rel_error = math.fabs(final_count - expected_count) / expected_count
      print(rel_error)
      self.assertTrue(rel_error < 0.15)

  def testShapedDropoutCorrelation(self):
    # Runs a shaped dropout and tests that the correlations are correct.
    x_dim = 40
    y_dim = 30
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
      dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
      self.assertEqual([x_dim, y_dim], dropout.get_shape())
      for _ in xrange(0, num_iter):
        value = self.evaluate(dropout)
        # Verifies that each y column has only one type of activation.
        for i in xrange(x_dim):
          sorted_value = np.unique(np.sort(value[i, :]))
          self.assertEqual(sorted_value.size, 1)

  @test_util.run_deprecated_v1
  def testDropoutPlaceholderKeepProb(self):
    # Runs dropout on a 0-1 tensor 10 times, sums the number of ones, and
    # validates that it produces approximately the right number of ones over
    # a large number of samples, based on the keep probability.
    x_dim = 40
    y_dim = 30
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      with self.cached_session():
        t = constant_op.constant(
            1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
        keep_prob_placeholder = array_ops.placeholder(dtypes.float32)
        dropout = nn_ops.dropout(t, keep_prob_placeholder)
        final_count = 0
        self.assertEqual([x_dim, y_dim], dropout.get_shape())
        for _ in xrange(0, num_iter):
          value = dropout.eval(feed_dict={keep_prob_placeholder: keep_prob})
          final_count += np.count_nonzero(value)
          # Verifies that there are only two values: 0 and 1/keep_prob.
          sorted_value = np.unique(np.sort(value))
          self.assertEqual(0, sorted_value[0])
          self.assertAllClose(1 / keep_prob, sorted_value[1])
      # Check that we are in the 15% error range
      expected_count = x_dim * y_dim * keep_prob * num_iter
      rel_error = math.fabs(final_count - expected_count) / expected_count
      print(rel_error)
      self.assertTrue(rel_error < 0.15)

  @test_util.run_deprecated_v1
  def testShapedDropoutUnknownShape(self):
    x_dim = 40
    y_dim = 30
    keep_prob = 0.5
    x = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    dropout_x = nn_ops.dropout(
        x, keep_prob, noise_shape=array_ops.placeholder(dtypes.int32))
    self.assertEqual(x.get_shape(), dropout_x.get_shape())

  def testPartialShapedDropout(self):
    x_dim = 40 * 30
    y_dim = 3
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
      # Set noise_shape=[None, 1] which means [x_dim, 1].
      dropout = nn_ops.dropout(t, keep_prob, noise_shape=[None, 1])
      self.assertEqual([x_dim, y_dim], dropout.get_shape())
      final_count = 0
      for _ in xrange(0, num_iter):
        value = self.evaluate(dropout)
        final_count += np.count_nonzero(value)
        # Verifies that there are only two values: 0 and 1/keep_prob.
        sorted_value = np.unique(np.sort(value))
        self.assertEqual(0, sorted_value[0])
        self.assertAllClose(1 / keep_prob, sorted_value[1])

      # Check that we are in the 15% error range
      expected_count = x_dim * y_dim * keep_prob * num_iter
      rel_error = math.fabs(final_count - expected_count) / expected_count
      print(rel_error)
      self.assertTrue(rel_error < 0.15)

  @test_util.run_deprecated_v1
  def testInvalidKeepProb(self):
    x_dim = 40
    y_dim = 30
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, -1.0)
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, 1.1)
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, [0.0, 1.0])
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, array_ops.placeholder(dtypes.float64))
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, array_ops.placeholder(dtypes.float32, shape=[2]))

  @test_util.run_deprecated_v1
  def testInvalidRate(self):
    x_dim = 40
    y_dim = 30
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      nn_ops.dropout_v2(t, -1.0)
    with self.assertRaises(ValueError):
      nn_ops.dropout_v2(t, 1.1)
    with self.assertRaises(ValueError):
      nn_ops.dropout_v2(t, [0.0, 1.0])

  @test_util.run_deprecated_v1
  def testShapedDropoutShapeError(self):
    # Runs shaped dropout and verifies an error is thrown on misshapen noise.
    x_dim = 40
    y_dim = 30
    keep_prob = 0.5
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim + 3])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim])
    # test that broadcasting proceeds
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[y_dim])
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, y_dim])
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, 1])

  def testNoDropoutFast(self):
    x = array_ops.zeros((5,))
    y = nn_ops.dropout(x, keep_prob=1)
    self.assertTrue(x is y)

    y = nn_ops.dropout_v2(x, rate=0)
    self.assertTrue(x is y)

  def testDropoutWithIntegerInputs(self):
    x = constant_op.constant([1, 1, 1, 1, 1])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(x, 0.5)


class ComputeSampledLogitsTest(test_lib.TestCase):

  def setUp(self):
    self._eps = 1e-3

  def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels,
                        sampled, subtract_log_q):
    """Randomly generates input/output data for a single test case.

    This function returns numpy constants for use in a test case.

    Args:
      num_classes: An int. The number of embedding classes in the test case.
      dim: An int. The dimension of the embedding.
      batch_size: An int. The batch size.
      num_true: An int. The number of target classes per training example.
      labels: A list of batch_size * num_true ints. The target classes.
      sampled: A list of indices in [0, num_classes).
      subtract_log_q: A bool corresponding to the parameter in
          _compute_sampled_logits().

    Returns:
      weights: Embedding weights to use as test input. It is a numpy array
          of shape [num_classes, dim].
      biases: Embedding biases to use as test input. It is a numpy array
          of shape [num_classes].
      hidden_acts: Forward activations of the network to use as test input.
          It is a numpy array of shape [batch_size, dim].
      sampled_vals: A tuple based on `sampled` to use as test input in the
          format returned by a *_candidate_sampler function.
      exp_logits: The output logits expected from _compute_sampled_logits().
          It is a numpy array of shape [batch_size, num_true + len(sampled)].
      exp_labels: The output labels expected from _compute_sampled_logits().
          It is a numpy array of shape [batch_size, num_true + len(sampled)].
    """
    weights = np.random.randn(num_classes, dim).astype(np.float32)
    biases = np.random.randn(num_classes).astype(np.float32)
    hidden_acts = np.random.randn(batch_size, dim).astype(np.float32)

    true_exp = np.full([batch_size, 1], fill_value=0.5, dtype=np.float32)
    sampled_exp = np.full([len(sampled)], fill_value=0.5, dtype=np.float32)
    sampled_vals = (sampled, true_exp, sampled_exp)

    sampled_w, sampled_b = weights[sampled], biases[sampled]
    true_w, true_b = weights[labels], biases[labels]

    true_logits = np.sum(
        hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape(
            (batch_size, num_true, dim)),
        axis=2)
    true_b = true_b.reshape((batch_size, num_true))
    true_logits += true_b
    sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b

    if subtract_log_q:
      true_logits -= np.log(true_exp)
      sampled_logits -= np.log(sampled_exp[np.newaxis, :])

    exp_logits = np.concatenate([true_logits, sampled_logits], axis=1)
    exp_labels = np.hstack((np.ones_like(true_logits) / num_true,
                            np.zeros_like(sampled_logits)))

    return weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels

  def _ShardTestEmbeddings(self, weights, biases, num_shards):
    """Shards the weights and biases returned by _GenerateTestData.

    Args:
      weights: The weights returned by _GenerateTestData.
      biases: The biases returned by _GenerateTestData.
      num_shards: The number of shards to create.

    Returns:
      sharded_weights: A list of size `num_shards` containing all the weights.
      sharded_biases: A list of size `num_shards` containing all the biases.
    """
    with ops.Graph().as_default() as g:
      sharded_weights = variable_scope.get_variable(
          "w",
          partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
          initializer=constant_op.constant(weights))
      sharded_biases = variable_scope.get_variable(
          "b",
          partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
          initializer=constant_op.constant(biases))
      with self.session(graph=g) as sess:
        variables.global_variables_initializer().run()
        return self.evaluate([list(sharded_weights), list(sharded_biases)])

  def testShapes(self):
    np.random.seed(0)
    num_classes = 5
    batch_size = 3

    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, exp_logits,
       exp_labels) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=[1, 0, 2, 3],
           subtract_log_q=False)
      logits_tensor, labels_tensor = _compute_sampled_logits(
          weights=constant_op.constant(weights),
          biases=constant_op.constant(biases),
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=4,
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=False,
          remove_accidental_hits=False,
          partition_strategy="div",
          name="sampled_logits_basic_num_true_%d" % num_true)
      got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
      self.assertEqual(exp_logits.shape, got_logits.shape)
      self.assertEqual(exp_labels.shape, got_labels.shape)

  def testBasic(self):
    """Without accidental hit removal or subtract_log_q."""
    np.random.seed(0)
    num_classes = 5
    batch_size = 3

    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, exp_logits,
       exp_labels) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=[1, 0, 2, 3],
           subtract_log_q=False)
      logits_tensor, labels_tensor = _compute_sampled_logits(
          weights=constant_op.constant(weights),
          biases=constant_op.constant(biases),
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=4,
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=False,
          remove_accidental_hits=False,
          partition_strategy="div",
          name="sampled_logits_basic_num_true_%d" % num_true)
      got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
      self.assertAllClose(exp_logits, got_logits, self._eps)
      self.assertAllClose(exp_labels, got_labels, self._eps)

  def testAccidentalHitRemoval(self):
    """With accidental hit removal, no subtract_log_q."""
    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    sampled = [1, 0, 2, 3]

    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, _,
       _) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=sampled,
           subtract_log_q=False)
      logits_tensor, _ = _compute_sampled_logits(
          weights=constant_op.constant(weights),
          biases=constant_op.constant(biases),
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=len(sampled),
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=False,
          remove_accidental_hits=True,
          partition_strategy="div",
          name="sampled_logits_accidental_hit_removal_num_true_%d" % num_true)
      # Test that the exponentiated logits of accidental hits are near 0.
      # First we need to find the hits in this random test run:
      labels_reshape = labels.reshape((batch_size, num_true))
      got_logits = self.evaluate(logits_tensor)
      for row in xrange(batch_size):
        row_labels = labels_reshape[row, :]
        for col in xrange(len(sampled)):
          if sampled[col] in row_labels:
            # We need to add the num_true offset into logits_*.
            self.assertNear(
                np.exp(got_logits[row, col + num_true]), 0., self._eps)

  def testSubtractLogQ(self):
    """With subtract_log_q, no accidental hit removal."""
    np.random.seed(0)
    num_classes = 5
    batch_size = 3

    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, exp_logits,
       exp_labels) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=[1, 0, 2, 3],
           subtract_log_q=True)
      logits_tensor, labels_tensor = _compute_sampled_logits(
          weights=constant_op.constant(weights),
          biases=constant_op.constant(biases),
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=4,
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=True,
          remove_accidental_hits=False,
          partition_strategy="div",
          name="sampled_logits_subtract_log_q_num_true_%d" % num_true)
      got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
      self.assertAllClose(exp_logits, got_logits, self._eps)
      self.assertAllClose(exp_labels, got_labels, self._eps)

  def testSharded(self):
    """With sharded weights and sharded biases."""
    np.random.seed(0)
    num_classes = 5
    batch_size = 3

    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, exp_logits,
       exp_labels) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=[1, 0, 2, 3],
           subtract_log_q=False)
      weight_shards, bias_shards = self._ShardTestEmbeddings(
          weights, biases, num_shards=3)
      logits_tensor, labels_tensor = _compute_sampled_logits(
          weights=[constant_op.constant(shard) for shard in weight_shards],
          biases=[constant_op.constant(shard) for shard in bias_shards],
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=4,
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=False,
          remove_accidental_hits=False,
          partition_strategy="div",
          name="sampled_logits_sharded_num_true_%d" % num_true)
      got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
      self.assertAllClose(exp_logits, got_logits, self._eps)
      self.assertAllClose(exp_labels, got_labels, self._eps)

  def testNCELoss(self):
    # A simple test to verify the numerics.

    def _SigmoidCrossEntropyWithLogits(logits, targets):
      # logits, targets: float arrays of the same shape.
      assert logits.shape == targets.shape
      pred = 1. / (1. + np.exp(-logits))
      eps = 0.0001
      pred = np.minimum(np.maximum(pred, eps), 1 - eps)
      return -targets * np.log(pred) - (1. - targets) * np.log(1. - pred)

    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    labels = [0, 1, 2]
    (weights, biases, hidden_acts, sampled_vals, exp_logits,
     exp_labels) = self._GenerateTestData(
         num_classes=num_classes,
         dim=10,
         batch_size=batch_size,
         num_true=1,
         labels=labels,
         sampled=[1, 0, 2, 3],
         subtract_log_q=True)
    exp_nce_loss = np.sum(
        _SigmoidCrossEntropyWithLogits(exp_logits, exp_labels), 1)

    got_nce_loss = nn_impl.nce_loss_v2(
        weights=constant_op.constant(weights),
        biases=constant_op.constant(biases),
        labels=constant_op.constant(labels, shape=(batch_size, 1)),
        inputs=constant_op.constant(hidden_acts),
        num_sampled=4,
        num_classes=num_classes,
        num_true=1,
        sampled_values=sampled_vals)

    self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4)

    # Test with sharded weights and sharded biases.
    weight_shards, bias_shards = self._ShardTestEmbeddings(
        weights, biases, num_shards=3)
    got_nce_loss = nn_impl.nce_loss_v2(
        weights=[constant_op.constant(shard) for shard in weight_shards],
        biases=[constant_op.constant(shard) for shard in bias_shards],
        labels=constant_op.constant(labels, shape=(batch_size, 1)),
        inputs=constant_op.constant(hidden_acts),
        num_sampled=4,
        num_classes=num_classes,
        num_true=1,
        sampled_values=sampled_vals)

    self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4)

  def testSampledSoftmaxLoss(self):
    # A simple test to verify the numerics.

    def _SoftmaxCrossEntropyWithLogits(logits, targets):
      # logits, targets: float arrays of the same shape.
      assert logits.shape == targets.shape
      stable_exp_logits = np.exp(
          logits - np.amax(logits, axis=1, keepdims=True))
      pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
      return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)

    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    labels = [0, 1, 2]
    (weights, biases, hidden_acts, sampled_vals, exp_logits,
     exp_labels) = self._GenerateTestData(
         num_classes=num_classes,
         dim=10,
         batch_size=batch_size,
         num_true=1,
         labels=labels,
         sampled=[1, 0, 2, 3],
         subtract_log_q=True)
    exp_sampled_softmax_loss = _SoftmaxCrossEntropyWithLogits(
        exp_logits, exp_labels)

    got_sampled_softmax_loss = nn_impl.sampled_softmax_loss_v2(
        weights=constant_op.constant(weights),
        biases=constant_op.constant(biases),
        labels=constant_op.constant(labels, shape=(batch_size, 1)),
        inputs=constant_op.constant(hidden_acts),
        num_sampled=4,
        num_classes=num_classes,
        num_true=1,
        sampled_values=sampled_vals,
        remove_accidental_hits=False)

    self.assertAllClose(exp_sampled_softmax_loss,
                        self.evaluate(got_sampled_softmax_loss), 1e-4)

    # Test with sharded weights and sharded biases.
    weight_shards, bias_shards = self._ShardTestEmbeddings(
        weights, biases, num_shards=3)
    got_sampled_softmax_loss = nn_impl.sampled_softmax_loss_v2(
        weights=[constant_op.constant(shard) for shard in weight_shards],
        biases=[constant_op.constant(shard) for shard in bias_shards],
        labels=constant_op.constant(labels, shape=(batch_size, 1)),
        inputs=constant_op.constant(hidden_acts),
        num_sampled=4,
        num_classes=num_classes,
        num_true=1,
        sampled_values=sampled_vals,
        remove_accidental_hits=False)

    self.assertAllClose(exp_sampled_softmax_loss,
                        self.evaluate(got_sampled_softmax_loss), 1e-4)

  def testSampledSoftmaxLossBf16(self):
    # A simple test to verify the numerics for bfloat16.
    def _SoftmaxCrossEntropyWithLogits(logits, targets):
      # logits, targets: float arrays of the same shape.
      assert logits.shape == targets.shape
      stable_exp_logits = np.exp(
          logits - np.amax(logits, axis=1, keepdims=True))
      pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
      return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)

    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    labels = [0, 1, 2]
    sampled = [1, 0, 2, 3]
    (weights, biases, hidden_acts, _, exp_logits,
     exp_labels) = self._GenerateTestData(
         num_classes=num_classes,
         dim=10,
         batch_size=batch_size,
         num_true=1,
         labels=labels,
         sampled=sampled,
         subtract_log_q=True)
    exp_sampled_softmax_loss = _SoftmaxCrossEntropyWithLogits(
        exp_logits, exp_labels)

    true_exp_bf16 = np.full([batch_size, 1],
                            fill_value=0.5,
                            dtype=dtypes.bfloat16.as_numpy_dtype)
    sampled_exp_bf16 = np.full([len(sampled)],
                               fill_value=0.5,
                               dtype=dtypes.bfloat16.as_numpy_dtype)
    sampled_vals_bf16 = (sampled, true_exp_bf16, sampled_exp_bf16)

    got_sampled_softmax_loss = math_ops.cast(
        nn_impl.sampled_softmax_loss_v2(
            weights=constant_op.constant(weights, dtype=dtypes.bfloat16),
            biases=constant_op.constant(biases, dtype=dtypes.bfloat16),
            labels=constant_op.constant(
                labels, shape=(batch_size, 1), dtype=dtypes.bfloat16),
            inputs=constant_op.constant(hidden_acts, dtype=dtypes.bfloat16),
            num_sampled=4,
            num_classes=num_classes,
            num_true=1,
            sampled_values=sampled_vals_bf16,
            remove_accidental_hits=False), dtypes.float32)

    self.assertAllClose(exp_sampled_softmax_loss,
                        self.evaluate(got_sampled_softmax_loss), 1e-1)


class CReluTest(test_lib.TestCase):

  def test(self):
    np.random.seed(1)  # Make it reproducible.
    x = np.random.randn(3, 4).astype(np.float32)
    y = np.concatenate([x * (x > 0), -x * (x < 0)], axis=1)

    z = self.evaluate(nn_ops.crelu(constant_op.constant(x)))
    self.assertAllClose(y, z, 1e-4)


class ReluTest(test_lib.TestCase):

  def test(self):
    np.random.seed(1)  # Make it reproducible.
    x = np.random.randn(3, 4).astype(np.float32)
    y = np.maximum(x, 0.0)

    z = self.evaluate(nn_ops.relu(constant_op.constant(x)))
    self.assertAllEqual(y, z)

  @test_util.run_deprecated_v1
  def testNaNs(self):
    # Test that relu(nan) = nan for various sizes.
    for i in range(18):
      x = np.zeros(i) + np.nan
      with self.cached_session():
        z = nn_ops.relu(constant_op.constant(x)).eval()
        self.assertTrue(np.isnan(z).all())


class LeakyReluTest(test_lib.TestCase):

  def testRange(self):
    batch_size = 3
    height, width = 4, 4
    np.random.seed(1)  # Make it reproducible.
    inputs = np.random.uniform(size=(batch_size, height, width, 3)).astype(
        np.float32)
    inputs = constant_op.constant(inputs)

    outputs = nn_ops.leaky_relu(inputs)
    self.assertEqual(inputs.shape, outputs.shape)

    inputs, outputs = self.evaluate([inputs, outputs])

    self.assertGreaterEqual(outputs.min(), 0.0)
    self.assertLessEqual(outputs.max(), 1.0)
    self.assertAllClose(inputs, outputs)

  @test_util.run_deprecated_v1
  def testValues(self):
    for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
      np_values = np.array([-2, -1, 0, 1, 2], dtype=dtype)
      outputs = nn_ops.leaky_relu(constant_op.constant(np_values))

      outputs = self.evaluate(outputs)

      tol = 2e-3 if dtype == np.float16 else 1e-6
      self.assertAllClose(
          outputs, [-0.4, -0.2, 0.0, 1.0, 2.0], rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testName(self):
    np_values = np.array([-2, -1, 0, 1, 2], dtype=np.float64)
    outputs_with_name_set = nn_ops.leaky_relu(
        constant_op.constant(np_values),
        name='test_relu_op')
    self.assertEqual(outputs_with_name_set.name, 'test_relu_op:0')
    outputs_without_name_set = nn_ops.leaky_relu(
        constant_op.constant(np_values))
    self.assertEqual(outputs_without_name_set.name, 'LeakyRelu:0')


class SwishTest(test_lib.TestCase):

  @test_util.run_deprecated_v1
  def testValues(self):
    np_values = np.array(
        [np.linspace(-7.0, 0.0, 100),
         np.linspace(0.0, 7.0, 100)],
        dtype=np.float32)
    tf_values = constant_op.constant(np_values)
    actual_tf_outputs = nn_impl.swish(tf_values)
    expected_tf_outputs = tf_values * math_ops.sigmoid(tf_values)

    actual_outputs, expected_outputs = self.evaluate(
        [actual_tf_outputs, expected_tf_outputs])

    self.assertAllClose(actual_outputs, expected_outputs)

  @test_util.run_deprecated_v1
  def testGradients(self):
    shape = [5, 3, 4]
    sigma = 5
    input_values = np.random.randn(*shape) * sigma
    x_tf = constant_op.constant(input_values)
    y_tf = nn_impl.swish(x_tf)
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(x_tf, shape, y_tf, shape)
    self.assertLess(err, 1e-4)


class MomentsTest(test_lib.TestCase):

  def doOutputTest(self,
                   input_shape,
                   moments_axes,
                   tol=1e-4,
                   check_gradients=False):
    for mu in [0.0, 1.0, 1e3]:
      for sigma in [1.0, 0.1]:
        for keep_dims in [True, False]:
          input_values = np.random.rand(*input_shape) * sigma + mu
          expected_mean = np.mean(
              input_values, axis=moments_axes, keepdims=keep_dims)
          expected_var = np.var(
              input_values, axis=moments_axes, keepdims=keep_dims)
          with ops.Graph().as_default() as g:
            with self.session(graph=g) as sess:
              inputs = constant_op.constant(
                  input_values, shape=input_shape, dtype=dtypes.float32)
              mean, variance = nn_impl.moments_v2(
                  inputs, moments_axes, keepdims=keep_dims)

              if check_gradients:
                err = gradient_checker.compute_gradient_error(
                    inputs, input_shape, mean, mean.shape.as_list())
                self.assertLess(err, 1e-3)
                err = gradient_checker.compute_gradient_error(
                    inputs, input_shape, variance, variance.shape.as_list())
                self.assertLess(err, 1e-3)

              # Evaluate.
              [mean, variance] = self.evaluate([mean, variance])
              # Make sure that there are no NaNs
              self.assertFalse(np.isnan(mean).any())
              self.assertFalse(np.isnan(variance).any())
              self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)
              self.assertAllClose(variance, expected_var, rtol=tol, atol=tol)

  def testOutputAndGradient2DInput0(self):
    self.doOutputTest((10, 10), (0,), check_gradients=True)

  def testOutputAndGradient2DInput01(self):
    self.doOutputTest((10, 10), (0, 1), check_gradients=True)

  def testOutput2DInput0(self):
    self.doOutputTest((10, 300), (0,))

  def testOutput2DInput1(self):
    self.doOutputTest((10, 300), (1,))

  def testOutput2DInput01(self):
    self.doOutputTest((10, 300), (0, 1))

  def testOutput4DInput0(self):
    self.doOutputTest((10, 10, 10, 30), (0,))

  def testOutput4DInput1(self):
    self.doOutputTest((10, 10, 10, 30), (1,))

  def testOutput4DInput3(self):
    self.doOutputTest((10, 10, 10, 30), (3,))

  def testOutput4DInput012(self):
    self.doOutputTest((10, 10, 10, 30), (0, 1, 2))

  def testOutput4DInput123(self):
    self.doOutputTest((10, 10, 10, 30), (1, 2, 3))


class DataFormatDimMapTest(test_lib.TestCase):

  def _test(self, x_val, y_val_expected):
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_dim_map(x)

    y_val = self.evaluate(y)
    self.assertAllEqual(y_val, y_val_expected)

  def test(self):
    self._test(0, 0)
    self._test(1, 2)
    self._test(2, 3)
    self._test(3, 1)
    self._test(-1, 1)
    self._test(-2, 3)
    self._test(-3, 2)
    self._test(-4, 0)
    self._test([1, 3], [2, 1])
    self._test([1, 3, -2], [2, 1, 3])
    self._test([1, -3, -2], [2, 2, 3])
    self._test([[1, -3], [1, -1]], [[2, 2], [2, 1]])

  def testNHWCtoNCHW(self):
    x_val = [1, -3, -2]
    y_val_expected = [2, 2, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="NCHW")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, y_val_expected)

  def testNHWCtoHWNC(self):
    x_val = [-4, -3, -2, -1, 0, 1, 2, 3]
    y_val_expected = [2, 0, 1, 3, 2, 0, 1, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="HWNC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, y_val_expected)

  def testNHWCtoWHCN(self):
    x_val = [-4, -3, -2, -1, 0, 1, 2, 3]
    y_val_expected = [3, 1, 0, 2, 3, 1, 0, 2]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="WHCN")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, y_val_expected)

  def testArbitraryASCII(self):
    x_val = [-4, -3, -2, -1, 0, 1, 2, 3]
    y_val_expected = [3, 2, 1, 0, 3, 2, 1, 0]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_dim_map(x, src_format="qwer", dst_format="rewq")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, y_val_expected)


class DataFormatVectorPermuteTest(test_lib.TestCase):

  def testNHWCToNCHW(self):
    x_val = [7, 4, 9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x)
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, [7, 3, 4, 9])

  def testNCHWToNHWC(self):
    x_val = [7, 4, 9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="NCHW", dst_format="NHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, [7, 9, 3, 4])

  def testNHWCToHWNC(self):
    x_val = [7, 4, 9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="NHWC", dst_format="HWNC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, [4, 9, 7, 3])

  def testHWNCToNHWC(self):
    x_val = [7, 4, 9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="HWNC", dst_format="NHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, [9, 7, 4, 3])

  def testNHWCToNCHW2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x)
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, [[7, 4], [5, 1], [9, 3], [4, 5]])

  def testNHWCToHWNC2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="NHWC", dst_format="HWNC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, [[9, 3], [4, 5], [7, 4], [5, 1]])

  def testHWNCToNHWC2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="HWNC", dst_format="NHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, [[4, 5], [7, 4], [9, 3], [5, 1]])

  def testNCHWToNHWC2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="NCHW", dst_format="NHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
      self.assertAllEqual(y_val, [[7, 4], [4, 5], [5, 1], [9, 3]])


@test_util.run_all_in_graph_and_eager_modes
class AvgPoolTest(test_lib.TestCase):

  def test1DTensor(self):
    x = array_ops.ones([3, 6, 5])
    ksize = 2
    strides = 2

    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool1d(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test1DNumpy(self):
    x = np.ones([3, 6, 5])
    ksize = 2
    strides = 2

    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool1d(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test2DTensor(self):
    x = array_ops.ones([3, 6, 6, 5])
    ksize = 2
    strides = 2

    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test2DNumpy(self):
    x = np.ones([3, 6, 6, 5])
    ksize = 2
    strides = 2

    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test3DTensor(self):
    x = array_ops.ones([3, 7, 6, 6, 5])
    ksize = 2
    strides = 2

    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool3d(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test3DNumpy(self):
    x = np.ones([3, 7, 6, 6, 5], dtype=np.float32)
    ksize = 2
    strides = 2

    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool3d(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))


@test_util.run_all_in_graph_and_eager_modes
class MaxPoolTest(test_lib.TestCase):

  def test1DTensor(self):
    x = array_ops.ones([3, 6, 5])
    ksize = 2
    strides = 2

    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool1d(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test1DNumpy(self):
    x = np.ones([3, 6, 5])
    ksize = 2
    strides = 2

    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool1d(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test2DTensor(self):
    x = array_ops.ones([3, 6, 6, 5])
    ksize = 2
    strides = 2

    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test2DNumpy(self):
    x = np.ones([3, 6, 6, 5])
    ksize = 2
    strides = 2

    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test3DTensor(self):
    x = array_ops.ones([3, 7, 6, 6, 5])
    ksize = 2
    strides = 2

    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool3d(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test3DNumpy(self):
    x = np.ones([3, 7, 6, 6, 5], dtype=np.float32)
    ksize = 2
    strides = 2

    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool3d(x, ksize, strides, "SAME")

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def testIncorrectSizeInputSmall(self):
    x = array_ops.ones([3, 4])
    with self.assertRaisesRegex(
        ValueError, "Input tensor must be of rank 3, 4 or 5 but was 2."):
      nn_ops.max_pool_v2(x, 2, 2, "SAME")

  def testIncorrectSizeInput(self):
    x = array_ops.ones([3, 4, 1, 2, 1, 2])
    with self.assertRaisesRegex(
        ValueError, "Input tensor must be of rank 3, 4 or 5 but was 6."):
      nn_ops.max_pool_v2(x, 2, 2, "SAME")


@test_util.run_all_in_graph_and_eager_modes
class ConvolutionTest(test_lib.TestCase):

  def testUnknownSize(self):
    x = tensor_spec.TensorSpec(None, dtypes.float32, name="x")
    k = np.ones([3, 6, 6, 5])

    @def_function.function
    def F(value):
      return nn_ops.convolution(value, k, "SAME")

    F.get_concrete_function(x)


class ConvTransposeTest(test_lib.TestCase):

  def test1DTensor(self):
    t = array_ops.ones([2, 4, 3])
    v = array_ops.ones([2, 5, 3])
    strides = 2

    y1 = nn_ops.conv1d_transpose(t, v, [2, 8, 5], strides)
    y2 = nn_ops.conv_transpose(t, v, [2, 8, 5], strides)

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test2DTensor(self):
    t = array_ops.ones([2, 4, 4, 3])
    v = array_ops.ones([2, 2, 5, 3])
    strides = 2

    y1 = nn_ops.conv2d_transpose_v2(t, v, [2, 8, 8, 5], strides)
    y2 = nn_ops.conv_transpose(t, v, [2, 8, 8, 5], strides)

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def test3DTensor(self):
    t = array_ops.ones([2, 4, 4, 4, 3])
    v = array_ops.ones([2, 2, 2, 5, 3])
    strides = 2

    y1 = nn_ops.conv3d_transpose_v2(t, v, [2, 8, 8, 8, 5], strides)
    y2 = nn_ops.conv_transpose(t, v, [2, 8, 8, 8, 5], strides)

    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

  def testIncorrectSizeInputSmall(self):
    with self.assertRaisesRegex(
        ValueError, "output_shape must be of length 3, 4 or 5 but was 2."):
      nn_ops.conv_transpose(None, 2, [2, 3], "SAME")

  def testIncorrectSizeInput(self):
    with self.assertRaisesRegex(
        ValueError, "output_shape must be of length 3, 4 or 5 but was 6."):
      nn_ops.conv_transpose(None, 2, [2, 3, 4, 2, 5, 1], "SAME")

  def testTensorsNoShape(self):
    with self.assertRaisesRegex(ValueError, "output_shape cannot be None"):
      nn_ops.conv_transpose(None, None, None, None)


if __name__ == "__main__":
  test_lib.main()