# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for array_ops."""
import re
import time
import unittest

from absl.testing import parameterized
import numpy as np

from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import test as test_lib


@test_util.run_all_in_graph_and_eager_modes
class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):

  def testNonBatchMatrix(self):
    matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
    expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
    transposed = array_ops.matrix_transpose(matrix)
    self.assertEqual((3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed)

  def testConjugate(self):
    m = [[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]
    expected_transposed = [[1 - 1j, 4 - 4j], [2 - 2j, 5 - 5j], [3 - 3j, 6 - 6j]]
    matrix = ops.convert_to_tensor(m)
    transposed = array_ops.matrix_transpose(matrix, conjugate=True)
    self.assertEqual((3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed)

  def testBatchMatrix(self):
    matrix_0 = [[1, 2, 3], [4, 5, 6]]
    matrix_0_t = [[1, 4], [2, 5], [3, 6]]
    matrix_1 = [[11, 22, 33], [44, 55, 66]]
    matrix_1_t = [[11, 44], [22, 55], [33, 66]]
    batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
    expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
    transposed = array_ops.matrix_transpose(batch_matrix)
    self.assertEqual((2, 3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed)

  def testNonBatchMatrixDynamicallyDefined(self):
    # Needs an explicit `constant` because lists are not automatically
    # converted to tensors when applying `transpose` below.
    matrix = constant_op.constant([[1, 2, 3], [4, 5, 6]])  # Shape (2, 3)
    expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)

    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
    ])
    def transpose(matrix):
      self.assertIs(matrix.shape.ndims, None)
      return array_ops.matrix_transpose(matrix)

    self.assertAllEqual(expected_transposed, transpose(matrix))

  def testBatchMatrixDynamicallyDefined(self):
    matrix_0 = [[1, 2, 3], [4, 5, 6]]
    matrix_0_t = [[1, 4], [2, 5], [3, 6]]
    matrix_1 = [[11, 22, 33], [44, 55, 66]]
    matrix_1_t = [[11, 44], [22, 55], [33, 66]]
    # Needs an explicit `constant` because lists are not automatically
    # converted to tensors when applying `transpose` below.
    batch_matrix = constant_op.constant([matrix_0, matrix_1])  # Shape (2, 2, 3)
    expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)

    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
    ])
    def transpose(matrix):
      self.assertIs(matrix.shape.ndims, None)
      return array_ops.matrix_transpose(matrix)

    self.assertAllEqual(expected_transposed, transpose(batch_matrix))

  def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
    vector = [1, 2, 3]
    with self.assertRaisesRegex(ValueError, "should be a "):
      array_ops.matrix_transpose(vector)

  def testNarrowMatrixConjugateTranspose(self):
    for dtype in (dtypes.float32, dtypes.float64):
      for conjugate in (True, False):
        with self.subTest(complex_type=dtype, conjugate=conjugate):
          vector = math_ops.complex(
              constant_op.constant(0, dtype=dtype),
              math_ops.range(96, dtype=dtype))
          column_vector = array_ops.expand_dims(vector, axis=-1)
          row_vector = array_ops.expand_dims(vector, axis=0)
          narrow_matrix = array_ops.tile(column_vector, [1, 2])  # [96, 2]
          expected_transposed = array_ops.tile(row_vector, [2, 1])  # [2, 96]
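          # The matrix is purely imaginary (zero real part), so conjugating
          # it simply negates every entry.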
          if conjugate:
            expected_transposed = -expected_transposed

          transposed = array_ops.matrix_transpose(
              narrow_matrix, conjugate=conjugate)

          self.assertEqual((2, 96), transposed.get_shape())
          self.assertAllEqual(expected_transposed, transposed)


class BooleanMaskTest(test_util.TensorFlowTestCase):

  def setUp(self):
    self.rng = np.random.RandomState(42)

  def CheckVersusNumpy(self, ndims_mask, arr_shape, make_mask=None, axis=None):
    """Check equivalence between boolean_mask and numpy masking."""
    if make_mask is None:
      make_mask = lambda shape: self.rng.randint(0, 2, size=shape).astype(bool)
    arr = np.random.rand(*arr_shape)
    mask = make_mask(arr_shape[:ndims_mask])
    if axis is not None:
      mask = make_mask(arr_shape[axis:ndims_mask + axis])
    if axis is None or axis == 0:
      masked_arr = arr[mask]
    elif axis == 1:
      masked_arr = arr[:, mask]
    elif axis == 2:
      masked_arr = arr[:, :, mask]
    masked_tensor = array_ops.boolean_mask(arr, mask, axis=axis)

    # Leading dimension size of masked_tensor is always unknown until runtime
    # since we don't know how many elements will be kept.
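    # For example, masking a (2, 5) array along axis=1 with a 1-D mask gives
    # static shape (2, None), so only dims from axis + 1 onward are compared.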
    leading = 1 if axis is None else axis + 1
    self.assertAllEqual(masked_tensor.get_shape()[leading:],
                        masked_arr.shape[leading:])

    self.assertAllClose(masked_arr, masked_tensor)

  def testMaskDim1ArrDim2Axis1(self):
    ndims_mask = 1
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape, axis=1)

  def testMaskDim2ArrDim2Axis1(self):
    ndims_mask = 2
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape, axis=1)

  def testMaskDim1ArrDim1(self):
    ndims_mask = 1
    for arr_shape in [(1,), (2,), (3,), (10,)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)

  def testMaskDim1ArrDim2(self):
    ndims_mask = 1
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)

  def testMaskDim2ArrDim2(self):
    ndims_mask = 2
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)

  def testMaskDim2ArrDim3(self):
    ndims_mask = 2
    for arr_shape in [(1, 1, 1), (1, 2, 2), (2, 2, 1)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)

  def testEmptyInput2D(self):
    mask = np.array([True, False])
    arr = np.array([[], []]).astype(np.float32)
    numpy_result = arr[mask]
    tf_result = array_ops.boolean_mask(arr, mask)
    self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
    with self.cached_session():
      self.assertAllClose(numpy_result, tf_result)

  def testEmptyInput1D(self):
    mask = np.array([]).astype(bool)
    arr = np.array([]).astype(np.float32)
    numpy_result = arr[mask]
    tf_result = array_ops.boolean_mask(arr, mask)
    self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
    with self.cached_session():
      self.assertAllClose(numpy_result, tf_result)

  def testEmptyOutput(self):
    make_mask = lambda shape: np.zeros(shape, dtype=bool)
    for ndims_mask in range(1, 4):
      for ndims_arr in range(ndims_mask, ndims_mask + 3):
        for _ in range(3):
          with self.subTest(ndims_mask=ndims_mask, ndims_arr=ndims_arr, _=_):
            arr_shape = np.random.randint(1, 5, size=ndims_arr)
            self.CheckVersusNumpy(ndims_mask, arr_shape, make_mask=make_mask)

  def testWorksWithDimensionsEqualToNoneDuringGraphBuild(self):
    # The rank of the mask tensor must be specified. This is explained
    # in the docstring as well.
    @def_function.function
    def func(ph_tensor, ph_mask):
      return array_ops.boolean_mask(ph_tensor, ph_mask)

    f = func.get_concrete_function(
        tensor_spec.TensorSpec(None, dtypes.int32),
        tensor_spec.TensorSpec([None], dtypes.bool))
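    # The mask's rank is statically 1 (shape [None]) even though the data
    # tensor's shape is fully unknown, which is all boolean_mask requires.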
    arr = np.array([[1, 2], [3, 4]], np.int32)
    mask = np.array([False, True])
    masked_tensor = f(arr, mask)
    self.assertAllEqual(masked_tensor, arr[mask])

  def testMaskDimensionsSetToNoneRaises(self):
    # The rank of the mask tensor must be specified. This is explained
    # in the docstring as well.
    @def_function.function
    def func(tensor, mask):
      return array_ops.boolean_mask(tensor, mask)

    with self.assertRaisesRegex(ValueError, "dimensions must be specified"):
      _ = func.get_concrete_function(
          tensor_spec.TensorSpec([None, 2], dtypes.int32),
          tensor_spec.TensorSpec(None, dtypes.bool))

  def testMaskHasMoreDimsThanTensorRaises(self):
    mask = [[True, True], [False, False]]
    tensor = [1, 2, 3, 4]
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "incompatible"):
        self.evaluate(array_ops.boolean_mask(tensor, mask))

  def testMaskIsScalarRaises(self):
    mask = True
    tensor = 1
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "mask.*scalar"):
        self.evaluate(array_ops.boolean_mask(tensor, mask))

  def testMaskShapeDifferentThanFirstPartOfTensorShapeRaises(self):
    mask = [True, True, True]
    tensor = [[1, 2], [3, 4]]
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "incompatible"):
        self.evaluate(array_ops.boolean_mask(tensor, mask))

  def testStringMask(self):
    # Reproduces b/111171330, where the optimized boolean_mask graph would
    # be incorrectly placed on GPU.
    config.set_optimizer_experimental_options({"shape_optimization": True})

    @def_function.function
    def func(tile_input):
      string_tensor = array_ops.tile([["hello"]], tile_input)
      bool_tensor = array_ops.tile([[True]], tile_input)
      masked_tensor = array_ops.boolean_mask(string_tensor, bool_tensor)
      return masked_tensor

    result = func([2, 2])
    self.assertAllEqual([b"hello", b"hello", b"hello", b"hello"], result)

  def testMaskWithAxisTensor(self):

    @def_function.function(autograph=False)
    def f():
      return array_ops.boolean_mask([1, 2, 3], [True, False, True],
                                    axis=constant_op.constant(
                                        0, dtype=dtypes.int32))

    self.assertAllEqual(self.evaluate(f()), [1, 3])

  def testMaskWithAxisNonConstTensor(self):

    @def_function.function(
        autograph=False,
        input_signature=[
            tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
        ])
    def f(axis):
      return array_ops.boolean_mask([1, 2, 3], [True, False, True], axis=axis)

    self.assertAllEqual(
        self.evaluate(f(constant_op.constant(0, dtype=dtypes.int32))), [1, 3])


@test_util.run_all_in_graph_and_eager_modes
class OperatorShapeTest(test_util.TensorFlowTestCase):

  def testExpandScalar(self):
    scalar = "hello"
    scalar_expanded = array_ops.expand_dims(scalar, [0])
    self.assertEqual(scalar_expanded.get_shape(), (1,))

  def testSqueezeScalar(self):
    scalar = "hello"
    scalar_squeezed = array_ops.squeeze(scalar, ())
    self.assertEqual(scalar_squeezed.get_shape(), ())

  def testSqueezeMatrix(self):
    matrix = [[1, 2, 3]]
    matrix_squeezed = array_ops.squeeze(matrix, [0])
    self.assertEqual(matrix_squeezed.get_shape(), (3))

    with self.assertRaisesRegex(
        Exception, "Can not squeeze dim.1., expected a dimension of 1, got 3"):
      matrix_squeezed = array_ops.squeeze(matrix, [1])

  def testSqueezeScalarDim(self):
    matrix = [[1, 2, 3]]
    matrix_squeezed = array_ops.squeeze(matrix, 0)
    self.assertEqual(matrix_squeezed.get_shape(), (3))

  def testExpandDimsWithNonScalarDim(self):
    with self.assertRaisesRegex(Exception,
                                "must be a tensor with a single value"):
      array_ops.expand_dims(1, axis=[0, 1])

  def testReshapeWithManyDims(self):
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "too many dimensions"):
      self.evaluate(
          array_ops.reshape(
              tensor=[[1]],
              shape=constant_op.constant([1 for i in range(254)],
                                         dtype=dtypes.int64)))


@test_util.with_eager_op_as_function
class ReverseV2Test(test_util.TensorFlowTestCase):

  def testReverse0DimAuto(self):
    x_np = 4
    for use_gpu in [False, True]:
      with self.subTest(use_gpu=use_gpu):
        with self.cached_session(use_gpu=use_gpu):
          x_tf = self.evaluate(array_ops.reverse_v2(x_np, []))
          self.assertAllEqual(x_tf, x_np)

  def _reverse1DimAuto(self, np_dtype):
    x_np = np.array([1, 200, 3, 40, 5], dtype=np_dtype)

    for use_gpu in [False, True]:
      for axis_dtype in [dtypes.int32, dtypes.int64]:
        with self.subTest(use_gpu=use_gpu, axis_dtype=axis_dtype):
          x_tf = self.evaluate(
              array_ops.reverse_v2(x_np,
                                   constant_op.constant([0], dtype=axis_dtype)))
          self.assertAllEqual(x_tf, np.asarray(x_np)[::-1])

  def _reverse2DimAuto(self, np_dtype):
    x_np = np.array([[1, 200, 3], [4, 5, 60]], dtype=np_dtype)

    for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
      for use_gpu in [False, True]:
        for axis_dtype in [dtypes.int32, dtypes.int64]:
          with self.subTest(
              reverse_f=reverse_f, use_gpu=use_gpu, axis_dtype=axis_dtype):
            x_tf_1 = self.evaluate(
                reverse_f(x_np, constant_op.constant([0], dtype=axis_dtype)))
            x_tf_2 = self.evaluate(
                reverse_f(x_np, constant_op.constant([-2], dtype=axis_dtype)))
            x_tf_3 = self.evaluate(
                reverse_f(x_np, constant_op.constant([1], dtype=axis_dtype)))
            x_tf_4 = self.evaluate(
                reverse_f(x_np, constant_op.constant([-1], dtype=axis_dtype)))
            x_tf_5 = self.evaluate(
                reverse_f(x_np, constant_op.constant([1, 0], dtype=axis_dtype)))
            self.assertAllEqual(x_tf_1, np.asarray(x_np)[::-1, :])
            self.assertAllEqual(x_tf_2, np.asarray(x_np)[::-1, :])
            self.assertAllEqual(x_tf_3, np.asarray(x_np)[:, ::-1])
            self.assertAllEqual(x_tf_4, np.asarray(x_np)[:, ::-1])
            self.assertAllEqual(x_tf_5, np.asarray(x_np)[::-1, ::-1])

  # This test covers the axis validation in the shape function
  # (no eval()).
  def testInvalidAxis(self):
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                "is out of.* range"):
      array_ops.reverse_v2(x_np, [-30])
    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                "is out of.* range"):
      array_ops.reverse_v2(x_np, [2])
    with self.assertRaisesRegex(
        (ValueError, errors.InvalidArgumentError),
        r"axis 0 specified more than once|axis 0 was repeated"):
      array_ops.reverse_v2(x_np, [0, -2])
  # This is the version of reverse that uses axis indices rather than
  # bool tensors.
  # TODO(b/32254538): Change this test to use array_ops.reverse.
  #
  # Note: this test passes a placeholder, since a constant axis is already
  # validated in the shape function (see testInvalidAxis).
  def testInvalid(self):
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)

    @def_function.function
    def func(ax):
      return array_ops.reverse_v2(x_np, ax)

    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
                                "is out of.*range"):
      func([-30])
    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
                                "is out of.*range"):
      func([2])
    with self.assertRaisesRegex(
        (ValueError, errors_impl.InvalidArgumentError),
        "(axis 0 specified more than once|canonicalized axis 0 was repeated.)"):
      func([0, -2])

  def testReverse1DimAuto(self):
    for dtype in [
        np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64,
        np.int64, np.bool_, np.float16, np.float32, np.float64, np.complex64,
        np.complex128,
        np.array(b"").dtype.type
    ]:
      self._reverse1DimAuto(dtype)

  def testReverse2DimAuto(self):
    for dtype in [
        np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64,
        np.int64, np.bool_, np.float16, np.float32, np.float64, np.complex64,
        np.complex128,
        np.array(b"").dtype.type
    ]:
      self._reverse2DimAuto(dtype)

  def testReverseRowsOf3Channels(self):
    """Tests optimized code for reversing rows with last dim size = 3."""
    for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
      for outer_size in (1, 2):
        for middle_size in list(range(50)) + [100000]:
          with self.subTest(
              reverse_f=reverse_f,
              outer_size=outer_size,
              middle_size=middle_size,
              use_gpu=True):
            x_np = np.reshape(
                np.arange(outer_size * middle_size * 3, dtype=np.float32),
                newshape=(outer_size, middle_size, 3))
            x_tf = self.evaluate(reverse_f(x_np, [1]))
            np_answer = x_np[:, ::-1, :]
            self.assertAllEqual(x_tf, np_answer)

  def testReverseRowsOf4Channels(self):
    for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
      for outer_size in (1, 2):
        for middle_size in list(range(50)) + [100000]:
          with self.subTest(
              reverse_f=reverse_f,
              outer_size=outer_size,
              middle_size=middle_size,
              use_gpu=True):
            x_np = np.reshape(
                np.arange(outer_size * middle_size * 4, dtype=np.float32),
                newshape=(outer_size, middle_size, 4))
            x_tf = self.evaluate(reverse_f(x_np, [1]))
            np_answer = x_np[:, ::-1, :]
            self.assertAllEqual(x_tf, np_answer)

  def testReverseColumnsOf3Channels(self):
    for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
      for outer_size in list(range(50)) + [100000]:
        for middle_size in (1, 2):
          with self.subTest(
              reverse_f=reverse_f,
              outer_size=outer_size,
              middle_size=middle_size,
              use_gpu=True):
            x_np = np.reshape(
                np.arange(outer_size * middle_size * 3, dtype=np.float32),
                newshape=(outer_size, middle_size, 3))
            x_tf = self.evaluate(reverse_f(x_np, [0]))
            np_answer = x_np[::-1, :, :]
            self.assertAllEqual(x_tf, np_answer)

  def testReverseInvalidShape(self):
    x = np.ndarray(shape=[0, 1, 1])
    v = array_ops.reverse_v2(x, axis=[1])
    self.assertAllEqual(self.evaluate(v), v)


class MeshgridTest(test_util.TensorFlowTestCase):

  def _compareDiff(self, x, y, use_gpu):
    for index in ("ij", "xy"):
      numpy_out = np.meshgrid(x, y, indexing=index)
      tf_out = array_ops.meshgrid(x, y, indexing=index)
      with self.cached_session(use_gpu=use_gpu):
        for xx, yy in zip(numpy_out, tf_out):
          self.assertAllEqual(xx, yy)

  def _compareDiffType(self, n, np_dtype, use_gpu):
    inputs = []
    for index in ("ij", "xy"):
      for _ in range(n):
        x = np.linspace(-10, 10, 5).astype(np_dtype)
        if np_dtype in (np.complex64, np.complex128):
          x += 1j
        inputs.append(x)
      numpy_out = np.meshgrid(*inputs, indexing=index)
      with test_util.device(use_gpu=use_gpu):
        tf_out = array_ops.meshgrid(*inputs, indexing=index)
        for x_np, x_tf in zip(numpy_out, tf_out):
          self.assertAllEqual(x_np, x_tf)

  def testCompare(self):
    for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
              np.complex64, np.complex128):
      with self.subTest(t=t):
        self._compareDiffType(2, t, False)
        self._compareDiffType(3, t, False)

        x = [1, 2, 3]
        y = [4, 5]

        a = [[1, 1], [1, 1]]

        self._compareDiff(x, y, False)
        self._compareDiff(x, a, False)


class StridedSliceChecker(object):
  """Check a given tensor against the numpy result."""

  REF_TENSOR = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
  REF_TENSOR_ALIGNED = np.arange(1, 97, dtype=np.float32).reshape(3, 4, 8)
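  # REF_TENSOR holds the values 1..18 in shape (3, 2, 3); the aligned variant
  # holds 1..96 in shape (3, 4, 8), presumably so its rows exercise the
  # memory-aligned fast paths of the strided-slice kernels.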

  def __init__(self, test, x, tensor_type=dtypes.int32, check_type_infer=True):
    self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype)
    if tensor_type.is_bool:
      self.x_np = np.array(x % 3).astype(np.bool_)
    # Give the value a non-zero imaginary component for complex types.
    if tensor_type.is_complex:
      self.x_np -= 1j * self.x_np
    self.test = test
    self.x = constant_op.constant(self.x_np, dtype=tensor_type)
    self.check_type_infer = check_type_infer

  def __getitem__(self, spec):
    op = self.x.__getitem__(spec)

    def eval_if_tensor(x):
      try:
        return self.test.evaluate(x)
      except (AttributeError, TypeError, ValueError):
        return x

    def casts_to_bool_nparray(x):
      try:
        return np.asarray(x).dtype == bool
      except NotImplementedError:
        return False

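    # Boolean specs (a scalar bool, a bool Tensor, a bool ndarray, or a list
    # that converts to a bool array) follow numpy's boolean fancy indexing,
    # so the result is compared directly against self.x_np[spec].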
    if isinstance(spec, bool) or \
      (isinstance(spec, ops.Tensor) and spec.dtype == dtypes.bool) or \
      (isinstance(spec, np.ndarray) and spec.dtype == bool) or \
      (isinstance(spec, (list, tuple)) and casts_to_bool_nparray(spec)):
      tensor = self.test.evaluate(op)
      np_spec = eval_if_tensor(spec)
      self.test.assertAllEqual(self.x_np[np_spec], tensor)
      return tensor

    if not isinstance(spec, (list, tuple)):
      spec = [spec]

    tensor = self.test.evaluate(op)

    # Make a numpy spec that pre-evals the tensors
    np_specs = []

    for s in spec:
      if isinstance(s, slice):
        start = eval_if_tensor(s.start)
        stop = eval_if_tensor(s.stop)
        step = eval_if_tensor(s.step)
        np_specs.append(slice(start, stop, step))
      else:
        np_specs.append(eval_if_tensor(s))

    self.test.assertAllEqual(self.x_np[tuple(np_specs)], tensor)
    if self.check_type_infer:
      self.test.assertAllEqual(tensor.shape, op.get_shape())
    return tensor


STRIDED_SLICE_TYPES = [
    dtypes.int32, dtypes.int64, dtypes.int16, dtypes.int8, dtypes.uint8,
    dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128,
    dtypes.bool
]


class StridedSliceTest(test_util.TensorFlowTestCase):
  """Test the strided slice operation with variants of slices."""

  def test_basic_slice(self):
    for tensor_type in STRIDED_SLICE_TYPES:
      with self.subTest(tensor_type=tensor_type, use_gpu=True):
        checker = StridedSliceChecker(
            self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)
        _ = checker[:, :, :]
        # Various ways of representing identity slice
        _ = checker[:, :, :]
        _ = checker[::, ::, ::]
        _ = checker[::1, ::1, ::1]
        # Not zero slice
        _ = checker[::1, ::5, ::2]
        # Reverse in each dimension independently
        _ = checker[::-1, :, :]
        _ = checker[:, ::-1, :]
        _ = checker[:, :, ::-1]
        ## negative index tests i.e. n-2 in first component
        _ = checker[-2::-1, :, ::1]
        # negative index tests i.e. n-2 in first component, non-unit stride
        _ = checker[-2::-1, :, ::2]

        # Check rank-0 examples
        checker2 = StridedSliceChecker(self, 5, tensor_type=tensor_type)
        _ = checker2[None]
        _ = checker2[...]
        _ = checker2[tuple()]

  def testInt64GPU(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")

    with test_util.force_gpu():
      x = constant_op.constant([1., 2., 3.])
      begin = constant_op.constant([2], dtype=dtypes.int64)
      end = constant_op.constant([3], dtype=dtypes.int64)
      strides = constant_op.constant([1], dtype=dtypes.int64)
      s = array_ops.strided_slice(x, begin, end, strides)
      self.assertAllEqual([3.], self.evaluate(s))

  @test_util.assert_no_new_pyobjects_executing_eagerly
  @test_util.assert_no_garbage_created
  def testTensorSliceEagerMemory(self):
    with context.eager_mode():
      inputs = constant_op.constant([[[1], [2], [3], [4]]],
                                    dtype=dtypes.float32)
      # Tests that slicing an EagerTensor doesn't leak memory
      inputs[0]  # pylint: disable=pointless-statement

  @test_util.assert_no_new_pyobjects_executing_eagerly
  @test_util.assert_no_garbage_created
  def testVariableSliceEagerMemory(self):
    with context.eager_mode():
      v = variables.Variable([1., 2.])
      v[0]  # pylint: disable=pointless-statement

  def testDegenerateSlices(self):
    with test_util.device(use_gpu=True):
      checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
      # degenerate by offering a forward interval with a negative stride
      _ = checker[0:-1:-1, :, :]
      # degenerate with a reverse interval with a positive stride
      _ = checker[-1:0, :, :]
      # empty interval in every dimension
      _ = checker[-1:0, 2:2, 2:3:-1]
      # empty first dimension only (used to break for aligned tensors).
      checker = StridedSliceChecker(self,
                                    StridedSliceChecker.REF_TENSOR_ALIGNED)
      _ = checker[1:0]

  def testSliceWithUndefinedDimension(self):
    t = constant_op.constant([1, 2, 3])
    d = tensor_shape.Dimension(None)
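    # A slice whose start/stop/step are all unknown Dimensions behaves like
    # `[:]`, so the result should equal the input.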
    self.assertAllEqual(t[d:d:d], t)

  def testEllipsis(self):
    with test_util.device(use_gpu=True):
      raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]
      checker = StridedSliceChecker(self, raw)

      _ = checker[0:]
      # implicit ellipsis
      _ = checker[0:, ...]
      # ellipsis alone
      _ = checker[...]
      # ellipsis at end
      _ = checker[0:1, ...]
      # ellipsis at begin
      _ = checker[..., 0:1]
      # ellipsis at middle
      _ = checker[0:1, ..., 0:1]
      # multiple ellipses not allowed
      with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                  "Multiple ellipses"):
        _ = checker[..., :, ...].eval()

  def testShrink(self):
    with test_util.device(use_gpu=True):
      raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
      checker = StridedSliceChecker(self, raw)
      _ = checker[:, :, :, :, 3]
      _ = checker[..., 3]
      _ = checker[:, 0]
      _ = checker[:, :, 0]

  def testBothNewAxisAndShrink(self):
    with test_util.device(use_gpu=True):

      @def_function.function
      def func(inp):
        return inp[array_ops.newaxis, :, 0]

      f = func.get_concrete_function(
          tensor_spec.TensorSpec([2, 2], dtypes.int16))

      # TODO(b/190416665): Allow the constant to be eagerly copied/created on
      # the GPU.
      with ops.device("CPU"):
        ones = constant_op.constant([[1, 1], [1, 1]], dtypes.int16)
      self.assertAllEqual([[1, 1]], self.evaluate(f(ones)))

  def testTensorIndexing(self):
    with test_util.device(use_gpu=True):
      raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
      checker = StridedSliceChecker(self, raw, check_type_infer=False)
      bar = constant_op.constant(2)
      bar2 = constant_op.constant(3)
      _ = checker[..., bar:bar2]
      _ = checker[..., bar]
      _ = checker[..., 3]
      _ = checker[..., 2**64 // 2**63]  # Test longs in Python 2

  def testTensorIndexingTypeError(self):
    with self.session():
      checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
      expected = re.escape(array_ops._SLICE_TYPE_ERROR)
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker["foo"]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[constant_op.constant("foo")]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[0.0]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[constant_op.constant(0.0)]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[constant_op.constant([1, 2, 3])]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[[2.1, -0.7, 1.5]]

  def testExpand(self):
    with test_util.device(use_gpu=True):
      raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
      checker = StridedSliceChecker(self, raw)
      # new axis (followed by implicit ellipsis)
      _ = checker[np.newaxis]
      # newaxis after ellipsis
      _ = checker[..., np.newaxis]
      # newaxis in between ellipsis and explicit range
      _ = checker[..., np.newaxis, :]
      _ = checker[:, ..., np.newaxis, :, :]
      # Reverse final dimension with new axis
      _ = checker[:, :, np.newaxis, :, 2::-1]
      # Ellipsis in middle of two newaxis
      _ = checker[np.newaxis, ..., np.newaxis]

  def testExpandVariable(self):
    with test_util.device(use_gpu=True):
      x = variables.Variable(7, dtype=dtypes.int32)
      self.evaluate(x.initializer)
      y = self.evaluate(x[None])
      self.assertEqual(y.shape, (1,))
      self.assertAllEqual(y, (7,))

  def testOptimizedCases(self):
    with test_util.device(use_gpu=True):
      checker = StridedSliceChecker(self,
                                    StridedSliceChecker.REF_TENSOR_ALIGNED)
      # Identity
      _ = checker[:]
      # Identity
      _ = checker[...]
      # Identity
      _ = checker[np.newaxis, ..., np.newaxis]
      # First axis slice
      _ = checker[1:]
      # First axis slice
      _ = checker[np.newaxis, 1:]

  def testMasks(self):
    with test_util.device(use_gpu=True):
      scalar = np.array(0)
      # Test tensor type mask
      checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
      _ = checker[checker.x > 2]
      _ = checker[checker.x <= 5]
      _ = checker[ops.convert_to_tensor(scalar)]

      # Test numpy array type mask
      raw = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
                       [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23,
                                                              24]]]]])
      checker1 = StridedSliceChecker(self, raw)
      _ = checker1[raw >= 4]
      _ = checker1[raw < 19]
      _ = checker1[scalar]

      # Test boolean and non boolean cases
      mask = np.array([True, False, True])
      raw1 = np.array([[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]])
      checker2 = StridedSliceChecker(self, raw1)
      _ = checker2[mask]
      _ = checker2[ops.convert_to_tensor(mask)]

  def test_int16_indices(self):

    def _int16(i):
      return constant_op.constant(i, dtype=dtypes.int16)

    def _int64(i):
      return constant_op.constant(i, dtype=dtypes.int64)

    for tensor_type in STRIDED_SLICE_TYPES:
      with self.subTest(tensor_type=tensor_type, use_gpu=True):
        checker = StridedSliceChecker(
            self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)

        _ = checker[_int16(1)]

        with self.assertRaises(Exception):
          _ = checker[_int16(1)::1, :, 1:_int64(3):2]
        with self.assertRaises(Exception):
          _ = checker[:, _int16(1):_int16(5):-1, :]
        with self.assertRaises(Exception):
          _ = checker[::_int64(1), _int64(1):10:_int16(3), ::_int64(2)]

        _ = checker[::_int16(1), _int16(1)::_int16(5), ::2]
        _ = checker[_int16(1):_int16(5):_int16(2), 1:2, :]


class StridedSliceShapeTest(test_util.TensorFlowTestCase):
  """Test the shape inference of StridedSliceShapes."""

  def testUnknown(self):
    with test_util.device(use_gpu=True):

      @def_function.function
      def f(x):
        y = x[...]
        self.assertAllEqual(y.get_shape().ndims, None)

      _ = f.get_concrete_function(tensor_spec.TensorSpec(None, dtypes.float32))

  def tensorShapeEqual(self, x, y):
    self.assertTrue(x is not None and y is not None or x is None and y is None)
    self.assertEqual(x.as_list(), y.as_list())

  def testTensorShapeUncertain(self):
    with test_util.device(use_gpu=True):

      @def_function.function
      def f1(x):
        y = x[3:5]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 7]))

      _ = f1.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f2(x):
        y = x[3:5, :, 4]
        self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2,
                                                                       None]))

      _ = f2.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f3(x):
        y = x[3:5, 3:4, 4]
        self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2,
                                                                       None]))

      _ = f3.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f4(x):
        y = x[3:5, :, 5:10]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 2]))

      _ = f4.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f5(x):
        y = x[3:5, :, 50:3]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 0]))

      _ = f5.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f6(x):
        y = x[3:5, :, array_ops.newaxis, 50:3,]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 1, 0]))

      _ = f6.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f7(x):
        y = x[1:5:2, :, array_ops.newaxis, 50:3,]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 1, 0]))

      _ = f7.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f8(x):
        y = x[:5:3, :, array_ops.newaxis, 50:3,]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 1, 0]))

      _ = f8.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f9(x):
        y = x[:2:3, :, array_ops.newaxis, 50:3,]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([1, None, 1, 0]))

      _ = f9.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f10(x):
        y = x[::-1, :, array_ops.newaxis, ::-2]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([5, None, 1, 4]))

      _ = f10.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

  def testTensorValuedIndexShape(self):
    with self.session():

      @def_function.function
      def f1(x, y):
        z = x[y]
        self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([3, 7]))

      _ = f1.get_concrete_function(
          tensor_spec.TensorSpec((5, 3, 7)),
          tensor_spec.TensorSpec((), dtypes.int32))

      @def_function.function
      def f2(x, y):
        z = x[y, ::-1]
        self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([3, 7]))

      _ = f2.get_concrete_function(
          tensor_spec.TensorSpec((5, 3, 7)),
          tensor_spec.TensorSpec((), dtypes.int32))

      @def_function.function
      def f3(x, y):
        z = x[y, ::-2]
        self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([2, 7]))

      _ = f3.get_concrete_function(
          tensor_spec.TensorSpec((5, 3, 7)),
          tensor_spec.TensorSpec((), dtypes.int32))

      @def_function.function
      def f4(x, y, s):
        z = x[y, s:2]
        self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([None,
                                                                       7]))

      _ = f4.get_concrete_function(
          tensor_spec.TensorSpec((5, 3, 7)),
          tensor_spec.TensorSpec((), dtypes.int32),
          tensor_spec.TensorSpec((), dtypes.int32))


class GradSliceChecker(object):
  """Tests that we can compute a gradient for var^2."""

  def __init__(self, test, var, varnp, use_tape):
    self.test = test
    self.var = var
    self.varnp = varnp
    self.use_tape = use_tape

  def __getitem__(self, spec):
    with test_util.AbstractGradientTape(
        use_tape=self.use_tape, persistent=True) as tape:
      tape.watch(self.var)
      val = self.var * self.var
      slice_var = self.var[spec]
      slice_val = val[spec]

      # compute analytic 2nd derivative
      analytic_grad2 = 2 * slice_val
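      # (slice_val_grad below is linear in dy, so differentiating it against
      # dy with output gradient var contracts to 2 * var[spec] * var[spec],
      # which is exactly 2 * slice_val.)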

      dy = variables.Variable(
          array_ops.ones_like(slice_var, dtype=dtypes.float32))
      assign = dy.assign(slice_var)

      slice_val_grad = tape.gradient(slice_val, self.var, [dy])
      slice_val_grad2 = tape.gradient(slice_val_grad, dy, [self.var])
    self.test.evaluate(assign)
    slice_val_grad_evaled, slice_val_grad2_evaled = (
        self.test.evaluate([slice_val_grad, slice_val_grad2]))
    analytic_grad2_evaled = self.test.evaluate(analytic_grad2)
    self.test.assertAllEqual(slice_val_grad2_evaled, analytic_grad2_evaled)

    # compute analytic gradient for slice
    np_val_grad = (2 * self.varnp * self.varnp)
    np_sliceval_grad = np.zeros(self.var.get_shape())
    if isinstance(spec, ops.Tensor):
      spec = self.test.evaluate(spec)
    np_sliceval_grad[spec] = np_val_grad[spec]
    # verify gradient
    self.test.assertAllEqual(slice_val_grad_evaled, np_sliceval_grad)


class StridedSliceGradTest(test_util.TensorFlowTestCase,
                           parameterized.TestCase):
  """Test that strided slice's custom gradient produces correct gradients."""

  @parameterized.parameters(set((True, context.executing_eagerly())))
  @test_util.disable_xla(
      "b/210077724: Auto-clustering with where op isn't supported. Has loose "
      "output shape bounds")
  def testGradient(self, use_tape):
    with test_util.device(use_gpu=True):
      var = variables.Variable(
          array_ops.reshape(
              math_ops.range(1, 97, 1, dtype=dtypes.float32), shape=(6, 4, 4)))
      self.evaluate(var.initializer)

      raw = np.array(range(1, 97, 1)).reshape((6, 4, 4))
      grad = GradSliceChecker(self, var, raw, use_tape)
      _ = grad[2:6:2, 1:3, 1:3]
      _ = grad[3:0:-2, 1:3, 1:3]
      _ = grad[3:0:-2, array_ops.newaxis, 1:3, 2, array_ops.newaxis]
      _ = grad[3:0:-2, 1:3, 2]
      _ = grad[:, -1, :]
      _ = grad[:, -2, :]
      with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                  "out of bounds"):
        _ = grad[:, -200, :]
      with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                  "out of bounds"):
        _ = grad[:, 200, :]

      # Test numpy array type mask
      _ = grad[raw > 51]
      # Test tensor type mask
      _ = grad[ops.convert_to_tensor(raw) <= 76]

  @parameterized.parameters(set((True, context.executing_eagerly())))
  def testGradientZero(self, use_tape):
    with test_util.device(use_gpu=True):
      var = variables.Variable(8.)
      self.evaluate(var.initializer)
      grad = GradSliceChecker(self, var, np.array(8), use_tape)
      _ = grad[tuple()]

  @parameterized.parameters(set((True, context.executing_eagerly())))
  def testInt64Indices(self, use_tape):
    with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
      a = math_ops.range(3, dtype=dtypes.float32)
      tape.watch(a)
      index = constant_op.constant(1, dtype=dtypes.int64)
      b = 2. * a[index]
    grad = tape.gradient(b, a)
    self.assertAllEqual(self.evaluate(grad), [0., 2., 0.])


class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
  """Test varied index types and host located memory."""

  def testHostVsDevice(self):
    var2 = variables.Variable(
        array_ops.reshape(
            math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
            shape=(4, 1, 1)))
    varshape = variables.Variable([6, 4, 4], dtype=dtypes.int32)
    begin = constant_op.constant([0, 0, 0])
    end = constant_op.constant([4, 1, 1])
    strides = constant_op.constant([1, 1, 1])
    foo = array_ops.strided_slice_grad(varshape, begin, end, strides, var2)
    self.evaluate(var2.initializer)
    self.evaluate(varshape.initializer)
    self.evaluate(foo)

  def testInt64Shape(self):
    original_dy = array_ops.reshape(
        math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32), shape=(4, 1, 1))
    original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
    begin = constant_op.constant([0, 0, 0], dtype=dtypes.int64)
    end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
    strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
    dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
                                      original_dy)
    self.evaluate(dx)

  def testMixedIndexTypes(self):
    original_dy = array_ops.reshape(
        math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32), shape=(4, 1, 1))
    original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
    begin = constant_op.constant([0, 0, 0], dtype=dtypes.int32)
    end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
    strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
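    # begin is int32 while end and strides are int64; strided_slice_grad
    # requires a single index type, so this should fail.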
    with self.assertRaises((TypeError, errors_impl.InvalidArgumentError)):
      dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
                                        original_dy)
      self.evaluate(dx)


class BenchmarkSlice(object):

  def __init__(self, tensor):
    self.tensor = tensor

  def __getitem__(self, x):
    return self.tensor[x]


class StridedSliceBenchmark(test_lib.Benchmark):
  """Benchmark new strided slice operation on non-trivial case."""

  def run_and_time(self, slice_op):
    self.evaluate(variables.global_variables_initializer())
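    # Warm-up runs so that one-time costs (tracing, placement, caching) do
    # not skew the timing below.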
    for _ in range(10):
      _ = self.evaluate(slice_op)
    iters = 1000
    t0 = time.time()
    for _ in range(iters):
      self.evaluate(slice_op)
    t1 = time.time()
    self.report_benchmark(iters=iters, wall_time=(t1 - t0) / 1000.0)

  def make_variable(self):
    n = 256
    shape = (n, n, n)
    items = n**3
    var = variables.Variable(
        array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
        dtype=dtypes.float32)
    return var

  def benchmark_strided_slice_skip(self):
    with session.Session():
      var = self.make_variable()
      helper = BenchmarkSlice(var)
      slice_op = helper[::2, ::1, ::2]
      self.run_and_time(slice_op)

  def benchmark_strided_slice_easy(self):
    with session.Session():
      var = self.make_variable()
      helper = BenchmarkSlice(var)
      slice_op = helper[3::1, 3::1, 3::1]
      self.run_and_time(slice_op)

  def benchmark_slice_easy(self):
    with session.Session():
      var = self.make_variable()
      slice_op = var[3::1, 3::1, 3::1]
      self.run_and_time(slice_op)


class StridedSliceAssignChecker(object):

  def __init__(self, test, x, tensor_type=dtypes.float32, use_resource=False):
    self.tensor_type = tensor_type
    self.test = test
    self._use_resource = use_resource

    self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype)
    # Give the value a non-zero imaginary component for complex types.
    if tensor_type.is_complex:
      self.x_np -= 1j * self.x_np
    self.x = constant_op.constant(self.x_np, dtype=tensor_type)

  def __setitem__(self, index, value):
    value = np.array(value).astype(self.tensor_type.as_numpy_dtype)
    # Give the value a non-zero imaginary component for complex types.
    if self.tensor_type.is_complex:
      value -= 1j * value

    with test_util.device(use_gpu=True):
      if self._use_resource:
        var = resource_variable_ops.ResourceVariable(self.x)
      else:
        var = variables.Variable(self.x)
      self.test.evaluate(var.initializer)
      val = self.test.evaluate(var[index].assign(value))
      # val_copy is used to check that tf.compat.v1.assign works equivalently
      # to the assign method above.
      val_copy = self.test.evaluate(state_ops.assign(var[index], value))
      valnp = np.copy(self.x_np)
      valnp[index] = np.array(value)
      self.test.assertAllEqual(val, valnp)
      self.test.assertAllEqual(val_copy, valnp)


class SliceAssignTest(test_util.TensorFlowTestCase, parameterized.TestCase):

  def testInvalidSlice(self):
    foo = constant_op.constant([1, 2, 3])
    with self.assertRaisesRegex(AttributeError, "no attribute 'assign'"):
      bar = foo[:2].assign(constant_op.constant([1, 2]))
      self.evaluate(bar)

  def doTestSliceAssign(self, use_resource):
    for dtype in STRIDED_SLICE_TYPES:
      with self.subTest(dtype=dtype):
        checker = StridedSliceAssignChecker(
            self, [[1, 2, 3], [4, 5, 6]],
            use_resource=use_resource,
            tensor_type=dtype)
        # Check if equal
        checker[:] = [[10, 20, 30], [40, 50, 60]]
        # Check trivial (1,1) shape tensor
        checker[1:2, 1:2] = [[66]]
        # shrinks shape changes
        checker[1:2, 1] = [66]
        checker[1, 1:2] = [66]
        checker[1, 1] = 66
        # newaxis shape changes
        checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
        # shrink and newaxis
        checker[None, None, 0, 0:1] = [[[99]]]
        # Non unit strides
        checker[::1, ::-2] = [[3, 33], [4, 44]]
        # degenerate interval
        checker[8:10, 0] = []
        checker[8:10, 8:10] = [[]]
    # Assign vector to scalar (rank-0) using newaxis
    checker2 = StridedSliceAssignChecker(self, 222)
    checker2[()] = 6  # no indices
    checker2[...] = 6  # ellipsis
    checker2[None] = [6]  # new axis

  def doTestSliceAssignWithBroadcasting(self, use_resource):
    for dtype in STRIDED_SLICE_TYPES:
      with self.subTest(dtype=dtype):
        checker = StridedSliceAssignChecker(
            self, [[1, 2, 3], [4, 5, 6]],
            use_resource=use_resource,
            tensor_type=dtype)
        # Broadcast to full LHS.
        checker[:] = [[40, 50, 60]]
        # Assign a trivial (1,1) tensor.
        checker[1:2, 1:2] = 66
        # Broadcast with shrink axis shape changes.
        checker[1:2, 1] = 66
        checker[1, 1:2] = 66
        # Broadcast with newaxis shape changes.
        checker[:, None, :] = [10, 20, 30]
        # Broadcast with both shrink and newaxis.
        checker[None, None, 0, 0:1] = 99
        # Broadcast with non-unit strides.
        checker[::1, ::-2] = [[4, 44]]
        # Broadcast a degenerate interval.
        checker[8:10, 8:10] = []

  @test_util.disable_xla("b/123559667")
  def testSliceAssign(self):
    self.doTestSliceAssign(use_resource=False)

  @test_util.disable_xla("b/123559667")
  def testSliceAssignWithBroadcasting(self):
    self.doTestSliceAssignWithBroadcasting(use_resource=False)

  @test_util.disable_xla("b/123559667")
  def testSliceAssignResource(self):
    self.doTestSliceAssign(use_resource=True)

  def testTypeError(self):
    init_val = constant_op.constant([1, 2], dtype=dtypes.int32)
    too_small_val = constant_op.constant([3, 4], dtype=dtypes.int8)
    too_large_val = constant_op.constant([3, 4], dtype=dtypes.int64)
    v = variables.VariableV1(init_val)
    with self.assertRaises((ValueError, TypeError)):
      self.evaluate(v[:].assign(too_small_val))
    with self.assertRaises((ValueError, TypeError)):
      self.evaluate(v[:].assign(too_large_val))

  def testTypeErrorResource(self):
    init_val = constant_op.constant([1, 2], dtype=dtypes.int32)
    too_small_val = constant_op.constant([3, 4], dtype=dtypes.int8)
    too_large_val = constant_op.constant([3, 4], dtype=dtypes.int64)
    v = resource_variable_ops.ResourceVariable(init_val)
    self.evaluate(v.initializer)
    with self.assertRaises(ValueError):
      self.evaluate(v[:].assign(too_large_val))
    with self.assertRaises(ValueError):
      self.evaluate(v[:].assign(too_small_val))

  @test_util.disable_xla("b/123559667")
  @test_util.run_in_graph_and_eager_modes
  def testTensorStridedSliceUpdateWithInputForward(self):
    """Tests tensor_strided_slice_update with input-forwarding taking effect."""
    @def_function.function
    def assign(x):
      y = x + 1
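      # Writes [0] over y[0:1:1]; y is not used afterwards, so its buffer
      # can be forwarded into the op's output.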
1355      return gen_array_ops.tensor_strided_slice_update(y, [0], [1], [1], [0])
    self.assertAllEqual([0, 1], self.evaluate(assign(array_ops.zeros([2]))))

  @test_util.run_in_graph_and_eager_modes
  def testTensorStridedSliceUpdateWithInputForwardInt32(self):
    """Tests tensor_strided_slice_update with int32."""
    @def_function.function
    def assign(x):
      y = x + 1
      return gen_array_ops.tensor_strided_slice_update(y, [0], [1], [1], [0])
    self.assertAllEqual(
        [0, 1], self.evaluate(assign(array_ops.zeros([2], dtype=dtypes.int32))))

  @test_util.disable_xla("b/123559667")
  @test_util.run_in_graph_and_eager_modes
  def testTensorStridedSliceUpdateNoInputForward(self):
    """Tests tensor_strided_slice_update with no input-forwarding."""
    x = constant_op.constant([0.2, 0.3])
    y = x + 1
    # y's buffer won't be forwarded to z because y and z will be alive at the
    # same time later.
    z = gen_array_ops.tensor_strided_slice_update(y, [0], [1], [1], [0.4])
    ans = y + z
    self.assertAllClose([1.6, 2.6], self.evaluate(ans))

  @test_util.disable_xla("b/123559667")
  def testTensorStridedSliceUpdateGradSimple(self):
    original = constant_op.constant([0.2, 0.3])
    updates = constant_op.constant([0.4])
    with backprop.GradientTape() as tape:
      tape.watch([original, updates])
      updated = gen_array_ops.tensor_strided_slice_update(
          original, [0], [1], [1], updates)
    d1, d2 = tape.gradient(updated, [original, updates],
                           output_gradients=constant_op.constant([2.0, 3.0]))
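    # The update overwrites original[0], so its upstream gradient (2.0) is
    # routed to `updates` and original[0] receives 0.0; the untouched
    # original[1] passes its gradient (3.0) straight through.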
    self.assertAllClose([0.0, 3.0], d1)
    self.assertAllClose([2.0], d2)

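  # Each parameter tuple below is (shape, begin, end, strides, updates_shape,
  # begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask),
  # matching the mask arguments of tensor_strided_slice_update.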
  @parameterized.named_parameters(
      ("_%s" % i, *args) for i, args in enumerate([  # pylint:disable=g-complex-comprehension
          ([2, 5], [0, 1], [1, 0], [1, 2], [2], 0, 2, 0, 0, 1),
          ([4], [5], [3], [1], [3], 1, 0, 0, 0, 0),
          ([2, 2, 3, 2], [0, 0, 1], [1, 0, 2], [1, 0, 1], [2, 3], 0, 0, 2, 0, 5)
      ]))
  @test_util.disable_xla("b/123559667")
  def testTensorStridedSliceUpdateGrad(
      self, shape, begin, end, strides, updates_shape, *args):
    with self.cached_session():
      def f(a, b):
        return gen_array_ops.tensor_strided_slice_update(
            a, begin, end, strides, b, *args)
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          f, [array_ops.zeros(shape), array_ops.ones(updates_shape)], delta=1.0)
      self.assertAllClose(theoretical, numerical)

  @parameterized.named_parameters(
      ("_%s" % i, *args) for i, args in enumerate([  # pylint:disable=g-complex-comprehension
          ([2, 5], [0, 1], [1, 0], [1, 2], [1], 0, 2, 0, 0, 1),
          ([4], [5], [3], [1], [], 1, 0, 0, 0, 0),
          ([2, 2, 3, 2], [0, 0, 1], [1, 0, 2], [1, 0, 1], [2, 1], 0, 0, 2, 0, 5)
      ]))
  @test_util.disable_xla("b/123559667")
  def testTensorStridedSliceUpdateWithBroadcastingGrad(self, shape, begin, end,
                                                       strides, updates_shape,
                                                       *args):
    with self.cached_session():

      def f(a, b):
        return gen_array_ops.tensor_strided_slice_update(
            a, begin, end, strides, b, *args)

      theoretical, numerical = gradient_checker_v2.compute_gradient(
          f, [array_ops.zeros(shape),
              array_ops.ones(updates_shape)], delta=1.0)
      self.assertAllClose(theoretical, numerical)


class ShapeSizeRankTest(test_util.TensorFlowTestCase):

  @test_util.run_in_graph_and_eager_modes
  def testDenseShape(self):
    t_value = [[0, 42], [24, 0]]
    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t_value)))
    self.assertEqual(4, self.evaluate(array_ops.size(t_value)))
    self.assertEqual(2, self.evaluate(array_ops.rank(t_value)))

    t = constant_op.constant(t_value)
    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t)))
    self.assertEqual(4, self.evaluate(array_ops.size(t)))
    self.assertEqual(2, self.evaluate(array_ops.rank(t)))

  @test_util.run_in_graph_and_eager_modes
  def testSparseShape(self):
    sp_value = sparse_tensor.SparseTensorValue(
        indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp_value)))
    self.assertEqual(4, self.evaluate(array_ops.size(sp_value)))
    self.assertEqual(2, self.evaluate(array_ops.rank(sp_value)))

    sp = sparse_tensor.SparseTensor.from_value(sp_value)
    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp)))
    self.assertEqual(4, self.evaluate(array_ops.size(sp)))
    self.assertEqual(2, self.evaluate(array_ops.rank(sp)))

  @test_util.run_in_graph_and_eager_modes
  def testSizeDtype(self):
    tensor = [1]
    self.assertEqual(dtypes.int32, self.evaluate(array_ops.size(tensor)).dtype)
    self.assertEqual(
        dtypes.int64,
        self.evaluate(array_ops.size(tensor, out_type=dtypes.int64)).dtype)


class SequenceMaskTest(test_util.TensorFlowTestCase):

  def testExceptions(self):
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "`maxlen` must be scalar"):
        array_ops.sequence_mask([10, 20], [10, 20])

  def testOneDimensionalWithMaxlen(self):
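    # Row i of the mask holds lengths[i] leading True values, padded with
    # False out to maxlen.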
    res = array_ops.sequence_mask(constant_op.constant([1, 3, 2]), 5)
    self.assertAllEqual(res.get_shape(), [3, 5])
    self.assertAllEqual(
        res,
        [[True, False, False, False, False], [True, True, True, False, False],
         [True, True, False, False, False]])

  def testOneDimensionalDtypeWithoutMaxlen(self):
    # test dtype and default maxlen:
    res = array_ops.sequence_mask(
        constant_op.constant([0, 1, 4]), dtype=dtypes.float32)
    self.assertAllEqual(res.get_shape().as_list(), [3, 4])
    self.assertAllEqual(
        res, [[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]])

  def testOneDimensionalWithoutMaxlen(self):
    res = array_ops.sequence_mask(constant_op.constant([0, 1, 4]))
    self.assertAllEqual(res.get_shape().as_list(), [3, 4])
    self.assertAllEqual(res,
                        [[False, False, False, False],
                         [True, False, False, False], [True, True, True, True]])

  def testTwoDimensional(self):
    res = array_ops.sequence_mask(constant_op.constant([[1, 3, 2]]), 5)
    self.assertAllEqual(res.get_shape(), [1, 3, 5])
    self.assertAllEqual(
        res,
        [[[True, False, False, False, False], [True, True, True, False, False],
          [True, True, False, False, False]]])

    # test dtype and default maxlen:
    res = array_ops.sequence_mask(
        constant_op.constant([[0, 1, 4], [1, 2, 3]]), dtype=dtypes.float32)
    self.assertAllEqual(res.get_shape().as_list(), [2, 3, 4])
    self.assertAllEqual(
        res,
        [[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]],
         [[1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0]]])

  def testDtypes(self):

    def check_dtypes(lengths_dtype, maxlen_dtype):
      res = array_ops.sequence_mask(
          constant_op.constant([1, 3, 2], dtype=lengths_dtype),
          constant_op.constant(5, dtype=maxlen_dtype))
      self.assertAllEqual(res.get_shape(), [3, 5])
      self.assertAllEqual(
          res,
          [[True, False, False, False, False], [True, True, True, False, False],
           [True, True, False, False, False]])

    check_dtypes(dtypes.int32, dtypes.int32)
    check_dtypes(dtypes.int32, dtypes.int64)
    check_dtypes(dtypes.int64, dtypes.int32)
    check_dtypes(dtypes.int64, dtypes.int64)

  def testOutputDtype(self):

    def check_output_dtype(output_dtype):
      res = self.evaluate(
          array_ops.sequence_mask(
              constant_op.constant([1, 3, 2], dtype=dtypes.int32),
              constant_op.constant(5, dtype=dtypes.int32),
              dtype=output_dtype))
      self.assertAllEqual(
          res,
          self.evaluate(
              math_ops.cast([[True, False, False, False, False],
                             [True, True, True, False, False],
                             [True, True, False, False, False]], output_dtype)))

    check_output_dtype(dtypes.bool)
    check_output_dtype("bool")
    check_output_dtype(np.bool_)
    check_output_dtype(dtypes.int32)
    check_output_dtype("int32")
    check_output_dtype(np.int32)
    check_output_dtype(dtypes.float32)
    check_output_dtype("float32")
    check_output_dtype(np.float32)
    check_output_dtype(dtypes.int64)
    check_output_dtype("int64")
    check_output_dtype(np.int64)
    check_output_dtype(dtypes.float64)
    check_output_dtype("float64")
    check_output_dtype(np.float64)


class ConcatSliceResourceTest(test_util.TensorFlowTestCase):

  @test_util.run_in_graph_and_eager_modes
  def testConcatSlice(self):
    r1 = test_ops.stub_resource_handle_op(container="a", shared_name="b")
    r2 = test_ops.stub_resource_handle_op(container="a", shared_name="c")
    c = array_ops.stack([r1, r2])
    s = array_ops.strided_slice(c, [1], [2])
    self.evaluate(test_ops.resource_create_op(s))
    with self.assertRaises(errors.AlreadyExistsError):
      self.evaluate(test_ops.resource_create_op(r2))


class IdentityTest(test_util.TensorFlowTestCase):

  @test_util.run_gpu_only
  def testEagerIdentity(self):
    with context.eager_mode():
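      # identity run under a device scope should place its output on that
      # device while preserving the value; _test checks both.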

      def _test(x, y, device):
        self.assertAllEqual(x.numpy(), y.numpy())
        self.assertIn(device, y.device.lower())

      with test_util.force_gpu():
        a = constant_op.constant([[2], [3]], dtype=dtypes.float32)
      with test_util.force_gpu():
        b = array_ops.identity(a)
        _test(a, b, "gpu")
      with test_util.force_cpu():
        c = array_ops.identity(b)
        _test(b, c, "cpu")
      with test_util.force_cpu():
        d = array_ops.identity(c)
        _test(c, d, "cpu")
      with test_util.force_gpu():
        e = array_ops.identity(d)
        _test(d, e, "gpu")

  def testIdentityVariable(self):
    v = resource_variable_ops.ResourceVariable(1.0)
    self.evaluate(v.initializer)
    result = array_ops.identity(v)
    self.assertIsInstance(result, ops.Tensor)
    self.assertAllEqual(result, v)


class PadTest(test_util.TensorFlowTestCase):

  def testEager(self):
    with context.eager_mode():
      t = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      paddings = constant_op.constant([[1, 1], [2, 2]])
      padded = array_ops.pad(t, paddings, "CONSTANT")
      self.assertAllEqual(padded.numpy(),
                          [[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 0, 0],
                           [0, 0, 4, 5, 6, 0, 0], [0, 0, 0, 0, 0, 0, 0]])

  def testSymmetricMirrorPadGrad(self):
    t = np.broadcast_to(np.arange(0, 7), (3, 2, 1, 7))
    paddings = constant_op.constant([
        [1, 1],
        [0, 0],
        [0, 0],
        [2, 2],
    ])
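    # SYMMETRIC-padding a length-3 row [a, b, c] by [2, 2] yields
    # [b, a, a, b, c, c, b], so folding the incoming gradient [0..6] back
    # gives a = 1 + 2, b = 0 + 3 + 6, c = 4 + 5, i.e. [3, 9, 9]; the [1, 1]
    # padding on axis 0 then folds the three identical batch rows together,
    # tripling this to [9, 27, 27].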
    expected = np.broadcast_to(np.array([9, 27, 27]), (1, 2, 1, 3))
    result = gen_array_ops.mirror_pad_grad(t, paddings, "SYMMETRIC")
    self.assertAllEqual(result, expected)

  def testReflectMirrorPadGrad(self):
    t = np.broadcast_to(np.reshape(np.arange(0, 7), (7, 1)), (1, 4, 7, 1))
    paddings = constant_op.constant([
        [0, 0],
        [1, 1],
        [2, 2],
        [0, 0],
    ])
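    # REFLECT-padding a length-3 column [a, b, c] by [2, 2] yields
    # [c, b, a, b, c, b, a], so folding the incoming gradient [0..6] back
    # gives a = 2 + 6, b = 1 + 3 + 5, c = 0 + 4, i.e. [8, 9, 4]; the [1, 1]
    # reflection on axis 1 folds two identical rows onto each output row,
    # doubling this to [16, 18, 8].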
    expected = np.broadcast_to(
        np.reshape(np.array([16, 18, 8]), (3, 1)), (1, 2, 3, 1))
    result = gen_array_ops.mirror_pad_grad(t, paddings, "REFLECT")
    self.assertAllEqual(result, expected)


class InvertPermutationTest(test_util.TensorFlowTestCase):

  def testInvertPermutation(self):
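    # invert_permutation computes y with y[x[i]] = i; for x = [3, 4, 0, 2, 1]
    # that is [2, 4, 3, 0, 1].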
    for dtype in [dtypes.int32, dtypes.int64]:
      with self.subTest(dtype=dtype, use_gpu=True):
        x = constant_op.constant([3, 4, 0, 2, 1], dtype=dtype)
        y = array_ops.invert_permutation(x)
        self.assertAllEqual(y.get_shape(), [5])
        self.assertAllEqual(y, [2, 4, 3, 0, 1])


class UnravelIndexTest(test_util.TensorFlowTestCase):

  # TODO(b/73086570): Reenable test.
  @unittest.skip("Test does not pass internally.")
  def testUnravelIndex(self):
    with self.cached_session():
      for dtype in [dtypes.int32, dtypes.int64]:
        with self.subTest(dtype=dtype):
          indices_1 = constant_op.constant(1621, dtype=dtype)
          dims_1 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
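          # 1621 == 3*(7*8*9) + 1*(8*9) + 4*9 + 1*1, hence [3, 1, 4, 1].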
          out_1 = array_ops.unravel_index(indices_1, dims_1)
          self.assertAllEqual(out_1, [3, 1, 4, 1])

          indices_2 = constant_op.constant([1621], dtype=dtype)
          dims_2 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
          out_2 = array_ops.unravel_index(indices_2, dims_2)
          self.assertAllEqual(out_2, [[3], [1], [4], [1]])

          indices_3 = constant_op.constant([22, 41, 37], dtype=dtype)
          dims_3 = constant_op.constant([7, 6], dtype=dtype)
          out_3 = array_ops.unravel_index(indices_3, dims_3)
          self.assertAllEqual(out_3, [[3, 6, 6], [4, 5, 1]])

  # Test case for GitHub issue 40204.
  def testUnravelIndexZeroDim(self):
    with self.cached_session():
      for dtype in [dtypes.int32, dtypes.int64]:
        with self.assertRaisesRegex(errors.InvalidArgumentError,
                                    "dims cannot contain a dim of zero"):
          indices = constant_op.constant([2, 5, 7], dtype=dtype)
          dims = constant_op.constant([3, 0], dtype=dtype)
          self.evaluate(array_ops.unravel_index(indices=indices, dims=dims))

  def testUnravelIndexIntegerOverflow(self):
    with self.cached_session():
      for dtype in [dtypes.int32, dtypes.int64]:
        with self.assertRaisesRegex(
            errors.InvalidArgumentError,
            r"Input dims product is causing integer overflow"):
          indices = constant_op.constant(-0x100000, dtype=dtype)
          if dtype == dtypes.int32:
            value = 0x10000000
          else:
            value = 0x7FFFFFFFFFFFFFFF
          dims = constant_op.constant([value, value], dtype=dtype)
          self.evaluate(array_ops.unravel_index(indices=indices, dims=dims))


class GuaranteeConstOpTest(test_util.TensorFlowTestCase):

  def testSimple(self):
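    # guarantee_const only promises the runtime that the value is constant;
    # it otherwise acts as an identity and must return the input unchanged.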
    a = array_ops.constant(10)
    guarantee_a = array_ops.guarantee_const(a)
    self.assertEqual(10, self.evaluate(guarantee_a))

  def testVariables(self):
    for use_resource in [False, True]:
      with self.subTest(use_resource=use_resource):
        a = variable_scope.get_variable(
            "var_{}".format(use_resource), [],
            initializer=init_ops.constant_initializer(10.0),
            use_resource=use_resource)
        guarantee_a = array_ops.guarantee_const(a)
        self.evaluate(a.initializer)
        self.assertEqual(10.0, self.evaluate(guarantee_a))

  def testResourceRejection(self):
    with ops.device("/cpu:0"):
      a = variable_scope.get_variable(
          "resource_var", [],
          initializer=init_ops.constant_initializer(10.0),
          use_resource=True)
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
                                             "cannot be a resource variable"):
      guarantee_a = array_ops.guarantee_const(a.handle)
      self.evaluate(a.initializer)
      self.evaluate(guarantee_a)


class SnapshotOpTest(test_util.TensorFlowTestCase):

  def testSnapshot(self):
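    # snapshot returns a copy of its input tensor, so the output must equal
    # the input verbatim.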
    for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
      with self.subTest(dtype=dtype, use_gpu=True):
        x = constant_op.constant([0, 1, 2, 3], dtype=dtype)
        y = gen_array_ops.snapshot(x)
        self.assertAllEqual(y, [0, 1, 2, 3])


@test_util.with_eager_op_as_function
@test_util.run_all_in_graph_and_eager_modes
class QuantizeAndDequantizeTest(test_util.TensorFlowTestCase):

  # Generates a tensor of the specified `shape` using values from `values`
  # scaled by (slice_idx + 1) along the `axis` dimension.
  def _scale_per_slice(self, shape, axis, values):
    # Note: repeats the values if the shape is larger than values.
    out = np.take(values, np.remainder(np.arange(np.prod(shape)),
                                       len(values))).reshape(shape)
    if axis is not None:
      scale_shape = [1] * len(shape)
      scale_shape[axis] = shape[axis]
      out *= np.arange(1, shape[axis] + 1).reshape(scale_shape)
    return out

  def testAxis(self):
    shape = np.array([2, 3, 4, 5])
    values = np.array([-1, -0.5, 0, 0.3, 0.8, 0.555, 0.5], dtype=np.float32)
    quant_values = np.array(
        [-1, -0.5, 0, 38.0 / 128, 102.0 / 128, 71.0 / 128, 0.5],
        dtype=np.float32)
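    # The expected values above sit on a grid of multiples of 1/128; HALF_UP
    # rounds 0.3*128 = 38.4 to 38, 0.8*128 = 102.4 to 102, and
    # 0.555*128 = 71.04 to 71.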
    for axis in [None, 0, 1, 2, 3]:
      with self.subTest(axis=axis):
        inputs = constant_op.constant(
            self._scale_per_slice(shape, axis, values))
        expected = self._scale_per_slice(shape, axis, quant_values)
        unused_minmax_value = 0 if axis is None else [0] * shape[axis]
        fake_quantized = self.evaluate(
            array_ops.quantize_and_dequantize_v2(
                inputs,
                unused_minmax_value,
                unused_minmax_value,
                range_given=False,
                round_mode="HALF_UP",
                axis=axis))
        self.assertAllEqual(fake_quantized, expected)
        if axis is not None:
          fake_quantized = self.evaluate(
              array_ops.quantize_and_dequantize_v2(
                  inputs,
                  unused_minmax_value,
                  unused_minmax_value,
                  range_given=False,
                  axis=(axis - 4)))
          self.assertAllClose(fake_quantized, expected)

  def testBadAxis(self):
    input_tensor = [2.5, 2.5]
    input_min = [0, 0]
    input_max = [1, 1]
    # When eager_op_as_function mode is enabled XLA auto-clustering kicks in.
    # XLA raises an UnimplementedError on invalid axis.
    error_message_pattern = (r"Shape must be at least rank 11 but is rank "
                             r"1|invalid axis")
    # TODO(b/171260356): Eager mode and graph mode throw different error types
    error = (errors.InvalidArgumentError, ValueError, errors.UnimplementedError)
    with self.assertRaisesRegex(error, error_message_pattern):
      self.evaluate(
          array_ops.quantize_and_dequantize_v2(
              input=input_tensor,
              input_min=input_min,
              input_max=input_max,
              axis=10))

  def testQuantizeDequantizeGrad(self):
    shape = (2, 2)
    max_threshold = 0
    min_threshold = -10
    input_value = np.random.rand(2, 2) * 40.0 - 20.0
    input_tensor = constant_op.constant(input_value, shape=shape,
                                        name="input_tensor")
    with self.cached_session():
      def f(a):
        return array_ops.quantize_and_dequantize_v2(
            a,
            input_min=min_threshold,
            input_max=max_threshold,
            range_given=True)
      output_grad = gradient_checker_v2.compute_gradient(f, [input_tensor])
      self.assertAllClose(output_grad[0], np.zeros([1, 4, 4]))

  def testOutOfBoundAxis(self):
    input_tensor = constant_op.constant([1., 1.])
    input_min = [0]
    input_max = [1]
    q_input, _, _ = array_ops.quantize(input_tensor, 0, 1, dtypes.qint32)
    error = (errors.InvalidArgumentError, ValueError)
    with self.assertRaisesRegex(error,
                                r".*Axis must be less than input dimension.*"):
      self.evaluate(
          gen_array_ops.dequantize(
              input=q_input,
              min_range=input_min,
              max_range=input_max,
              axis=2**31 - 1))


@test_util.run_all_in_graph_and_eager_modes
class SortedSearchTest(test_util.TensorFlowTestCase):

  def testUpperBoundFloatHandCoded(self):
    cdf = np.array([0, .2, .5, .6, .8, 1.], dtype=np.float32)
    arr = np.array([.04, .99, .53, .58, .31, .01, .79, .8, .21],
                   dtype=np.float32)
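    # side="right" matches np.searchsorted's upper bound: for each query it
    # returns the number of cdf entries that are <= the query value.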
    result = np.searchsorted(cdf, arr, side="right")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
    self.assertAllEqual(result, tf_result)

  def testUpperBoundFloatRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.uniform(size=shape).astype(np.float32), axis=(d - 1))
      arr = np.random.uniform(size=shape).astype(np.float32) * dim_size

      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))

      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")

      result = result.reshape(shape)

      self.assertAllEqual(result, tf_result)

  def testUpperBoundFloatUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.uniform(size=[batch_size, size_search_array]).astype(
            np.float32),
        axis=1)
    arr = np.random.uniform(size=[batch_size, size_values]).astype(
        np.float32) * size_search_array

    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))

    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")

    self.assertAllEqual(result, tf_result)

  def testLowerBoundFloatHandCoded(self):
    cdf = np.array([0, .2, .5, .6, .8, 1.], dtype=np.float32)
    arr = np.array([.04, .99, .53, .58, .31, .01, .79, .8, .21],
                   dtype=np.float32)
    result = np.searchsorted(cdf, arr, side="left")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
    self.assertAllEqual(result, tf_result)

  def testLowerBoundFloatRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.uniform(size=shape).astype(np.float32), axis=(d - 1))
      arr = np.random.uniform(size=shape).astype(np.float32) * dim_size

      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))

      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")

      result = result.reshape(shape)

      self.assertAllEqual(result, tf_result)

  def testLowerBoundFloatUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.uniform(size=[batch_size, size_search_array]).astype(
            np.float32),
        axis=1)
    arr = np.random.uniform(size=[batch_size, size_values]).astype(
        np.float32) * size_search_array

    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))

    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")

    self.assertAllEqual(result, tf_result)

  def testUpperBoundIntHandCoded(self):
    cdf = np.array([0, 20, 50, 60, 80, 100], dtype=np.int64)
    arr = np.array([4, 99, 53, 58, 31, 1, 79, 8, 21], dtype=np.int64)
    result = np.searchsorted(cdf, arr, side="right")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
    self.assertAllEqual(result, tf_result)

  def testUpperBoundIntRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.randint(low=0, high=10, size=shape).astype(np.int64),
          axis=(d - 1))
      arr = np.random.randint(
          low=0, high=10 * dim_size, size=shape).astype(np.int64)

      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))

      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")

      result = result.reshape(shape)

      self.assertAllEqual(result, tf_result)

  def testUpperBoundIntUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.randint(low=0, high=10,
                          size=[batch_size,
                                size_search_array]).astype(np.int64),
        axis=1)
    arr = np.random.randint(
        low=0, high=10 * size_search_array, size=[batch_size,
                                                  size_values]).astype(np.int64)

    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))

    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")

    self.assertAllEqual(result, tf_result)

  def testLowerBoundIntHandCoded(self):
    cdf = np.array([0, 20, 50, 60, 80, 100], dtype=np.int64)
    arr = np.array([4, 99, 53, 58, 31, 1, 79, 8, 21], dtype=np.int64)
    result = np.searchsorted(cdf, arr, side="left")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
    self.assertAllEqual(result, tf_result)

  def testLowerBoundIntRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.randint(low=0, high=10, size=shape).astype(np.int64),
          axis=(d - 1))
      arr = np.random.randint(
          low=0, high=10 * dim_size, size=shape).astype(np.int64)

      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))

      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")

      result = result.reshape(shape)

      self.assertAllEqual(result, tf_result)

  def testLowerBoundIntUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.randint(low=0, high=10,
                          size=[batch_size,
                                size_search_array]).astype(np.int64),
        axis=1)
    arr = np.random.randint(
        low=0, high=10 * size_search_array, size=[batch_size,
                                                  size_values]).astype(np.int64)

    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))

    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")

    self.assertAllEqual(result, tf_result)

  def testZeroSequenceSize(self):
    dtype = dtypes.int32
    for side in ("left", "right"):
      with self.subTest(side=side):
        self.assertAllEqual(
            array_ops.searchsorted(
                array_ops.ones([2, 0]),
                array_ops.ones([2, 3]),
                side=side,
                out_type=dtype), array_ops.zeros([2, 3], dtype))

  def testZeroValueSize(self):
    dtype = dtypes.int32
    for side in ("left", "right"):
      with self.subTest(side=side):
        self.assertAllEqual(
            array_ops.searchsorted(
                array_ops.ones([2, 3]),
                array_ops.ones([2, 0]),
                side=side,
                out_type=dtype), array_ops.zeros([2, 0], dtype))

  def testInt64(self):
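    # With 1e10 elements, result indices can exceed the int32 range, so
    # building the function with out_type=dtypes.int64 must succeed.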

    @def_function.function
    def g():
      x = random_ops.random_normal(shape=[int(1e10)])
      y = array_ops.ones(shape=[int(1e10)])
      return array_ops.searchsorted(x, y, out_type=dtypes.int64)

    _ = g.get_concrete_function()

  def testInt64UnspecifiedOutType(self):

    @def_function.function
    def g():
      x = random_ops.random_normal(shape=[int(1e10)])
      y = array_ops.ones(shape=[int(1e10)])
      return array_ops.searchsorted(x, y)

    _ = g.get_concrete_function()


class BatchGatherNdTest(test_util.TensorFlowTestCase):

  def testShapesMatch(self):
    """Tests for various different shape combinations."""
    shapes = []
    # params_shape, indices_shape, batch_dims
    shapes.append(((2, 2, 2), (2, 1), 1),)
    shapes.append(((2, 2, 2), (2, 2), 1),)
    shapes.append(((2, 2, 2), (2, 3), 0),)
    shapes.append(((2, 2, 2), (3,), 0),)
    shapes.append(((2, 2, 2), (1,), 0),)
    shapes.append(((2, 2, 3, 2), (2, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
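    # The expected result shape is indices_shape[:-1] plus whatever trailing
    # params dims remain after consuming batch_dims batch dims and
    # indices_shape[-1] indexed dims; the loop below reconstructs exactly that.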

    for params_shape, indices_shape, batch_dims in shapes:
      with self.subTest(
          params_shape=params_shape,
          indices_shape=indices_shape,
          batch_dims=batch_dims):
        params = constant_op.constant(1.0, shape=(params_shape))
        indices = constant_op.constant(
            1, shape=(indices_shape), dtype=dtypes.int32)
        out = array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)
        ndims_params = len(params_shape) - batch_dims
        ndims_rows = ndims_params - indices_shape[-1]
        expected_out_shape = indices_shape[:-1]
        if ndims_rows > 0:
          expected_out_shape += params_shape[-ndims_rows:]
        self.assertSequenceEqual(out.shape, expected_out_shape)

  def testReducesToGatherNDWhenBatchDimIsZero(self):
    """Confirms setting batch_dims to zero reduces to tf.gather_nd."""
    params = constant_op.constant(np.random.uniform(0.0, 1.0, size=(7, 8, 9)))
    indices_shapes = []
    indices_shapes.append((1,))
    indices_shapes.append((3, 1))
    indices_shapes.append((3, 3, 1))
    indices_shapes.append((2,))
    indices_shapes.append((3, 2))
    indices_shapes.append((3, 3, 2))
    indices_shapes.append((3,))
    indices_shapes.append((3, 3))
    indices_shapes.append((3, 3, 3))

    for indices_shape in indices_shapes:
      with self.subTest(indices_shape=indices_shape):
        indices = np.random.randint(0, 7, size=indices_shape)
        gather_nd_result = gen_array_ops.gather_nd(params, indices)
        batch_gather_nd_result = array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=0)
        self.assertAllEqual(gather_nd_result, batch_gather_nd_result)

  def testSameResultAsMapFn(self):
    """Compares batch_gather_nd with gather_nd mapped over batch elements."""
    shapes = []
    # params_shape, indices_shape, batch_dims
    shapes.append(((2, 2, 2), (2, 1), 1),)
    shapes.append(((2, 2, 2), (2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)

    for params_shape, indices_shape, batch_dims in shapes:
      with self.subTest(
          params_shape=params_shape,
          indices_shape=indices_shape,
          batch_dims=batch_dims):
        params = constant_op.constant(
            np.random.uniform(0.0, 1.0, size=(params_shape)))
        indices = np.random.randint(0, 2, size=indices_shape)
        batch_gather_nd_result = array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)

        if batch_dims > 1:
          params = array_ops.reshape(
              params, shape=[-1] + list(params_shape[batch_dims:]))
          indices = array_ops.reshape(
              indices, shape=[-1] + list(indices_shape[batch_dims:]))

        map_fn_gather_nd_result = map_fn.map_fn(
            fn=self._map_fn_body, elems=(params, indices), dtype=dtypes.float64)

        if batch_dims > 1:
          out_shape = map_fn_gather_nd_result.shape.as_list()
          out_shape = list(params_shape[:batch_dims]) + out_shape[1:]
          map_fn_gather_nd_result = array_ops.reshape(
              map_fn_gather_nd_result, shape=out_shape)

        self.assertAllEqual(map_fn_gather_nd_result, batch_gather_nd_result)

  def _map_fn_body(self, elems):
    return gen_array_ops.gather_nd(elems[0], elems[1])

  def testBatchDimsAsTensor(self):
    """Tests Tensor batch_dims as input works as intended."""
    shapes = []
    # params_shape, indices_shape, batch_dims
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 0),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 1),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)

    for params_shape, indices_shape, batch_dims in shapes:
      with self.subTest(
          params_shape=params_shape,
          indices_shape=indices_shape,
          batch_dims=batch_dims):
        params = constant_op.constant(
            np.random.uniform(0.0, 1.0, size=(params_shape)))
        indices = np.random.randint(0, 2, size=indices_shape)
        batch_gather_nd_result = array_ops.gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)
        batch_dims_tensor = constant_op.constant([batch_dims])
        batch_gather_nd_tensor_batch_dims_result = array_ops.gather_nd(
            params=params, indices=indices, batch_dims=batch_dims_tensor)

        self.assertAllEqual(batch_gather_nd_tensor_batch_dims_result,
                            batch_gather_nd_result)

  def testInvalidBatchDimsRaisesException(self):
    """Tests whether invalid batch_dims raise expected exceptions."""
    params = constant_op.constant(
        np.random.uniform(0.0, 1.0, size=(3, 2, 2, 3, 4)))
    indices = np.random.randint(0, 2, size=(3, 2, 3))

    with self.assertRaises(TypeError):
      array_ops.batch_gather_nd(
          params=params,
          indices=indices,
          batch_dims=constant_op.constant((0, 1)))

    with self.assertRaises(ValueError):
      array_ops.batch_gather_nd(params=params, indices=indices, batch_dims=-1)

    with self.assertRaises(ValueError):
      array_ops.batch_gather_nd(params=params, indices=indices, batch_dims=4)

  def testNoneBatchDimensions(self):
    """Tests gather_nd works with None dimensions."""
    shapes = []
    # params_shape, indices_shape, batch_dims
    shapes.append(((2, 2, 2), (2, 1), 1),)
    shapes.append(((2, 2, 2), (2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)

    for params_shape, indices_shape, batch_dims in shapes:
      params_ph_shape = list(params_shape)
      indices_ph_shape = list(indices_shape)
      for i in range(batch_dims):
        params_ph_shape[i] = None
        indices_ph_shape[i] = None

      @def_function.function
      def func(params, indices):
        return array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)  # pylint: disable=cell-var-from-loop

      f = func.get_concrete_function(
          tensor_spec.TensorSpec(params_ph_shape, dtypes.float32),
          tensor_spec.TensorSpec(indices_ph_shape, dtypes.int32))

      params_val = np.ones(dtype=np.float32, shape=params_shape)
      indices_val = np.ones(dtype=np.int32, shape=indices_shape)
      res = f(params_val, indices_val)
      row_ndims = len(params_shape) - batch_dims - indices_shape[-1]
      expected_out_shape = indices_shape[:-1]
      if row_ndims > 0:
        expected_out_shape += params_shape[-row_ndims:]

      self.assertSequenceEqual(res.shape, expected_out_shape)


@test_util.run_all_in_graph_and_eager_modes
class RepeatTest(test_util.TensorFlowTestCase, parameterized.TestCase):

  @parameterized.parameters(
      (3, 4, None),
      ([[1, 2], [3, 4]], 2, None),
      ([[1, 2], [3, 4]], [1, 2], 0),
      ([[1, 2], [3, 4]], [1, 2], 1),
      ([[1, 2], [3, 4]], 3, 1),
      ([[1, 2], [3, 4]], [1, 2, 3, 4], None),
      (np.ones([0, 4]), 0, 1),
      (np.ones([1, 2]), [2], None),
  )
  @test_util.with_forward_compatibility_horizons(None, [2052, 2, 7])
  def testRepeat(self, array, repeats, axis):
    array = np.array(array)

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)] * 2)
    def repeat_fn(array, repeats):
      return array_ops.repeat(array, repeats, axis)

    v_tf = array_ops.repeat(constant_op.constant(array), repeats, axis)
    v_tf_fn = repeat_fn(
        constant_op.constant(array, dtype=dtypes.int32), repeats)
    v_np = np.repeat(array, repeats, axis)
    self.assertAllEqual(v_tf, v_np)
    self.assertAllEqual(v_tf_fn, v_np)


class RepeatBenchmark(test_lib.Benchmark):
  """Benchmark the repeat implementation."""

  def run_and_time(self, op, iters=100, warmup_iters=10):
    self.evaluate(variables.global_variables_initializer())
    for _ in range(warmup_iters):
      _ = self.evaluate(op)
    t0 = time.time()
    for _ in range(iters):
      self.evaluate(op)
    t1 = time.time()
    self.report_benchmark(iters=iters, wall_time=(t1 - t0) / float(iters))

  def make_variable(self, shape, dtype=dtypes.float32):
    items = 1
    for dim in shape:
      items *= dim
    var = variables.Variable(
        array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
        dtype=dtype)
    return var

  def run_benchmark(self, shape, max_repeats, axis=None):
    with session.Session():
      var = self.make_variable(shape)
      if axis is None:
        axis_size = 1
        for dim in shape:
          axis_size *= dim
      else:
        axis_size = shape[axis]
      repeats = constant_op.constant(
          np.random.randint(max_repeats, size=[axis_size]), dtype=dtypes.int64)
      repeat_op = array_ops.repeat(var, repeats, axis=axis)
      # Return a scalar to reduce the device-to-host memcpy overhead.
      repeat_op = repeat_op[(0,) * len(shape)]
      self.run_and_time(repeat_op)

  def benchmark_repeat_few_1d(self):
    self.run_benchmark(shape=[1024 * 1024], max_repeats=8, axis=0)

  def benchmark_repeat_many_1d(self):
    self.run_benchmark(shape=[8 * 1024], max_repeats=1024, axis=0)

  def benchmark_repeat_few_2d_axis0(self):
    self.run_benchmark(shape=[8, 128 * 1024], max_repeats=8, axis=0)

  def benchmark_repeat_many_2d_axis0(self):
    self.run_benchmark(shape=[8, 1024], max_repeats=1024, axis=0)

  def benchmark_repeat_many_2d_axis0_big(self):
    self.run_benchmark(shape=[1024, 32], max_repeats=1024, axis=0)

  def benchmark_repeat_few_2d_axis1(self):
    self.run_benchmark(shape=[8, 128 * 1024], max_repeats=8, axis=1)

  def benchmark_repeat_many_2d_axis1(self):
    self.run_benchmark(shape=[8, 1024], max_repeats=1024, axis=1)


@test_util.run_all_in_graph_and_eager_modes
class TileVariantTest(test_util.TensorFlowTestCase):

  def test_tile_tensor_list(self):
    t = constant_op.constant(np.random.uniform(size=[2, 3, 4]))
    handle = list_ops.tensor_list_from_tensor(t, element_shape=None)
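    # A TensorList handle is a scalar variant tensor; reshaping it to [1]
    # and tiling by [2] should yield two independently usable handles.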
    with ops.device("CPU:0"):
      tiled_handles = array_ops.tile(array_ops.reshape(handle, [1]), [2])
    tiled_tensor_0 = list_ops.tensor_list_stack(tiled_handles[0], t.dtype, 2,
                                                [3, 4])
    tiled_tensor_1 = list_ops.tensor_list_stack(tiled_handles[1], t.dtype, 2,
                                                [3, 4])
    self.assertAllEqual(t, tiled_tensor_0)
    self.assertAllEqual(t, tiled_tensor_1)
    # Now mutate some of the lists and make sure the changes are not reflected
    # in the tiled handles.
    with ops.control_dependencies([
        list_ops.tensor_list_scatter([t[0] + 1], [0], input_handle=handle),
        list_ops.tensor_list_set_item(tiled_handles[0], 0, t[0] + 2)]):
      tiled_tensor_0 = list_ops.tensor_list_stack(tiled_handles[0], t.dtype, 2,
                                                  [3, 4])
      tiled_tensor_1 = list_ops.tensor_list_stack(tiled_handles[1], t.dtype, 2,
                                                  [3, 4])
    self.assertAllEqual(t, tiled_tensor_0)
    self.assertAllEqual(t, tiled_tensor_1)


class StopGradientTest(test_util.TensorFlowTestCase, parameterized.TestCase):

  def testStopGradient(self):
    x = array_ops.zeros(3)
    y = array_ops.stop_gradient(x)
    self.assertAllEqual(x, y)

  def testStopGradientRaggedTensor(self):
    x = RaggedTensor.from_row_splits(values=[1, 2, 3], row_splits=[0, 1, 1, 3])
    y = array_ops.stop_gradient(x)
    self.assertAllEqual(x, y)

  def testStopGradientGradientTape(self):
    x = array_ops.zeros(3)
    with backprop.GradientTape() as tape:
      y = array_ops.stop_gradient(x)

    self.assertIsNone(tape.gradient(y, x))

  def testStopGradientGradientTapeRaggedTensor(self):
    x = RaggedTensor.from_row_splits(values=[1, 2, 3], row_splits=[0, 1, 1, 3])
    with backprop.GradientTape() as tape:
      y = array_ops.stop_gradient(x)

    self.assertIsNone(tape.gradient(y, x))

  @parameterized.named_parameters([
      ("TFFunction", def_function.function),
      ("PythonFunction", lambda f: f),
  ])
  def test_stop_gradient_resource_variable(self, decorator):
    x = resource_variable_ops.ResourceVariable([1.0])
    self.evaluate(x.initializer)

    @decorator
    def stop_gradient_f(x):
      return array_ops.stop_gradient(x)

    with backprop.GradientTape() as tape:
      y = stop_gradient_f(x)
    self.assertIsNone(tape.gradient(y, x))
    # stop_gradient converts ResourceVariable to Tensor
    self.assertIsInstance(y, ops.Tensor)
    self.assertAllEqual(y, x)


if __name__ == "__main__":
  test_lib.main()