# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import re
import time
import unittest

from absl.testing import parameterized
import numpy as np

from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib


@test_util.run_all_in_graph_and_eager_modes
class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):

  def testNonBatchMatrix(self):
    matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
    expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
    transposed = array_ops.matrix_transpose(matrix)
    self.assertEqual((3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed)

  def testConjugate(self):
    m = [[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]
    expected_transposed = [[1 - 1j, 4 - 4j], [2 - 2j, 5 - 5j], [3 - 3j, 6 - 6j]]
    matrix = ops.convert_to_tensor(m)
    transposed = array_ops.matrix_transpose(matrix, conjugate=True)
    self.assertEqual((3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed)

  def testBatchMatrix(self):
    matrix_0 = [[1, 2, 3], [4, 5, 6]]
    matrix_0_t = [[1, 4], [2, 5], [3, 6]]
    matrix_1 = [[11, 22, 33], [44, 55, 66]]
    matrix_1_t = [[11, 44], [22, 55], [33, 66]]
    batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
    expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
    transposed = array_ops.matrix_transpose(batch_matrix)
    self.assertEqual((2, 3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed)

  def testNonBatchMatrixDynamicallyDefined(self):
    # needs explicit `constant` because lists are not automatically
    # converted to tensors when applying `transpose` below
    matrix = constant_op.constant([[1, 2, 3], [4, 5, 6]])  # Shape (2, 3)
    expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)

    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
    ])
    def transpose(matrix):
      self.assertIs(matrix.shape.ndims, None)
      return array_ops.matrix_transpose(matrix)

    self.assertAllEqual(expected_transposed, transpose(matrix))

  def testBatchMatrixDynamicallyDefined(self):
    matrix_0 = [[1, 2, 3], [4, 5, 6]]
    matrix_0_t = [[1, 4], [2, 5], [3, 6]]
    matrix_1 = [[11, 22, 33], [44, 55, 66]]
    matrix_1_t = [[11, 44], [22, 55], [33, 66]]
    # needs explicit `constant` because lists are not automatically
    # converted to tensors when applying `transpose` below
    batch_matrix = constant_op.constant([matrix_0, matrix_1])  # Shape (2, 2, 3)
    expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)

    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
    ])
    def transpose(matrix):
      self.assertIs(matrix.shape.ndims, None)
      return array_ops.matrix_transpose(matrix)

    self.assertAllEqual(expected_transposed, transpose(batch_matrix))

  def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
    vector = [1, 2, 3]
    with self.assertRaisesRegex(ValueError, "should be a "):
      array_ops.matrix_transpose(vector)

  def testNarrowMatrixConjugateTranspose(self):
    for dtype in (dtypes.float32, dtypes.float64):
      for conjugate in (True, False):
        with self.subTest(complex_type=dtype, conjugate=conjugate):
          vector = math_ops.complex(
              constant_op.constant(0, dtype=dtype),
              math_ops.range(96, dtype=dtype))
          column_vector = array_ops.expand_dims(vector, axis=-1)
          row_vector = array_ops.expand_dims(vector, axis=0)
          narrow_matrix = array_ops.tile(column_vector, [1, 2])  # [96, 2]
          expected_transposed = array_ops.tile(row_vector, [2, 1])  # [2, 96]
          if conjugate:
            expected_transposed = -expected_transposed

          transposed = array_ops.matrix_transpose(
              narrow_matrix, conjugate=conjugate)

          self.assertEqual((2, 96), transposed.get_shape())
          self.assertAllEqual(expected_transposed, transposed)


class BooleanMaskTest(test_util.TensorFlowTestCase):

  def setUp(self):
    self.rng = np.random.RandomState(42)

  def CheckVersusNumpy(self, ndims_mask, arr_shape, make_mask=None, axis=None):
    """Check equivalence between boolean_mask and numpy masking."""
    if make_mask is None:
      make_mask = lambda shape: self.rng.randint(0, 2, size=shape).astype(bool)
    arr = np.random.rand(*arr_shape)
    mask = make_mask(arr_shape[:ndims_mask])
    if axis is not None:
      mask = make_mask(arr_shape[axis:ndims_mask + axis])
    if axis is None or axis == 0:
      masked_arr = arr[mask]
    elif axis == 1:
      masked_arr = arr[:, mask]
    elif axis == 2:
      masked_arr = arr[:, :, mask]
    masked_tensor = array_ops.boolean_mask(arr, mask, axis=axis)

    # Leading dimension size of masked_tensor is always unknown until runtime
    # since we don't know how many elements will be kept.
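    # For example (illustrative values): boolean_mask([[1, 2], [3, 4]],
    # [True, False]) yields [[1, 2]]; the leading dimension depends on how
    # many mask entries are True, while the trailing dimensions are static.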
    leading = 1 if axis is None else axis + 1
    self.assertAllEqual(masked_tensor.get_shape()[leading:],
                        masked_arr.shape[leading:])

    self.assertAllClose(masked_arr, masked_tensor)

  def testMaskDim1ArrDim2Axis1(self):
    ndims_mask = 1
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape, axis=1)

  def testMaskDim2ArrDim2Axis1(self):
    ndims_mask = 2
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape, axis=1)

  def testMaskDim1ArrDim1(self):
    ndims_mask = 1
    for arr_shape in [(1,), (2,), (3,), (10,)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)

  def testMaskDim1ArrDim2(self):
    ndims_mask = 1
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)

  def testMaskDim2ArrDim2(self):
    ndims_mask = 2
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)

  def testMaskDim2ArrDim3(self):
    ndims_mask = 2
    for arr_shape in [(1, 1, 1), (1, 2, 2), (2, 2, 1)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)

  def testEmptyInput2D(self):
    mask = np.array([True, False])
    arr = np.array([[], []]).astype(np.float32)
    numpy_result = arr[mask]
    tf_result = array_ops.boolean_mask(arr, mask)
    self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
    with self.cached_session():
      self.assertAllClose(numpy_result, tf_result)

  def testEmptyInput1D(self):
    mask = np.array([]).astype(bool)
    arr = np.array([]).astype(np.float32)
    numpy_result = arr[mask]
    tf_result = array_ops.boolean_mask(arr, mask)
    self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
    with self.cached_session():
      self.assertAllClose(numpy_result, tf_result)

  def testEmptyOutput(self):
    make_mask = lambda shape: np.zeros(shape, dtype=bool)
    for ndims_mask in range(1, 4):
      for ndims_arr in range(ndims_mask, ndims_mask + 3):
        for _ in range(3):
          with self.subTest(ndims_mask=ndims_mask, ndims_arr=ndims_arr, _=_):
            arr_shape = np.random.randint(1, 5, size=ndims_arr)
            self.CheckVersusNumpy(ndims_mask, arr_shape, make_mask=make_mask)

  def testWorksWithDimensionsEqualToNoneDuringGraphBuild(self):
    # The rank of the mask tensor must be specified. This is explained
    # in the docstring as well.
    @def_function.function
    def func(ph_tensor, ph_mask):
      return array_ops.boolean_mask(ph_tensor, ph_mask)

    f = func.get_concrete_function(
        tensor_spec.TensorSpec(None, dtypes.int32),
        tensor_spec.TensorSpec([None], dtypes.bool))
    arr = np.array([[1, 2], [3, 4]], np.int32)
    mask = np.array([False, True])
    masked_tensor = f(arr, mask)
    self.assertAllEqual(masked_tensor, arr[mask])

  def testMaskDimensionsSetToNoneRaises(self):
    # The rank of the mask tensor must be specified. This is explained
    # in the docstring as well.
    @def_function.function
    def func(tensor, mask):
      return array_ops.boolean_mask(tensor, mask)

    with self.assertRaisesRegex(ValueError, "dimensions must be specified"):
      _ = func.get_concrete_function(
          tensor_spec.TensorSpec([None, 2], dtypes.int32),
          tensor_spec.TensorSpec(None, dtypes.bool))

  def testMaskHasMoreDimsThanTensorRaises(self):
    mask = [[True, True], [False, False]]
    tensor = [1, 2, 3, 4]
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "incompatible"):
        self.evaluate(array_ops.boolean_mask(tensor, mask))

  def testMaskIsScalarRaises(self):
    mask = True
    tensor = 1
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "mask.*scalar"):
        self.evaluate(array_ops.boolean_mask(tensor, mask))

  def testMaskShapeDifferentThanFirstPartOfTensorShapeRaises(self):
    mask = [True, True, True]
    tensor = [[1, 2], [3, 4]]
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "incompatible"):
        self.evaluate(array_ops.boolean_mask(tensor, mask))

  def testStringMask(self):
    # Reproduces b/111171330, where the optimized boolean_mask graph would
    # be incorrectly placed on GPU.
    config.set_optimizer_experimental_options({"shape_optimization": True})

    @def_function.function
    def func(tile_input):
      string_tensor = array_ops.tile([["hello"]], tile_input)
      bool_tensor = array_ops.tile([[True]], tile_input)
      masked_tensor = array_ops.boolean_mask(string_tensor, bool_tensor)
      return masked_tensor

    result = func([2, 2])
    self.assertAllEqual([b"hello", b"hello", b"hello", b"hello"], result)

  def testMaskWithAxisTensor(self):

    @def_function.function(autograph=False)
    def f():
      return array_ops.boolean_mask([1, 2, 3], [True, False, True],
                                    axis=constant_op.constant(
                                        0, dtype=dtypes.int32))

    self.assertAllEqual(self.evaluate(f()), [1, 3])

  def testMaskWithAxisNonConstTensor(self):

    @def_function.function(
        autograph=False,
        input_signature=[
            tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
        ])
    def f(axis):
      return array_ops.boolean_mask([1, 2, 3], [True, False, True], axis=axis)

    self.assertAllEqual(
        self.evaluate(f(constant_op.constant(0, dtype=dtypes.int32))), [1, 3])


@test_util.run_all_in_graph_and_eager_modes
class OperatorShapeTest(test_util.TensorFlowTestCase):

  def testExpandScalar(self):
    scalar = "hello"
    scalar_expanded = array_ops.expand_dims(scalar, [0])
    self.assertEqual(scalar_expanded.get_shape(), (1,))

  def testSqueezeScalar(self):
    scalar = "hello"
    scalar_squeezed = array_ops.squeeze(scalar, ())
    self.assertEqual(scalar_squeezed.get_shape(), ())

  def testSqueezeMatrix(self):
    matrix = [[1, 2, 3]]
    matrix_squeezed = array_ops.squeeze(matrix, [0])
    self.assertEqual(matrix_squeezed.get_shape(), (3,))

    with self.assertRaisesRegex(
        Exception, "Can not squeeze dim.1., expected a dimension of 1, got 3"):
      matrix_squeezed = array_ops.squeeze(matrix, [1])

  def testSqueezeScalarDim(self):
    matrix = [[1, 2, 3]]
    matrix_squeezed = array_ops.squeeze(matrix, 0)
    self.assertEqual(matrix_squeezed.get_shape(), (3,))

  def testExpandDimsWithNonScalarDim(self):
    with self.assertRaisesRegex(Exception,
                                "must be a tensor with a single value"):
      array_ops.expand_dims(1, axis=[0, 1])


class ReverseV2Test(test_util.TensorFlowTestCase):

  def testReverse0DimAuto(self):
    x_np = 4
    for use_gpu in [False, True]:
      with self.subTest(use_gpu=use_gpu):
        with self.cached_session(use_gpu=use_gpu):
          x_tf = self.evaluate(array_ops.reverse_v2(x_np, []))
          self.assertAllEqual(x_tf, x_np)

  def _reverse1DimAuto(self, np_dtype):
    x_np = np.array([1, 200, 3, 40, 5], dtype=np_dtype)

    for use_gpu in [False, True]:
      for axis_dtype in [dtypes.int32, dtypes.int64]:
        with self.subTest(use_gpu=use_gpu, axis_dtype=axis_dtype):
          x_tf = self.evaluate(
              array_ops.reverse_v2(x_np,
                                   constant_op.constant([0], dtype=axis_dtype)))
          self.assertAllEqual(x_tf, np.asarray(x_np)[::-1])

  def _reverse2DimAuto(self, np_dtype):
    x_np = np.array([[1, 200, 3], [4, 5, 60]], dtype=np_dtype)

    for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
      for use_gpu in [False, True]:
        for axis_dtype in [dtypes.int32, dtypes.int64]:
          with self.subTest(
              reverse_f=reverse_f, use_gpu=use_gpu, axis_dtype=axis_dtype):
            x_tf_1 = self.evaluate(
                reverse_f(x_np, constant_op.constant([0], dtype=axis_dtype)))
            x_tf_2 = self.evaluate(
                reverse_f(x_np, constant_op.constant([-2], dtype=axis_dtype)))
            x_tf_3 = self.evaluate(
                reverse_f(x_np, constant_op.constant([1], dtype=axis_dtype)))
            x_tf_4 = self.evaluate(
                reverse_f(x_np, constant_op.constant([-1], dtype=axis_dtype)))
            x_tf_5 = self.evaluate(
                reverse_f(x_np, constant_op.constant([1, 0], dtype=axis_dtype)))
            self.assertAllEqual(x_tf_1, np.asarray(x_np)[::-1, :])
            self.assertAllEqual(x_tf_2, np.asarray(x_np)[::-1, :])
            self.assertAllEqual(x_tf_3, np.asarray(x_np)[:, ::-1])
            self.assertAllEqual(x_tf_4, np.asarray(x_np)[:, ::-1])
            self.assertAllEqual(x_tf_5, np.asarray(x_np)[::-1, ::-1])

  # This test covers the axis validation in the shape function
  # (no eval())
  def testInvalidAxis(self):
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                "is out of valid range"):
      array_ops.reverse_v2(x_np, [-30])
    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                "is out of valid range"):
      array_ops.reverse_v2(x_np, [2])
    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                "axis 0 specified more than once"):
      array_ops.reverse_v2(x_np, [0, -2])

  # This is the version of reverse that uses axis indices rather than
  # bool tensors
  # TODO(b/32254538): Change this test to use array_ops.reverse
  #
  # Note: this test passes the axis as a tf.function argument (a placeholder),
  # because a constant axis is validated in the shape function
  # (see testInvalidAxis)
  def testInvalid(self):
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)

    @def_function.function
    def func(ax):
      return array_ops.reverse_v2(x_np, ax)

    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
                                "is out of.*range"):
      func([-30])
    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
                                "is out of.*range"):
      func([2])
    with self.assertRaisesRegex(
        (ValueError, errors_impl.InvalidArgumentError),
        "(axis 0 specified more than once|canonicalized axis 0 was repeated.)"):
      func([0, -2])

  def testReverse1DimAuto(self):
    for dtype in [
        np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64,
        np.int64, np.bool_, np.float16, np.float32, np.float64, np.complex64,
        np.complex128,
        np.array(b"").dtype.type
    ]:
      self._reverse1DimAuto(dtype)

  def testReverse2DimAuto(self):
    for dtype in [
        np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64,
        np.int64, np.bool_, np.float16, np.float32, np.float64, np.complex64,
        np.complex128,
        np.array(b"").dtype.type
    ]:
      self._reverse2DimAuto(dtype)

  def testReverseRowsOf3Channels(self):
    """Tests optimized code for reversing rows with last dim size = 3."""
    for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
      for outer_size in (1, 2):
        for middle_size in list(range(50)) + [100000]:
          with self.subTest(
              reverse_f=reverse_f,
              outer_size=outer_size,
              middle_size=middle_size,
              use_gpu=True):
            x_np = np.reshape(
                np.arange(outer_size * middle_size * 3, dtype=np.float32),
                newshape=(outer_size, middle_size, 3))
            x_tf = self.evaluate(reverse_f(x_np, [1]))
            np_answer = x_np[:, ::-1, :]
            self.assertAllEqual(x_tf, np_answer)

  def testReverseRowsOf4Channels(self):
    for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
      for outer_size in (1, 2):
        for middle_size in list(range(50)) + [100000]:
          with self.subTest(
              reverse_f=reverse_f,
              outer_size=outer_size,
              middle_size=middle_size,
              use_gpu=True):
            x_np = np.reshape(
                np.arange(outer_size * middle_size * 4, dtype=np.float32),
                newshape=(outer_size, middle_size, 4))
            x_tf = self.evaluate(reverse_f(x_np, [1]))
            np_answer = x_np[:, ::-1, :]
            self.assertAllEqual(x_tf, np_answer)

  def testReverseColumnsOf3Channels(self):
    for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
      for outer_size in list(range(50)) + [100000]:
        for middle_size in (1, 2):
          with self.subTest(
              reverse_f=reverse_f,
              outer_size=outer_size,
              middle_size=middle_size,
              use_gpu=True):
            x_np = np.reshape(
                np.arange(outer_size * middle_size * 3, dtype=np.float32),
                newshape=(outer_size, middle_size, 3))
            x_tf = self.evaluate(reverse_f(x_np, [0]))
            np_answer = x_np[::-1, :, :]
            self.assertAllEqual(x_tf, np_answer)

  def testReverseInvalidShape(self):
    x = np.ndarray(shape=[0, 1, 1])
    v = array_ops.reverse_v2(x, axis=[1])
    self.assertAllEqual(self.evaluate(v), v)


class MeshgridTest(test_util.TensorFlowTestCase):

  def _compareDiff(self, x, y, use_gpu):
    for index in ("ij", "xy"):
      numpy_out = np.meshgrid(x, y, indexing=index)
      tf_out = array_ops.meshgrid(x, y, indexing=index)
      with self.cached_session(use_gpu=use_gpu):
        for xx, yy in zip(numpy_out, tf_out):
          self.assertAllEqual(xx, yy)

  def _compareDiffType(self, n, np_dtype, use_gpu):
    inputs = []
    for index in ("ij", "xy"):
      for _ in range(n):
        x = np.linspace(-10, 10, 5).astype(np_dtype)
        if np_dtype in (np.complex64, np.complex128):
          x += 1j
        inputs.append(x)
      numpy_out = np.meshgrid(*inputs, indexing=index)
      with test_util.device(use_gpu=use_gpu):
        tf_out = array_ops.meshgrid(*inputs, indexing=index)
        for x_np, x_tf in zip(numpy_out, tf_out):
          self.assertAllEqual(x_np, x_tf)

  def testCompare(self):
    for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
              np.complex64, np.complex128):
      with self.subTest(t=t):
        self._compareDiffType(2, t, False)
        self._compareDiffType(3, t, False)

        x = [1, 2, 3]
        y = [4, 5]

        a = [[1, 1], [1, 1]]

        self._compareDiff(x, y, False)
        self._compareDiff(x, a, False)


class StridedSliceChecker(object):
  """Check a given tensor against the numpy result."""

  REF_TENSOR = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
  REF_TENSOR_ALIGNED = np.arange(1, 97, dtype=np.float32).reshape(3, 4, 8)

  def __init__(self, test, x, tensor_type=dtypes.int32, check_type_infer=True):
    self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype)
    if tensor_type.is_bool:
      self.x_np = np.array(x % 3).astype(np.bool_)
    # Give the value a non-zero imaginary component for complex types.
    if tensor_type.is_complex:
      self.x_np -= 1j * self.x_np
    self.test = test
    self.x = constant_op.constant(self.x_np, dtype=tensor_type)
    self.check_type_infer = check_type_infer

  def __getitem__(self, spec):
    op = self.x.__getitem__(spec)

    def eval_if_tensor(x):
      try:
        return self.test.evaluate(x)
      except (AttributeError, TypeError, ValueError):
        return x

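    # A boolean spec (Python bool, bool Tensor, bool ndarray, or a bool
    # list/tuple) is treated as a mask, so compare it directly against numpy
    # boolean indexing.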
    if (isinstance(spec, bool) or
        (isinstance(spec, ops.Tensor) and spec.dtype == dtypes.bool) or
        (isinstance(spec, np.ndarray) and spec.dtype == bool) or
        (isinstance(spec, (list, tuple)) and np.asarray(spec).dtype == bool)):
      tensor = self.test.evaluate(op)
      np_spec = eval_if_tensor(spec)
      self.test.assertAllEqual(self.x_np[np_spec], tensor)
      return tensor

    if not isinstance(spec, (list, tuple)):
      spec = [spec]

    tensor = self.test.evaluate(op)

    # Make a numpy spec that pre-evals the tensors
    np_specs = []

    for s in spec:
      if isinstance(s, slice):
        start = eval_if_tensor(s.start)
        stop = eval_if_tensor(s.stop)
        step = eval_if_tensor(s.step)
        np_specs.append(slice(start, stop, step))
      else:
        np_specs.append(eval_if_tensor(s))

    self.test.assertAllEqual(self.x_np[tuple(np_specs)], tensor)
    if self.check_type_infer:
      self.test.assertAllEqual(tensor.shape, op.get_shape())
    return tensor


STRIDED_SLICE_TYPES = [
    dtypes.int32, dtypes.int64, dtypes.int16, dtypes.int8, dtypes.uint8,
    dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128,
    dtypes.bool
]


class StridedSliceTest(test_util.TensorFlowTestCase):
  """Test the strided slice operation with variants of slices."""

  def test_basic_slice(self):
    for tensor_type in STRIDED_SLICE_TYPES:
      with self.subTest(tensor_type=tensor_type, use_gpu=True):
        checker = StridedSliceChecker(
            self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)
        _ = checker[:, :, :]
        # Various ways of representing identity slice
        _ = checker[:, :, :]
        _ = checker[::, ::, ::]
        _ = checker[::1, ::1, ::1]
        # Not zero slice
        _ = checker[::1, ::5, ::2]
        # Reverse in each dimension independently
        _ = checker[::-1, :, :]
        _ = checker[:, ::-1, :]
        _ = checker[:, :, ::-1]
        ## negative index tests i.e. n-2 in first component
        _ = checker[-2::-1, :, ::1]
        # negative index tests i.e. n-2 in first component, non-unit stride
        _ = checker[-2::-1, :, ::2]

        # Check rank-0 examples
        checker2 = StridedSliceChecker(self, 5, tensor_type=tensor_type)
        _ = checker2[None]
        _ = checker2[...]
        _ = checker2[tuple()]

  def testInt64GPU(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")

    with test_util.force_gpu():
      x = constant_op.constant([1., 2., 3.])
      begin = constant_op.constant([2], dtype=dtypes.int64)
      end = constant_op.constant([3], dtype=dtypes.int64)
      strides = constant_op.constant([1], dtype=dtypes.int64)
      s = array_ops.strided_slice(x, begin, end, strides)
      self.assertAllEqual([3.], self.evaluate(s))

  @test_util.assert_no_new_pyobjects_executing_eagerly
  @test_util.assert_no_garbage_created
  def testTensorSliceEagerMemory(self):
    with context.eager_mode():
      inputs = constant_op.constant([[[1], [2], [3], [4]]],
                                    dtype=dtypes.float32)
      # Tests that slicing an EagerTensor doesn't leak memory
      inputs[0]  # pylint: disable=pointless-statement

  @test_util.assert_no_new_pyobjects_executing_eagerly
  @test_util.assert_no_garbage_created
  def testVariableSliceEagerMemory(self):
    with context.eager_mode():
      v = variables.Variable([1., 2.])
      v[0]  # pylint: disable=pointless-statement

  def testDegenerateSlices(self):
    with test_util.device(use_gpu=True):
      checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
      # degenerate by offering a forward interval with a negative stride
      _ = checker[0:-1:-1, :, :]
      # degenerate with a reverse interval with a positive stride
      _ = checker[-1:0, :, :]
      # empty interval in every dimension
      _ = checker[-1:0, 2:2, 2:3:-1]
      # empty first dimension only (used to break for aligned tensors).
      checker = StridedSliceChecker(self,
                                    StridedSliceChecker.REF_TENSOR_ALIGNED)
      _ = checker[1:0]

  def testSliceWithUndefinedDimension(self):
    t = constant_op.constant([1, 2, 3])
    d = tensor_shape.Dimension(None)
    self.assertAllEqual(t[d:d:d], t)

  def testEllipsis(self):
    with test_util.device(use_gpu=True):
      raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]
      checker = StridedSliceChecker(self, raw)

      _ = checker[0:]
      # implicit ellipsis
      _ = checker[0:, ...]
      # ellipsis alone
      _ = checker[...]
      # ellipsis at end
      _ = checker[0:1, ...]
      # ellipsis at begin
      _ = checker[..., 0:1]
      # ellipsis at middle
      _ = checker[0:1, ..., 0:1]
      # multiple ellipses not allowed
      with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                  "Multiple ellipses"):
        _ = checker[..., :, ...].eval()

  def testShrink(self):
    with test_util.device(use_gpu=True):
      raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
      checker = StridedSliceChecker(self, raw)
      _ = checker[:, :, :, :, 3]
      _ = checker[..., 3]
      _ = checker[:, 0]
      _ = checker[:, :, 0]

  def testBothNewAxisAndShrink(self):
    with test_util.device(use_gpu=True):

      @def_function.function
      def func(inp):
        return inp[array_ops.newaxis, :, 0]

      f = func.get_concrete_function(
          tensor_spec.TensorSpec([2, 2], dtypes.int16))

      # TODO(b/190416665): Allow the constant to be eagerly copied/created on
      # the GPU.
      with ops.device("CPU"):
        ones = constant_op.constant([[1, 1], [1, 1]], dtypes.int16)
      self.assertAllEqual([[1, 1]], self.evaluate(f(ones)))

  def testTensorIndexing(self):
    with test_util.device(use_gpu=True):
      raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
      checker = StridedSliceChecker(self, raw, check_type_infer=False)
      bar = constant_op.constant(2)
      bar2 = constant_op.constant(3)
      _ = checker[..., bar:bar2]
      _ = checker[..., bar]
      _ = checker[..., 3]
      _ = checker[..., 2**64 // 2**63]  # Test longs in Python 2

  def testTensorIndexingTypeError(self):
    with self.session():
      checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
      expected = re.escape(array_ops._SLICE_TYPE_ERROR)
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker["foo"]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[constant_op.constant("foo")]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[0.0]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[constant_op.constant(0.0)]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[constant_op.constant([1, 2, 3])]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[[2.1, -0.7, 1.5]]

  def testExpand(self):
    with test_util.device(use_gpu=True):
      raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
      checker = StridedSliceChecker(self, raw)
      # new axis (followed by implicit ellipsis)
      _ = checker[np.newaxis]
      # newaxis after ellipsis
      _ = checker[..., np.newaxis]
      # newaxis in between ellipsis and explicit range
      _ = checker[..., np.newaxis, :]
      _ = checker[:, ..., np.newaxis, :, :]
      # Reverse final dimension with new axis
      _ = checker[:, :, np.newaxis, :, 2::-1]
      # Ellipsis in middle of two newaxis
      _ = checker[np.newaxis, ..., np.newaxis]

  def testExpandVariable(self):
    with test_util.device(use_gpu=True):
      x = variables.Variable(7, dtype=dtypes.int32)
      self.evaluate(x.initializer)
      y = self.evaluate(x[None])
      self.assertEqual(y.shape, (1,))
      self.assertAllEqual(y, (7,))

  def testOptimizedCases(self):
    with test_util.device(use_gpu=True):
      checker = StridedSliceChecker(self,
                                    StridedSliceChecker.REF_TENSOR_ALIGNED)
      # Identity
      _ = checker[:]
      # Identity
      _ = checker[...]
      # Identity
      _ = checker[np.newaxis, ..., np.newaxis]
      # First axis slice
      _ = checker[1:]
      # First axis slice
      _ = checker[np.newaxis, 1:]

  def testMasks(self):
    with test_util.device(use_gpu=True):
      scalar = np.array(0)
      # Test tensor type mask
      checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
      _ = checker[checker.x > 2]
      _ = checker[checker.x <= 5]
      _ = checker[ops.convert_to_tensor(scalar)]

      # Test numpy array type mask
      raw = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
                       [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23,
                                                              24]]]]])
      checker1 = StridedSliceChecker(self, raw)
      _ = checker1[raw >= 4]
      _ = checker1[raw < 19]
      _ = checker1[scalar]

      # Test boolean and non boolean cases
      mask = np.array([True, False, True])
      raw1 = np.array([[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]])
      checker2 = StridedSliceChecker(self, raw1)
      _ = checker2[mask]
      _ = checker2[ops.convert_to_tensor(mask)]


class StridedSliceShapeTest(test_util.TensorFlowTestCase):
  """Test the shape inference of StridedSliceShapes."""

  def testUnknown(self):
    with test_util.device(use_gpu=True):

      @def_function.function
      def f(x):
        y = x[...]
        self.assertAllEqual(y.get_shape().ndims, None)

      _ = f.get_concrete_function(tensor_spec.TensorSpec(None, dtypes.float32))

  def tensorShapeEqual(self, x, y):
    self.assertTrue((x is not None and y is not None) or
                    (x is None and y is None))
    self.assertEqual(x.as_list(), y.as_list())

  def testTensorShapeUncertain(self):
    with test_util.device(use_gpu=True):

      @def_function.function
      def f1(x):
        y = x[3:5]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 7]))

      _ = f1.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f2(x):
        y = x[3:5, :, 4]
        self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2,
                                                                       None]))

      _ = f2.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f3(x):
        y = x[3:5, 3:4, 4]
        self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2,
                                                                       None]))

      _ = f3.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f4(x):
        y = x[3:5, :, 5:10]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 2]))

      _ = f4.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f5(x):
        y = x[3:5, :, 50:3]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 0]))

      _ = f5.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f6(x):
        y = x[3:5, :, array_ops.newaxis, 50:3,]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 1, 0]))

      _ = f6.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f7(x):
        y = x[1:5:2, :, array_ops.newaxis, 50:3,]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 1, 0]))

      _ = f7.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f8(x):
        y = x[:5:3, :, array_ops.newaxis, 50:3,]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([2, None, 1, 0]))

      _ = f8.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f9(x):
        y = x[:2:3, :, array_ops.newaxis, 50:3,]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([1, None, 1, 0]))

      _ = f9.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

      @def_function.function
      def f10(x):
        y = x[::-1, :, array_ops.newaxis, ::-2]
        self.tensorShapeEqual(y.get_shape(),
                              tensor_shape.TensorShape([5, None, 1, 4]))

      _ = f10.get_concrete_function(
          tensor_spec.TensorSpec((5, None, 7), dtypes.float32))

  def testTensorValuedIndexShape(self):
    with self.session():

      @def_function.function
      def f1(x, y):
        z = x[y]
        self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([3, 7]))

      _ = f1.get_concrete_function(
          tensor_spec.TensorSpec((5, 3, 7)),
          tensor_spec.TensorSpec((), dtypes.int32))

      @def_function.function
      def f2(x, y):
        z = x[y, ::-1]
        self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([3, 7]))

      _ = f2.get_concrete_function(
          tensor_spec.TensorSpec((5, 3, 7)),
          tensor_spec.TensorSpec((), dtypes.int32))

      @def_function.function
      def f3(x, y):
        z = x[y, ::-2]
        self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([2, 7]))

      _ = f3.get_concrete_function(
          tensor_spec.TensorSpec((5, 3, 7)),
          tensor_spec.TensorSpec((), dtypes.int32))

      @def_function.function
      def f4(x, y, s):
        z = x[y, s:2]
        self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([None,
                                                                       7]))

      _ = f4.get_concrete_function(
          tensor_spec.TensorSpec((5, 3, 7)),
          tensor_spec.TensorSpec((), dtypes.int32),
          tensor_spec.TensorSpec((), dtypes.int32))


class GradSliceChecker(object):
  """Tests that we can compute a gradient for var^2."""

  def __init__(self, test, var, varnp, use_tape):
    self.test = test
    self.var = var
    self.varnp = varnp
    self.use_tape = use_tape

  def __getitem__(self, spec):
    with test_util.AbstractGradientTape(
        use_tape=self.use_tape, persistent=True) as tape:
      tape.watch(self.var)
      val = self.var * self.var
      slice_var = self.var[spec]
      slice_val = val[spec]

      # compute analytic 2nd derivative
      analytic_grad2 = 2 * slice_val

      dy = variables.Variable(
          array_ops.ones_like(slice_var, dtype=dtypes.float32))
      assign = dy.assign(slice_var)

      slice_val_grad = tape.gradient(slice_val, self.var, [dy])
      slice_val_grad2 = tape.gradient(slice_val_grad, dy, [self.var])
    self.test.evaluate(assign)
    slice_val_grad_evaled, slice_val_grad2_evaled = (
        self.test.evaluate([slice_val_grad, slice_val_grad2]))
    analytic_grad2_evaled = self.test.evaluate(analytic_grad2)
    self.test.assertAllEqual(slice_val_grad2_evaled, analytic_grad2_evaled)

    # compute analytic gradient for slice
    np_val_grad = (2 * self.varnp * self.varnp)
    np_sliceval_grad = np.zeros(self.var.get_shape())
    if isinstance(spec, ops.Tensor):
      spec = self.test.evaluate([spec])
    np_sliceval_grad[spec] = np_val_grad[spec]
    # verify gradient
    self.test.assertAllEqual(slice_val_grad_evaled, np_sliceval_grad)


class StridedSliceGradTest(test_util.TensorFlowTestCase,
                           parameterized.TestCase):
  """Test that strided slice's custom gradient produces correct gradients."""

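  # Note: `set((True, context.executing_eagerly()))` is evaluated at import
  # time, so with eager execution enabled it collapses to {True} and only the
  # tape path runs; in graph mode both the tape and non-tape paths run.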
  @parameterized.parameters(set((True, context.executing_eagerly())))
  def testGradient(self, use_tape):
    with test_util.device(use_gpu=True):
      var = variables.Variable(
          array_ops.reshape(
              math_ops.range(1, 97, 1, dtype=dtypes.float32), shape=(6, 4, 4)))
      self.evaluate(var.initializer)

      raw = np.array(range(1, 97, 1)).reshape((6, 4, 4))
      grad = GradSliceChecker(self, var, raw, use_tape)
      _ = grad[2:6:2, 1:3, 1:3]
      _ = grad[3:0:-2, 1:3, 1:3]
      _ = grad[3:0:-2, array_ops.newaxis, 1:3, 2, array_ops.newaxis]
      _ = grad[3:0:-2, 1:3, 2]
      _ = grad[:, -1, :]
      _ = grad[:, -2, :]
      with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                  "out of bounds"):
        _ = grad[:, -200, :]
      with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                  "out of bounds"):
        _ = grad[:, 200, :]

      # Test numpy array type mask
      _ = grad[raw > 51]
      # Test tensor type mask
      _ = grad[ops.convert_to_tensor(raw) <= 76]

  @parameterized.parameters(set((True, context.executing_eagerly())))
  def testGradientZero(self, use_tape):
    with test_util.device(use_gpu=True):
      var = variables.Variable(8.)
      self.evaluate(var.initializer)
      grad = GradSliceChecker(self, var, np.array(8), use_tape)
      _ = grad[tuple()]

  @parameterized.parameters(set((True, context.executing_eagerly())))
  def testInt64Indices(self, use_tape):
    with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
      a = math_ops.range(3, dtype=dtypes.float32)
      tape.watch(a)
      index = constant_op.constant(1, dtype=dtypes.int64)
      b = 2. * a[index]
    grad = tape.gradient(b, a)
    self.assertAllEqual(self.evaluate(grad), [0., 2., 0.])


class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
  """Test varied index types and host located memory."""

  def testHostVsDevice(self):
    var2 = variables.Variable(
        array_ops.reshape(
            math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
            shape=(4, 1, 1)))
    varshape = variables.Variable([6, 4, 4], dtype=dtypes.int32)
    begin = constant_op.constant([0, 0, 0])
    end = constant_op.constant([4, 1, 1])
    strides = constant_op.constant([1, 1, 1])
    foo = array_ops.strided_slice_grad(varshape, begin, end, strides, var2)
    self.evaluate(var2.initializer)
    self.evaluate(varshape.initializer)
    self.evaluate(foo)

  def testInt64Shape(self):
    original_dy = array_ops.reshape(
        math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32), shape=(4, 1, 1))
    original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
    begin = constant_op.constant([0, 0, 0], dtype=dtypes.int64)
    end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
    strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
    dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
                                      original_dy)
    self.evaluate(dx)

  def testMixedIndexTypes(self):
    original_dy = array_ops.reshape(
        math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32), shape=(4, 1, 1))
    original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
    begin = constant_op.constant([0, 0, 0], dtype=dtypes.int32)
    end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
    strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
    with self.assertRaises((TypeError, errors_impl.InvalidArgumentError)):
      dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
                                        original_dy)
      self.evaluate(dx)


class BenchmarkSlice(object):

  def __init__(self, tensor):
    self.tensor = tensor

  def __getitem__(self, x):
    return self.tensor[x]


class StridedSliceBenchmark(test_lib.Benchmark):
  """Benchmark new strided slice operation on non-trivial case."""

  def run_and_time(self, slice_op):
    self.evaluate(variables.global_variables_initializer())
    for _ in range(10):
      _ = self.evaluate(slice_op)
    iters = 1000
    t0 = time.time()
    for _ in range(iters):
      self.evaluate(slice_op)
    t1 = time.time()
    self.report_benchmark(iters=iters, wall_time=(t1 - t0) / 1000.0)

  def make_variable(self):
    n = 256
    shape = (n, n, n)
    items = n**3
    var = variables.Variable(
        array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
        dtype=dtypes.float32)
    return var

  def benchmark_strided_slice_skip(self):
    with session.Session():
      var = self.make_variable()
      helper = BenchmarkSlice(var)
      slice_op = helper[::2, ::1, ::2]
      self.run_and_time(slice_op)

  def benchmark_strided_slice_easy(self):
    with session.Session():
      var = self.make_variable()
      helper = BenchmarkSlice(var)
      slice_op = helper[3::1, 3::1, 3::1]
      self.run_and_time(slice_op)

  def benchmark_slice_easy(self):
    with session.Session():
      var = self.make_variable()
      slice_op = var[3::1, 3::1, 3::1]
      self.run_and_time(slice_op)


class StridedSliceAssignChecker(object):

  def __init__(self, test, x, tensor_type=dtypes.float32, use_resource=False):
    self.tensor_type = tensor_type
    self.test = test
    self._use_resource = use_resource

    self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype)
    # Give the value a non-zero imaginary component for complex types.
    if tensor_type.is_complex:
      self.x_np -= 1j * self.x_np
    self.x = constant_op.constant(self.x_np, dtype=tensor_type)

  def __setitem__(self, index, value):
    value = np.array(value).astype(self.tensor_type.as_numpy_dtype)
    # Give the value a non-zero imaginary component for complex types.
    if self.tensor_type.is_complex:
      value -= 1j * value

    with test_util.device(use_gpu=True):
      if self._use_resource:
        var = resource_variable_ops.ResourceVariable(self.x)
      else:
        var = variables.Variable(self.x)
      self.test.evaluate(var.initializer)
      val = self.test.evaluate(var[index].assign(value))
      # val_copy is used to check that tf.compat.v1.assign works equivalently
      # to the assign method above.
      val_copy = self.test.evaluate(state_ops.assign(var[index], value))
      valnp = np.copy(self.x_np)
      valnp[index] = np.array(value)
      self.test.assertAllEqual(val, valnp)
      self.test.assertAllEqual(val_copy, valnp)


class SliceAssignTest(test_util.TensorFlowTestCase, parameterized.TestCase):

  def testInvalidSlice(self):
    foo = constant_op.constant([1, 2, 3])
    with self.assertRaisesRegex(AttributeError, "no attribute 'assign'"):
      bar = foo[:2].assign(constant_op.constant([1, 2]))
      self.evaluate(bar)

  def doTestSliceAssign(self, use_resource):
    for dtype in STRIDED_SLICE_TYPES:
      with self.subTest(dtype=dtype):
        checker = StridedSliceAssignChecker(
            self, [[1, 2, 3], [4, 5, 6]],
            use_resource=use_resource,
            tensor_type=dtype)
        # Check if equal
        checker[:] = [[10, 20, 30], [40, 50, 60]]
        # Check trivial (1,1) shape tensor
        checker[1:2, 1:2] = [[66]]
        # shrinks shape changes
        checker[1:2, 1] = [66]
        checker[1, 1:2] = [66]
        checker[1, 1] = 66
        # newaxis shape changes
        checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
        # shrink and newaxis
        checker[None, None, 0, 0:1] = [[[99]]]
        # Non unit strides
        checker[::1, ::-2] = [[3, 33], [4, 44]]
        # degenerate interval
        checker[8:10, 0] = []
        checker[8:10, 8:10] = [[]]
    # Assign vector to scalar (rank-0) using newaxis
    checker2 = StridedSliceAssignChecker(self, 222)
    checker2[()] = 6  # no indices
    checker2[...] = 6  # ellipsis
    checker2[None] = [6]  # new axis

  @test_util.disable_xla("b/123559667")
  def testSliceAssign(self):
    self.doTestSliceAssign(use_resource=False)

  @test_util.disable_xla("b/123559667")
  def testSliceAssignResource(self):
    self.doTestSliceAssign(use_resource=True)

  def testTypeError(self):
    init_val = constant_op.constant([1, 2], dtype=dtypes.int32)
    too_small_val = constant_op.constant([3, 4], dtype=dtypes.int8)
    too_large_val = constant_op.constant([3, 4], dtype=dtypes.int64)
    v = variables.VariableV1(init_val)
    with self.assertRaises((ValueError, TypeError)):
      self.evaluate(v[:].assign(too_small_val))
    with self.assertRaises((ValueError, TypeError)):
      self.evaluate(v[:].assign(too_large_val))

  def testTypeErrorResource(self):
    init_val = constant_op.constant([1, 2], dtype=dtypes.int32)
    too_small_val = constant_op.constant([3, 4], dtype=dtypes.int8)
    too_large_val = constant_op.constant([3, 4], dtype=dtypes.int64)
    v = resource_variable_ops.ResourceVariable(init_val)
    self.evaluate(v.initializer)
    with self.assertRaises(ValueError):
      self.evaluate(v[:].assign(too_large_val))
    with self.assertRaises(ValueError):
      self.evaluate(v[:].assign(too_small_val))

  @test_util.disable_xla("b/123559667")
  @test_util.run_in_graph_and_eager_modes
  def testTensorStridedSliceUpdateWithInputForward(self):
    """Tests tensor_strided_slice_update with input-forwarding taking effect."""
    @def_function.function
    def assign(x):
      y = x + 1
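      # Within the function, y has no consumers other than the update below,
      # so its buffer can be forwarded to tensor_strided_slice_update (the
      # behavior this test exercises).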
1285      return gen_array_ops.tensor_strided_slice_update(y, [0], [1], [1], [0])
1286    self.assertAllEqual([0, 1], self.evaluate(assign(array_ops.zeros([2]))))
1287
1288  @test_util.disable_xla("b/123559667")
1289  @test_util.run_in_graph_and_eager_modes
1290  def testTensorStridedSliceUpdateNoInputForward(self):
1291    """Tests tensor_strided_slice_update with no input-forwarding."""
1292    x = constant_op.constant([0.2, 0.3])
1293    y = x + 1
1294    # y's buffer won't be forwarded to z because y and z will be alive at the
1295    # same time later.
1296    z = gen_array_ops.tensor_strided_slice_update(y, [0], [1], [1], [0.4])
1297    ans = y + z
1298    self.assertAllClose([1.6, 2.6], self.evaluate(ans))
1299
1300  @test_util.disable_xla("b/123559667")
1301  def testTensorStridedSliceUpdateGradSimple(self):
1302    original = constant_op.constant([0.2, 0.3])
1303    updates = constant_op.constant([0.4])
1304    with backprop.GradientTape() as tape:
1305      tape.watch([original, updates])
1306      updated = gen_array_ops.tensor_strided_slice_update(
1307          original, [0], [1], [1], updates)
1308    d1, d2 = tape.gradient(updated, [original, updates],
1309                           output_gradients=constant_op.constant([2.0, 3.0]))
1310    self.assertAllClose([0.0, 3.0], d1)
1311    self.assertAllClose([2.0], d2)
1312
1313  @parameterized.named_parameters(
1314      ("_%s" % i, *args) for i, args in enumerate([  # pylint:disable=g-complex-comprehension
1315          ([2, 5], [0, 1], [1, 0], [1, 2], [2], 0, 2, 0, 0, 1),
1316          ([4], [5], [3], [1], [3], 1, 0, 0, 0, 0),
1317          ([2, 2, 3, 2], [0, 0, 1], [1, 0, 2], [1, 0, 1], [2, 3], 0, 0, 2, 0, 5)
1318      ]))
1319  @test_util.disable_xla("b/123559667")
1320  def testTensorStridedSliceUpdateGrad(
1321      self, shape, begin, end, strides, updates_shape, *args):
1322    with self.cached_session():
1323      def f(a, b):
1324        return gen_array_ops.tensor_strided_slice_update(
1325            a, begin, end, strides, b, *args)
1326      theoretical, numerical = gradient_checker_v2.compute_gradient(
1327          f, [array_ops.zeros(shape), array_ops.ones(updates_shape)], delta=1.0)
1328      self.assertAllClose(theoretical, numerical)
1329
1330
1331class ShapeSizeRankTest(test_util.TensorFlowTestCase):
1332
1333  @test_util.run_in_graph_and_eager_modes
1334  def testDenseShape(self):
1335    t_value = [[0, 42], [24, 0]]
1336    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t_value)))
1337    self.assertEqual(4, self.evaluate(array_ops.size(t_value)))
1338    self.assertEqual(2, self.evaluate(array_ops.rank(t_value)))
1339
1340    t = constant_op.constant(t_value)
1341    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t)))
1342    self.assertEqual(4, self.evaluate(array_ops.size(t)))
1343    self.assertEqual(2, self.evaluate(array_ops.rank(t)))
1344
1345  @test_util.run_in_graph_and_eager_modes
1346  def testSparseShape(self):
1347    sp_value = sparse_tensor.SparseTensorValue(
1348        indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
1349    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp_value)))
1350    self.assertEqual(4, self.evaluate(array_ops.size(sp_value)))
1351    self.assertEqual(2, self.evaluate(array_ops.rank(sp_value)))
1352
1353    sp = sparse_tensor.SparseTensor.from_value(sp_value)
1354    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp)))
1355    self.assertEqual(4, self.evaluate(array_ops.size(sp)))
1356    self.assertEqual(2, self.evaluate(array_ops.rank(sp)))
1357
1358  @test_util.run_in_graph_and_eager_modes
1359  def testSizeDtype(self):
1360    tensor = [1]
1361    self.assertEqual(dtypes.int32, self.evaluate(array_ops.size(tensor)).dtype)
1362    self.assertEqual(
1363        dtypes.int64,
1364        self.evaluate(array_ops.size(tensor, out_type=dtypes.int64)).dtype)
1365
1366
1367class SequenceMaskTest(test_util.TensorFlowTestCase):
1368
  def testExceptions(self):
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "maxlen must be scalar"):
        array_ops.sequence_mask([10, 20], [10, 20])

  def testOneDimensionalWithMaxlen(self):
    res = array_ops.sequence_mask(constant_op.constant([1, 3, 2]), 5)
    self.assertAllEqual(res.get_shape(), [3, 5])
    self.assertAllEqual(
        res,
        [[True, False, False, False, False], [True, True, True, False, False],
         [True, True, False, False, False]])

  def testOneDimensionalDtypeWithoutMaxlen(self):
    # test dtype and default maxlen:
    res = array_ops.sequence_mask(
        constant_op.constant([0, 1, 4]), dtype=dtypes.float32)
    self.assertAllEqual(res.get_shape().as_list(), [3, 4])
    self.assertAllEqual(
        res, [[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]])

  def testOneDimensionalWithoutMaxlen(self):
    res = array_ops.sequence_mask(constant_op.constant([0, 1, 4]))
    self.assertAllEqual(res.get_shape().as_list(), [3, 4])
    self.assertAllEqual(res,
                        [[False, False, False, False],
                         [True, False, False, False], [True, True, True, True]])

  def testTwoDimensional(self):
    res = array_ops.sequence_mask(constant_op.constant([[1, 3, 2]]), 5)
    self.assertAllEqual(res.get_shape(), [1, 3, 5])
    self.assertAllEqual(
        res,
        [[[True, False, False, False, False], [True, True, True, False, False],
          [True, True, False, False, False]]])

    # test dtype and default maxlen:
    res = array_ops.sequence_mask(
        constant_op.constant([[0, 1, 4], [1, 2, 3]]), dtype=dtypes.float32)
    self.assertAllEqual(res.get_shape().as_list(), [2, 3, 4])
    self.assertAllEqual(
        res,
        [[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]],
         [[1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0]]])

  def testDtypes(self):

    def check_dtypes(lengths_dtype, maxlen_dtype):
      res = array_ops.sequence_mask(
          constant_op.constant([1, 3, 2], dtype=lengths_dtype),
          constant_op.constant(5, dtype=maxlen_dtype))
      self.assertAllEqual(res.get_shape(), [3, 5])
      self.assertAllEqual(
          res,
          [[True, False, False, False, False], [True, True, True, False, False],
           [True, True, False, False, False]])

    check_dtypes(dtypes.int32, dtypes.int32)
    check_dtypes(dtypes.int32, dtypes.int64)
    check_dtypes(dtypes.int64, dtypes.int32)
    check_dtypes(dtypes.int64, dtypes.int64)

  def testOutputDtype(self):

    def check_output_dtype(output_dtype):
      res = self.evaluate(
          array_ops.sequence_mask(
              constant_op.constant([1, 3, 2], dtype=dtypes.int32),
              constant_op.constant(5, dtype=dtypes.int32),
              dtype=output_dtype))
      self.assertAllEqual(
          res,
          self.evaluate(
              math_ops.cast([[True, False, False, False, False],
                             [True, True, True, False, False],
                             [True, True, False, False, False]], output_dtype)))

    check_output_dtype(dtypes.bool)
    check_output_dtype("bool")
    check_output_dtype(np.bool_)
    check_output_dtype(dtypes.int32)
    check_output_dtype("int32")
    check_output_dtype(np.int32)
    check_output_dtype(dtypes.float32)
    check_output_dtype("float32")
    check_output_dtype(np.float32)
    check_output_dtype(dtypes.int64)
    check_output_dtype("int64")
    check_output_dtype(np.int64)
    check_output_dtype(dtypes.float64)
    check_output_dtype("float64")
    check_output_dtype(np.float64)


class ConcatSliceResourceTest(test_util.TensorFlowTestCase):

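  # Stacking two resource handles and slicing one back out should preserve the
  # underlying handle: creating the resource through the slice and then again
  # through r2 itself must collide with AlreadyExistsError.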
  @test_util.run_in_graph_and_eager_modes
  def testConcatSlice(self):
    r1 = test_ops.stub_resource_handle_op(container="a", shared_name="b")
    r2 = test_ops.stub_resource_handle_op(container="a", shared_name="c")
    c = array_ops.stack([r1, r2])
    s = array_ops.strided_slice(c, [1], [2])
    self.evaluate(test_ops.resource_create_op(s))
    with self.assertRaises(errors.AlreadyExistsError):
      self.evaluate(test_ops.resource_create_op(r2))


class IdentityTest(test_util.TensorFlowTestCase):

  @test_util.run_gpu_only
  def testEagerIdentity(self):
    with context.eager_mode():

      def _test(x, y, device):
        self.assertAllEqual(x.numpy(), y.numpy())
        self.assertIn(device, y.device.lower())

      with test_util.force_gpu():
        a = constant_op.constant([[2], [3]], dtype=dtypes.float32)
      with test_util.force_gpu():
        b = array_ops.identity(a)
        _test(a, b, "gpu")
      with test_util.force_cpu():
        c = array_ops.identity(b)
        _test(b, c, "cpu")
      with test_util.force_cpu():
        d = array_ops.identity(c)
        _test(c, d, "cpu")
      with test_util.force_gpu():
        e = array_ops.identity(d)
        _test(d, e, "gpu")


class PadTest(test_util.TensorFlowTestCase):

  def testEager(self):
    with context.eager_mode():
      t = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      paddings = constant_op.constant([[1, 1], [2, 2]])
      padded = array_ops.pad(t, paddings, "CONSTANT")
      self.assertAllEqual(padded.numpy(),
                          [[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 0, 0],
                           [0, 0, 4, 5, 6, 0, 0], [0, 0, 0, 0, 0, 0, 0]])

  def testSymmetricMirrorPadGrad(self):
    t = np.broadcast_to(np.arange(0, 7), (3, 2, 1, 7))
    paddings = constant_op.constant([
        [1, 1],
        [0, 0],
        [0, 0],
        [2, 2],
    ])
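    # mirror_pad_grad folds gradients from mirrored positions back onto the
    # unpadded core. SYMMETRIC padding [2, 2] lays a core [a, b, c] out as
    # [b, a, a, b, c, c, b], so with t = arange(7) along the last axis the core
    # receives [t1 + t2, t0 + t3 + t6, t4 + t5] = [3, 9, 9]; the [1, 1] padding
    # on the first axis folds three identical rows into one, tripling this to
    # [9, 27, 27].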
    expected = np.broadcast_to(np.array([9, 27, 27]), (1, 2, 1, 3))
    result = gen_array_ops.mirror_pad_grad(t, paddings, "SYMMETRIC")
    self.assertAllEqual(result, expected)

  def testReflectMirrorPadGrad(self):
    t = np.broadcast_to(np.reshape(np.arange(0, 7), (7, 1)), (1, 4, 7, 1))
    paddings = constant_op.constant([
        [0, 0],
        [1, 1],
        [2, 2],
        [0, 0],
    ])
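    # REFLECT padding [2, 2] lays a core [a, b, c] out as
    # [c, b, a, b, c, b, a], so with t = arange(7) along the third axis the
    # core receives [t2 + t6, t1 + t3 + t5, t0 + t4] = [8, 9, 4]; the [1, 1]
    # REFLECT padding on the second axis folds two identical rows back onto
    # the core, doubling this to [16, 18, 8].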
    expected = np.broadcast_to(
        np.reshape(np.array([16, 18, 8]), (3, 1)), (1, 2, 3, 1))
    result = gen_array_ops.mirror_pad_grad(t, paddings, "REFLECT")
    self.assertAllEqual(result, expected)


class InvertPermutationTest(test_util.TensorFlowTestCase):

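  # invert_permutation computes y such that y[x[i]] = i, i.e. the inverse
  # mapping, so x = [3, 4, 0, 2, 1] yields y = [2, 4, 3, 0, 1].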
  def testInvertPermutation(self):
    for dtype in [dtypes.int32, dtypes.int64]:
      with self.subTest(dtype=dtype, use_gpu=True):
        x = constant_op.constant([3, 4, 0, 2, 1], dtype=dtype)
        y = array_ops.invert_permutation(x)
        self.assertAllEqual(y.get_shape(), [5])
        self.assertAllEqual(y, [2, 4, 3, 0, 1])


class UnravelIndexTest(test_util.TensorFlowTestCase):

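  # unravel_index mirrors np.unravel_index: it converts flat indices into
  # per-dimension coordinates, e.g. 1621 -> [3, 1, 4, 1] for dims [6, 7, 8, 9]
  # since ((3 * 7 + 1) * 8 + 4) * 9 + 1 == 1621.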
  # TODO(b/73086570): Reenable test.
  @unittest.skip("Test does not pass internally.")
  def testUnravelIndex(self):
    with self.cached_session():
      for dtype in [dtypes.int32, dtypes.int64]:
        with self.subTest(dtype=dtype):
          indices_1 = constant_op.constant(1621, dtype=dtype)
          dims_1 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
          out_1 = array_ops.unravel_index(indices_1, dims_1)
          self.assertAllEqual(out_1, [3, 1, 4, 1])

          indices_2 = constant_op.constant([1621], dtype=dtype)
          dims_2 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
          out_2 = array_ops.unravel_index(indices_2, dims_2)
          self.assertAllEqual(out_2, [[3], [1], [4], [1]])

          indices_3 = constant_op.constant([22, 41, 37], dtype=dtype)
          dims_3 = constant_op.constant([7, 6], dtype=dtype)
          out_3 = array_ops.unravel_index(indices_3, dims_3)
          self.assertAllEqual(out_3, [[3, 6, 6], [4, 5, 1]])

  # Test case for GitHub issue 40204.
  def testUnravelIndexZeroDim(self):
    with self.cached_session():
      for dtype in [dtypes.int32, dtypes.int64]:
        with self.assertRaisesRegex(errors.InvalidArgumentError,
                                    "dims cannot contain a dim of zero"):
          indices = constant_op.constant([2, 5, 7], dtype=dtype)
          dims = constant_op.constant([3, 0], dtype=dtype)
          self.evaluate(array_ops.unravel_index(indices=indices, dims=dims))


class GuaranteeConstOpTest(test_util.TensorFlowTestCase):

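  # guarantee_const is an identity that promises the runtime its input will
  # not change; it accepts variable values but, as tested below, rejects raw
  # resource handles.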
  def testSimple(self):
    a = array_ops.constant(10)
    guarantee_a = array_ops.guarantee_const(a)
    self.assertEqual(10, self.evaluate(guarantee_a))

  def testVariables(self):
    for use_resource in [False, True]:
      with self.subTest(use_resource=use_resource):
        a = variable_scope.get_variable(
            "var_{}".format(use_resource), [],
            initializer=init_ops.constant_initializer(10.0),
            use_resource=use_resource)
        guarantee_a = array_ops.guarantee_const(a)
        self.evaluate(a.initializer)
        self.assertEqual(10.0, self.evaluate(guarantee_a))

  def testResourceRejection(self):
    with ops.device("/cpu:0"):
      a = variable_scope.get_variable(
          "resource_var", [],
          initializer=init_ops.constant_initializer(10.0),
          use_resource=True)
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
                                             "cannot be a resource variable"):
      guarantee_a = array_ops.guarantee_const(a.handle)
      self.evaluate(a.initializer)
      self.evaluate(guarantee_a)


class SnapshotOpTest(test_util.TensorFlowTestCase):

  def testSnapshot(self):
    for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
      with self.subTest(dtype=dtype, use_gpu=True):
        x = constant_op.constant([0, 1, 2, 3], dtype=dtype)
        y = gen_array_ops.snapshot(x)
        self.assertAllEqual(y, [0, 1, 2, 3])


@test_util.run_all_in_graph_and_eager_modes
class QuantizeAndDequantizeTest(test_util.TensorFlowTestCase):

  # Generates a tensor of the specified `shape` using values from `values`
  # scaled by (slice_idx + 1) along the `axis` dimension.
  def _scale_per_slice(self, shape, axis, values):
    # Note: repeats the values if the shape is larger than values.
    out = np.take(values, np.remainder(np.arange(np.prod(shape)),
                                       len(values))).reshape(shape)
    if axis is not None:
      scale_shape = [1] * len(shape)
      scale_shape[axis] = shape[axis]
      out *= np.arange(1, shape[axis] + 1).reshape(scale_shape)
    return out

  def testAxis(self):
    shape = np.array([2, 3, 4, 5])
    values = np.array([-1, -0.5, 0, 0.3, 0.8, 0.555, 0.5], dtype=np.float32)
    quant_values = np.array(
        [-1, -0.5, 0, 38.0 / 128, 102.0 / 128, 71.0 / 128, 0.5],
        dtype=np.float32)
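    # The quantized values are the HALF_UP-rounded multiples of 1/128:
    # 0.3 * 128 = 38.4 rounds to 38/128, 0.8 * 128 = 102.4 to 102/128, and
    # 0.555 * 128 = 71.04 to 71/128.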
    for axis in [None, 0, 1, 2, 3]:
      with self.subTest(axis=axis):
        inputs = constant_op.constant(
            self._scale_per_slice(shape, axis, values))
        expected = self._scale_per_slice(shape, axis, quant_values)
        unused_minmax_value = 0 if axis is None else [0] * shape[axis]
        fake_quantized = self.evaluate(
            array_ops.quantize_and_dequantize_v2(
                inputs,
                unused_minmax_value,
                unused_minmax_value,
                range_given=False,
                round_mode="HALF_UP",
                axis=axis))
        self.assertAllEqual(fake_quantized, expected)
        if axis is not None:
          fake_quantized = self.evaluate(
              array_ops.quantize_and_dequantize_v2(
                  inputs,
                  unused_minmax_value,
                  unused_minmax_value,
                  range_given=False,
                  axis=(axis - 4)))
          self.assertAllClose(fake_quantized, expected)

  def testBadAxis(self):
    input_tensor = [2.5, 2.5]
    input_min = [0, 0]
    input_max = [1, 1]
    error_message_pattern = "Shape must be at least rank 11 but is rank 1"
    # TODO(b/171260356): Eager mode and graph mode throw different error types.
    error = (errors.InvalidArgumentError
             if context.executing_eagerly() else ValueError)
    with self.assertRaisesRegex(error, error_message_pattern):
      self.evaluate(
          array_ops.quantize_and_dequantize_v2(
              input=input_tensor,
              input_min=input_min,
              input_max=input_max,
              axis=10))

  def testQuantizeDequantizeGrad(self):
    shape = (2, 2)
    max_threshold = 0
    min_threshold = -10
    input_value = np.random.rand(2, 2) * 40.0 - 20.0
    input_tensor = constant_op.constant(input_value, shape=shape,
                                        name="input_tensor")
    with self.cached_session():
      def f(a):
        return array_ops.quantize_and_dequantize_v2(
            a,
            input_min=min_threshold,
            input_max=max_threshold,
            range_given=True)
      output_grad = gradient_checker_v2.compute_gradient(f, [input_tensor])
      self.assertAllClose(output_grad[0], np.zeros([1, 4, 4]))


@test_util.run_all_in_graph_and_eager_modes
class SortedSearchTest(test_util.TensorFlowTestCase):

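  # array_ops.searchsorted matches np.searchsorted applied row by row over the
  # innermost dimension: side="left" yields the lower bound, side="right" the
  # upper bound.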
  def testUpperBoundFloatHandCoded(self):
    cdf = np.array([0, .2, .5, .6, .8, 1.], dtype=np.float32)
    arr = np.array([.04, .99, .53, .58, .31, .01, .79, .8, .21],
                   dtype=np.float32)
    result = np.searchsorted(cdf, arr, side="right")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
    self.assertAllEqual(result, tf_result)

  def testUpperBoundFloatRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.uniform(size=shape).astype(np.float32), axis=(d - 1))
      arr = np.random.uniform(size=shape).astype(np.float32) * dim_size

      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))

      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")

      result = result.reshape(shape)

      self.assertAllEqual(result, tf_result)

  def testUpperBoundFloatUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.uniform(size=[batch_size, size_search_array]).astype(
            np.float32),
        axis=1)
    arr = np.random.uniform(size=[batch_size, size_values]).astype(
        np.float32) * size_search_array

    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))

    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")

    self.assertAllEqual(result, tf_result)

  def testLowerBoundFloatHandCoded(self):
    cdf = np.array([0, .2, .5, .6, .8, 1.], dtype=np.float32)
    arr = np.array([.04, .99, .53, .58, .31, .01, .79, .8, .21],
                   dtype=np.float32)
    result = np.searchsorted(cdf, arr, side="left")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
    self.assertAllEqual(result, tf_result)

  def testLowerBoundFloatRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.uniform(size=shape).astype(np.float32), axis=(d - 1))
      arr = np.random.uniform(size=shape).astype(np.float32) * dim_size

      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))

      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")

      result = result.reshape(shape)

      self.assertAllEqual(result, tf_result)

  def testLowerBoundFloatUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.uniform(size=[batch_size, size_search_array]).astype(
            np.float32),
        axis=1)
    arr = np.random.uniform(size=[batch_size, size_values]).astype(
        np.float32) * size_search_array

    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))

    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")

    self.assertAllEqual(result, tf_result)

  def testUpperBoundIntHandCoded(self):
    cdf = np.array([0, 20, 50, 60, 80, 100], dtype=np.int64)
    arr = np.array([4, 99, 53, 58, 31, 1, 79, 8, 21], dtype=np.int64)
    result = np.searchsorted(cdf, arr, side="right")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
    self.assertAllEqual(result, tf_result)

  def testUpperBoundIntRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.randint(low=0, high=10, size=shape).astype(np.int64),
          axis=(d - 1))
      arr = np.random.randint(
          low=0, high=10 * dim_size, size=shape).astype(np.int64)

      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))

      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")

      result = result.reshape(shape)

      self.assertAllEqual(result, tf_result)

  def testUpperBoundIntUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.randint(low=0, high=10,
                          size=[batch_size,
                                size_search_array]).astype(np.int64),
        axis=1)
    arr = np.random.randint(
        low=0, high=10 * size_search_array, size=[batch_size,
                                                  size_values]).astype(np.int64)

    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))

    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")

    self.assertAllEqual(result, tf_result)

  def testLowerBoundIntHandCoded(self):
    cdf = np.array([0, 20, 50, 60, 80, 100], dtype=np.int64)
    arr = np.array([4, 99, 53, 58, 31, 1, 79, 8, 21], dtype=np.int64)
    result = np.searchsorted(cdf, arr, side="left")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
    self.assertAllEqual(result, tf_result)

  def testLowerBoundIntRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.randint(low=0, high=10, size=shape).astype(np.int64),
          axis=(d - 1))
      arr = np.random.randint(
          low=0, high=10 * dim_size, size=shape).astype(np.int64)

      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))

      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")

      result = result.reshape(shape)

      self.assertAllEqual(result, tf_result)

  def testLowerBoundIntUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.randint(low=0, high=10,
                          size=[batch_size,
                                size_search_array]).astype(np.int64),
        axis=1)
    arr = np.random.randint(
        low=0, high=10 * size_search_array, size=[batch_size,
                                                  size_values]).astype(np.int64)

    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))

    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")

    self.assertAllEqual(result, tf_result)

  def testZeroSequenceSize(self):
    dtype = dtypes.int32
    for side in ("left", "right"):
      with self.subTest(side=side):
        self.assertAllEqual(
            array_ops.searchsorted(
                array_ops.ones([2, 0]),
                array_ops.ones([2, 3]),
                side=side,
                out_type=dtype), array_ops.zeros([2, 3], dtype))

  def testZeroValueSize(self):
    dtype = dtypes.int32
    for side in ("left", "right"):
      with self.subTest(side=side):
        self.assertAllEqual(
            array_ops.searchsorted(
                array_ops.ones([2, 3]),
                array_ops.ones([2, 0]),
                side=side,
                out_type=dtype), array_ops.zeros([2, 0], dtype))


class BatchGatherNdTest(test_util.TensorFlowTestCase):

  def testShapesMatch(self):
    """Tests various shape combinations."""
    # Each entry is (params_shape, indices_shape, batch_dims).
    shapes = [
        ((2, 2, 2), (2, 1), 1),
        ((2, 2, 2), (2, 2), 1),
        ((2, 2, 2), (2, 3), 0),
        ((2, 2, 2), (3,), 0),
        ((2, 2, 2), (1,), 0),
        ((2, 2, 3, 2), (2, 3), 1),
        ((2, 2, 3, 2), (2, 2), 1),
        ((2, 2, 3, 2), (2, 1), 1),
        ((2, 2, 3, 2), (2, 1, 3), 1),
        ((2, 2, 3, 2), (2, 2, 2), 1),
        ((2, 2, 3, 2), (2, 3, 1), 1),
        ((3, 2, 2, 3, 4), (3, 2, 3), 2),
        ((3, 2, 2, 3, 4), (3, 2, 2), 2),
        ((3, 2, 2, 3, 4), (3, 2, 1), 2),
        ((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),
        ((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),
        ((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),
    ]

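    # The expected output shape is indices_shape[:-1] plus the trailing params
    # dims not consumed by the innermost index, i.e.
    # params_shape[batch_dims + indices_shape[-1]:].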
    for params_shape, indices_shape, batch_dims in shapes:
      with self.subTest(
          params_shape=params_shape,
          indices_shape=indices_shape,
          batch_dims=batch_dims):
        params = constant_op.constant(1.0, shape=params_shape)
        indices = constant_op.constant(
            1, shape=indices_shape, dtype=dtypes.int32)
        out = array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)
        ndims_params = len(params_shape) - batch_dims
        ndims_rows = ndims_params - indices_shape[-1]
        expected_out_shape = indices_shape[:-1]
        if ndims_rows > 0:
          expected_out_shape += params_shape[-ndims_rows:]
        self.assertSequenceEqual(out.shape, expected_out_shape)

  def testReducesToGatherNDWhenBatchDimIsZero(self):
    """Confirms setting batch_dims to zero reduces to tf.gather_nd."""
    params = constant_op.constant(np.random.uniform(0.0, 1.0, size=(7, 8, 9)))
    indices_shapes = [
        (1,),
        (3, 1),
        (3, 3, 1),
        (2,),
        (3, 2),
        (3, 3, 2),
        (3,),
        (3, 3),
        (3, 3, 3),
    ]

    for indices_shape in indices_shapes:
      with self.subTest(indices_shape=indices_shape):
        indices = np.random.randint(0, 7, size=indices_shape)
        gather_nd_result = gen_array_ops.gather_nd(params, indices)
        batch_gather_nd_result = array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=0)
        self.assertAllEqual(gather_nd_result, batch_gather_nd_result)

  def testSameResultAsMapFn(self):
    """Compares against map_fn applying gather_nd to each batch element."""
    # Each entry is (params_shape, indices_shape, batch_dims).
    shapes = [
        ((2, 2, 2), (2, 1), 1),
        ((2, 2, 2), (2, 2), 1),
        ((2, 2, 3, 2), (2, 3), 1),
        ((2, 2, 3, 2), (2, 2), 1),
        ((2, 2, 3, 2), (2, 1), 1),
        ((2, 2, 3, 2), (2, 1, 3), 1),
        ((2, 2, 3, 2), (2, 2, 2), 1),
        ((2, 2, 3, 2), (2, 3, 1), 1),
        ((3, 2, 2, 3, 4), (3, 2, 3), 2),
        ((3, 2, 2, 3, 4), (3, 2, 2), 2),
        ((3, 2, 2, 3, 4), (3, 2, 1), 2),
        ((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),
        ((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),
        ((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),
    ]

    for params_shape, indices_shape, batch_dims in shapes:
      with self.subTest(
          params_shape=params_shape,
          indices_shape=indices_shape,
          batch_dims=batch_dims):
        params = constant_op.constant(
            np.random.uniform(0.0, 1.0, size=params_shape))
        indices = np.random.randint(0, 2, size=indices_shape)
        batch_gather_nd_result = array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)

        if batch_dims > 1:
          params = array_ops.reshape(
              params, shape=[-1] + list(params_shape[batch_dims:]))
          indices = array_ops.reshape(
              indices, shape=[-1] + list(indices_shape[batch_dims:]))

        map_fn_gather_nd_result = map_fn.map_fn(
            fn=self._map_fn_body, elems=(params, indices), dtype=dtypes.float64)

        if batch_dims > 1:
          out_shape = map_fn_gather_nd_result.shape.as_list()
          out_shape = list(params_shape[:batch_dims]) + out_shape[1:]
          map_fn_gather_nd_result = array_ops.reshape(
              map_fn_gather_nd_result, shape=out_shape)

        self.assertAllEqual(map_fn_gather_nd_result, batch_gather_nd_result)

  def _map_fn_body(self, elems):
    return gen_array_ops.gather_nd(elems[0], elems[1])

  def testBatchDimsAsTensor(self):
    """Tests that passing batch_dims as a Tensor works as intended."""
    # Each entry is (params_shape, indices_shape, batch_dims).
    shapes = [
        ((3, 2, 2, 3, 4), (3, 2, 3, 1), 0),
        ((3, 2, 2, 3, 4), (3, 2, 3, 1), 1),
        ((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),
    ]

    for params_shape, indices_shape, batch_dims in shapes:
      with self.subTest(
          params_shape=params_shape,
          indices_shape=indices_shape,
          batch_dims=batch_dims):
        params = constant_op.constant(
            np.random.uniform(0.0, 1.0, size=params_shape))
        indices = np.random.randint(0, 2, size=indices_shape)
        batch_gather_nd_result = array_ops.gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)
        batch_dims_tensor = constant_op.constant([batch_dims])
        batch_gather_nd_tensor_batch_dims_result = array_ops.gather_nd(
            params=params, indices=indices, batch_dims=batch_dims_tensor)

        self.assertAllEqual(batch_gather_nd_tensor_batch_dims_result,
                            batch_gather_nd_result)

  def testInvalidBatchDimsRaisesException(self):
    """Tests that invalid batch_dims values raise the expected exceptions."""
    params = constant_op.constant(
        np.random.uniform(0.0, 1.0, size=(3, 2, 2, 3, 4)))
    indices = np.random.randint(0, 2, size=(3, 2, 3))

    with self.assertRaises(TypeError):
      array_ops.batch_gather_nd(
          params=params,
          indices=indices,
          batch_dims=constant_op.constant((0, 1)))

    with self.assertRaises(ValueError):
      array_ops.batch_gather_nd(params=params, indices=indices, batch_dims=-1)

    with self.assertRaises(ValueError):
      array_ops.batch_gather_nd(params=params, indices=indices, batch_dims=4)

  def testNoneBatchDimensions(self):
    """Tests that batch_gather_nd works with unknown (None) batch dimensions."""
    # Each entry is (params_shape, indices_shape, batch_dims).
    shapes = [
        ((2, 2, 2), (2, 1), 1),
        ((2, 2, 2), (2, 2), 1),
        ((2, 2, 3, 2), (2, 3), 1),
        ((2, 2, 3, 2), (2, 2), 1),
        ((2, 2, 3, 2), (2, 1), 1),
        ((2, 2, 3, 2), (2, 1, 3), 1),
        ((2, 2, 3, 2), (2, 2, 2), 1),
        ((2, 2, 3, 2), (2, 3, 1), 1),
        ((3, 2, 2, 3, 4), (3, 2, 3), 2),
        ((3, 2, 2, 3, 4), (3, 2, 2), 2),
        ((3, 2, 2, 3, 4), (3, 2, 1), 2),
        ((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),
        ((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),
        ((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),
    ]

    for params_shape, indices_shape, batch_dims in shapes:
      params_ph_shape = list(params_shape)
      indices_ph_shape = list(indices_shape)
      for i in range(batch_dims):
        params_ph_shape[i] = None
        indices_ph_shape[i] = None

      @def_function.function
      def func(params, indices):
        return array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)  # pylint: disable=cell-var-from-loop

      f = func.get_concrete_function(
          tensor_spec.TensorSpec(params_ph_shape, dtypes.float32),
          tensor_spec.TensorSpec(indices_ph_shape, dtypes.int32))

      params_val = np.ones(dtype=np.float32, shape=params_shape)
      indices_val = np.ones(dtype=np.int32, shape=indices_shape)
      res = f(params_val, indices_val)
      row_ndims = len(params_shape) - batch_dims - indices_shape[-1]
      expected_out_shape = indices_shape[:-1]
      if row_ndims > 0:
        expected_out_shape += params_shape[-row_ndims:]

      self.assertSequenceEqual(res.shape, expected_out_shape)


@test_util.run_all_in_graph_and_eager_modes
class RepeatTest(test_util.TensorFlowTestCase, parameterized.TestCase):

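  # array_ops.repeat follows np.repeat semantics: repeats may be a scalar or
  # per-element, and axis=None flattens the input before repeating.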
  @parameterized.parameters(
      (3, 4, None),
      ([[1, 2], [3, 4]], 2, None),
      ([[1, 2], [3, 4]], [1, 2], 0),
      ([[1, 2], [3, 4]], [1, 2], 1),
      ([[1, 2], [3, 4]], 3, 1),
      ([[1, 2], [3, 4]], [1, 2, 3, 4], None),
      (np.ones([0, 4]), 0, 1),
      (np.ones([1, 2]), [2], None),
  )
  def testRepeat(self, array, repeats, axis):
    array = np.array(array)

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)] * 2)
    def repeat_fn(array, repeats):
      return array_ops.repeat(array, repeats, axis)

    v_tf = array_ops.repeat(constant_op.constant(array), repeats, axis)
    v_tf_fn = repeat_fn(
        constant_op.constant(array, dtype=dtypes.int32), repeats)
    v_np = np.repeat(array, repeats, axis)
    self.assertAllEqual(v_tf, v_np)
    self.assertAllEqual(v_tf_fn, v_np)


@test_util.run_all_in_graph_and_eager_modes
class TileVariantTest(test_util.TensorFlowTestCase):

  def test_tile_tensor_list(self):
    t = constant_op.constant(np.random.uniform(size=[2, 3, 4]))
    handle = list_ops.tensor_list_from_tensor(t, element_shape=None)
    with ops.device("CPU:0"):
      tiled_handles = array_ops.tile(array_ops.reshape(handle, [1]), [2])
    tiled_tensor_0 = list_ops.tensor_list_stack(tiled_handles[0], t.dtype, 2,
                                                [3, 4])
    tiled_tensor_1 = list_ops.tensor_list_stack(tiled_handles[1], t.dtype, 2,
                                                [3, 4])
    self.assertAllEqual(t, tiled_tensor_0)
    self.assertAllEqual(t, tiled_tensor_1)
    # Now mutate some of the lists and make sure the changes are not reflected
    # in the tiled handles.
    with ops.control_dependencies([
        list_ops.tensor_list_scatter([t[0] + 1], [0], input_handle=handle),
        list_ops.tensor_list_set_item(tiled_handles[0], 0, t[0] + 2)]):
      tiled_tensor_0 = list_ops.tensor_list_stack(tiled_handles[0], t.dtype, 2,
                                                  [3, 4])
      tiled_tensor_1 = list_ops.tensor_list_stack(tiled_handles[1], t.dtype, 2,
                                                  [3, 4])
    self.assertAllEqual(t, tiled_tensor_0)
    self.assertAllEqual(t, tiled_tensor_1)


if __name__ == "__main__":
  test_lib.main()