# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for slice op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test


class SliceTest(test.TestCase):

  def testEmpty(self):
    inp = np.random.rand(4, 4).astype("f")
    for k in xrange(4):
      with self.cached_session():
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
        slice_t = a[2, k:k]
        slice_val = self.evaluate(slice_t)
      self.assertAllEqual(slice_val, inp[2, k:k])

  def testInt32(self):
    inp = np.random.rand(4, 4).astype("i")
    for k in xrange(4):
      with self.cached_session():
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
        slice_t = a[2, k:k]
        slice_val = self.evaluate(slice_t)
      self.assertAllEqual(slice_val, inp[2, k:k])

  def testSlicingWithInt64Index(self):
    with self.cached_session(force_gpu=test.is_gpu_available()):
      a = constant_op.constant([0, 1, 2], dtype=dtypes.int32)

      # Slice using int64 Tensor.
      i = constant_op.constant(1, dtype=dtypes.int64)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i + 1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      # Slice using int64 integer.
      i = np.asarray(1).astype(np.int64)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i + 1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      a_int32 = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
      slice_t = array_ops.slice(a_int32,
                                np.asarray([1]).astype(np.int64),
                                np.asarray([2]).astype(np.int64))
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)

      a_float32 = constant_op.constant([0, 1, 2], dtype=dtypes.float32)
      slice_t = array_ops.slice(a_float32,
                                np.asarray([1]).astype(np.int64),
                                np.asarray([2]).astype(np.int64))
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)

  def testSlicingInt64Tensor(self):
    with self.cached_session(force_gpu=test.is_gpu_available()):
      a = constant_op.constant([0, 1, 2], dtype=dtypes.int64)

      # Slice using int32 Tensor.
      i = constant_op.constant(1, dtype=dtypes.int32)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i + 1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      # Slice using int32 integer.
      i = np.asarray(1).astype(np.int32)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i + 1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      slice_t = array_ops.slice(a, [1], [2])
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)

  def testSelectAll(self):
    for _ in range(10):
      with self.cached_session():
        inp = np.random.rand(4, 4, 4, 4).astype("f")
        a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)

        slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
        slice_implicit_t = a[:, :, :, :]

        self.assertAllEqual(inp, self.evaluate(slice_explicit_t))
        self.assertAllEqual(inp, self.evaluate(slice_implicit_t))
        self.assertEqual(inp.shape, slice_explicit_t.get_shape())
        self.assertEqual(inp.shape, slice_implicit_t.get_shape())

  def testSingleDimension(self):
    for _ in range(10):
      with self.cached_session():
        inp = np.random.rand(10).astype("f")
        a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)

        hi = np.random.randint(0, 9)
        scalar_t = a[hi]
        scalar_val = self.evaluate(scalar_t)
        self.assertAllEqual(scalar_val, inp[hi])

        if hi > 0:
          lo = np.random.randint(0, hi)
        else:
          lo = 0
        slice_t = a[lo:hi]
        slice_val = self.evaluate(slice_t)
        self.assertAllEqual(slice_val, inp[lo:hi])

  def test3Dimension(self):
    with self.cached_session():
      input_shape = [8, 16, 16, 16, 8]
      total_input_size = 1
      for s in input_shape:
        total_input_size *= s
      inputs = [
          i * 1.0 / total_input_size for i in range(1, total_input_size + 1)
      ]
      a = constant_op.constant(inputs, shape=input_shape, dtype=dtypes.float32)

      filter_shape = [1, 1, 1, 8, 8]
      total_filter_size = 1
      for s in filter_shape:
        total_filter_size *= s
      filters = [
          i * 1.0 / total_filter_size for i in range(1, total_filter_size + 1)
      ]
      f = constant_op.constant(
          filters, shape=filter_shape, dtype=dtypes.float32)

      conv_t = nn_ops.conv3d(
          a, filter=f, strides=[1, 1, 1, 1, 1], padding="VALID")
      slice_t = array_ops.slice(conv_t, [0, 1, 1, 1, 0], [1, 1, 1, 1, 8])
      result = self.evaluate(slice_t)
      expected = [
          0.03028321, 0.03132677, 0.03237033, 0.03341389, 0.03445745, 0.035501,
          0.03654456, 0.03758812
      ]
      self.assertAllClose(expected, result.flatten(), rtol=1e-6)
183
184  def testScalarInput(self):
185    input_val = 0
186    # Test with constant input; shape inference fails.
187    with self.assertRaisesWithPredicateMatch(
188        (ValueError, errors_impl.InvalidArgumentError), "out of range"):
189      constant_op.constant(input_val)[:].get_shape()
190
    # Test with a non-Tensor input: a Python int is not subscriptable, so
    # the call fails with a TypeError before any kernel runs.
    @def_function.function
    def func(input_t):
      slice_t = input_t[:]
      return slice_t

    with self.assertRaisesWithPredicateMatch(TypeError, "not subscriptable"):
      self.evaluate(func(input_val))

  def testInvalidIndex(self):
    input_val = [1, 2]
    # Test with constant input; shape inference fails.
    with self.assertRaisesWithPredicateMatch(
        (ValueError, errors_impl.InvalidArgumentError), "out of range"):
      constant_op.constant(input_val)[1:, 1:].get_shape()

    # Test with a non-Tensor input: indexing a Python list with a tuple of
    # slices fails with a TypeError before any kernel runs.
    @def_function.function
    def func(input_t):
      slice_t = input_t[1:, 1:]
      return slice_t

    with self.assertRaisesWithPredicateMatch(
        TypeError, "must be integers or slices, not tuple"):
      self.evaluate(func(input_val))

  def _testSliceMatrixDim0(self, x, begin, size):
    tf_ans = self.evaluate(array_ops.slice(x, [begin, 0], [size, x.shape[1]]))
    np_ans = x[begin:begin + size, :]
    self.assertAllEqual(tf_ans, np_ans)

  def testSliceMatrixDim0(self):
    x = np.random.rand(8, 4).astype("f")
    self._testSliceMatrixDim0(x, 1, 2)
    self._testSliceMatrixDim0(x, 3, 3)
    y = np.random.rand(8, 7).astype("f")  # 7 * sizeof(float) is not aligned
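    # (The note above is about memory layout: a row of 7 floats is 28 bytes,
    # which is not a multiple of 16, so this case likely exercises the
    # kernel's unaligned-copy path in addition to the aligned case above.)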
    self._testSliceMatrixDim0(y, 1, 2)
    self._testSliceMatrixDim0(y, 3, 3)

  def testSingleElementAll(self):
    for _ in range(10):
      with self.cached_session():
        inp = np.random.rand(4, 4).astype("f")
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)

        x, y = np.random.randint(0, 3, size=2).tolist()
        slice_t = a[x, 0:y]
        slice_val = self.evaluate(slice_t)
      self.assertAllEqual(slice_val, inp[x, 0:y])

  def testSimple(self):
    with test_util.use_gpu():
      for dtype in [
          np.uint8,
          np.int8,
          np.uint16,
          np.int16,
          np.int32,
          np.int64,
          np.bool_,
          np.float16,
          np.float32,
          np.float64,
          np.complex64,
          np.complex128,
      ]:
        inp = np.random.rand(4, 4).astype(dtype)
        # Build the constant in the dtype under test; coercing everything
        # through float32 would defeat the dtype loop and fail for the
        # complex dtypes.
        a = constant_op.constant(inp, shape=[4, 4])
        slice_t = array_ops.slice(a, [0, 0], [2, 2])
        slice2_t = a[:2, :2]
        slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
        self.assertAllEqual(slice_val, inp[:2, :2])
        self.assertAllEqual(slice2_val, inp[:2, :2])
        self.assertEqual(slice_val.shape, slice_t.get_shape())
        self.assertEqual(slice2_val.shape, slice2_t.get_shape())

  def testComplex(self):
    inp = np.random.rand(4, 10, 10, 4).astype("f")
    a = constant_op.constant(inp, dtype=dtypes.float32)

    x = np.random.randint(0, 9)
    z = np.random.randint(0, 9)
    if z > 0:
      y = np.random.randint(0, z)
    else:
      y = 0
    slice_t = a[:, x, y:z, :]
    self.assertAllEqual(slice_t, inp[:, x, y:z, :])

  def testRandom(self):
    # Random dims of rank 6
    input_shape = np.random.randint(0, 20, size=6)
    inp = np.random.rand(*input_shape).astype("f")
    a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
                             shape=input_shape,
                             dtype=dtypes.float32)
    indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
    sizes = [
        np.random.randint(0, input_shape[i] - indices[i] + 1) for i in range(6)
    ]
    slice_t = array_ops.slice(a, indices, sizes)
    slice2_t = a[indices[0]:indices[0] + sizes[0],
                 indices[1]:indices[1] + sizes[1],
                 indices[2]:indices[2] + sizes[2],
                 indices[3]:indices[3] + sizes[3],
                 indices[4]:indices[4] + sizes[4],
                 indices[5]:indices[5] + sizes[5]]

    slice_val, slice2_val = self.evaluate([slice_t, slice2_t])

    expected_val = inp[indices[0]:indices[0] + sizes[0],
                       indices[1]:indices[1] + sizes[1],
                       indices[2]:indices[2] + sizes[2],
                       indices[3]:indices[3] + sizes[3],
                       indices[4]:indices[4] + sizes[4],
                       indices[5]:indices[5] + sizes[5]]
    self.assertAllEqual(slice_val, expected_val)
    self.assertAllEqual(slice2_val, expected_val)
    self.assertEqual(expected_val.shape, slice_t.get_shape())
    self.assertEqual(expected_val.shape, slice2_t.get_shape())

  def testPartialShapeInference(self):
    z = array_ops.zeros((1, 2, 3))
    self.assertAllEqual(z.get_shape().as_list(), [1, 2, 3])

    m1 = array_ops.slice(z, [0, 0, 0], [-1, -1, -1])
    self.assertAllEqual(m1.get_shape().as_list(), [1, 2, 3])

    m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])
    self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])

  def _testGradientSlice(self, input_shape, slice_begin, slice_size):
    with self.cached_session():
      num_inputs = np.prod(input_shape)
      num_grads = np.prod(slice_size)
      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
      a = constant_op.constant(
          [float(x) for x in inp.ravel(order="C")],
          shape=input_shape,
          dtype=dtypes.float32)
      slice_t = array_ops.slice(a, slice_begin, slice_size)
      grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
      grad_tensor = constant_op.constant(grads)
      grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]
      result = self.evaluate(grad)

    # Create a zero tensor of the input shape and place
    # the grads into the right location to compare against TensorFlow.
    np_ans = np.zeros(input_shape)
    slices = []
    for i in xrange(len(input_shape)):
      slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
    # Index with a tuple of slices; indexing with a plain list of slice
    # objects is an error in modern NumPy.
    np_ans[tuple(slices)] = grads

    self.assertAllClose(np_ans, result)

  def _testGradientSliceTape(self, input_shape, slice_begin, slice_size):
    with backprop.GradientTape() as tape:
      num_inputs = np.prod(input_shape)
      num_grads = np.prod(slice_size)
      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
      a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
                               shape=input_shape,
                               dtype=dtypes.float32)
      tape.watch(a)
      slice_t = array_ops.slice(a, slice_begin, slice_size)
      grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
      grad_tensor = constant_op.constant(grads)
    grad = tape.gradient(slice_t, [a], grad_tensor)[0]
    result = self.evaluate(grad)

    # Create a zero tensor of the input shape and place
    # the grads into the right location to compare against TensorFlow.
    np_ans = np.zeros(input_shape)
    slices = []
    for i in xrange(len(input_shape)):
      slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
    np_ans[tuple(slices)] = grads

    self.assertAllClose(np_ans, result)

  def _testGradientVariableSize(self):
    with self.cached_session():
      inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
      out = array_ops.slice(inp, [1], [-1])
      grad_actual = self.evaluate(gradients_impl.gradients(out, inp)[0])
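    # Slicing with size -1 selects inp[1:]; gradients() seeds the slice with
    # ones, which scatter back into positions 1 and 2 of the input.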
    self.assertAllClose([0., 1., 1.], grad_actual)

  def _testGradientVariableSizeTape(self):
    with backprop.GradientTape() as tape:
      inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
      tape.watch(inp)
      out = array_ops.slice(inp, [1], [-1])
    grad_actual = self.evaluate(tape.gradient(out, inp))
    self.assertAllClose([0., 1., 1.], grad_actual)

  def _testGradientVariableSize2D(self):
    # Regression test for bug in slice. A low-level bug in Eigen was causing
    # incorrect results for negative indices in multi-dimensional tensors.
    # See b/114318298.
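    # Both losses sum the same top-left 2x2 block of x, so their gradients
    # with respect to x must agree: 1 inside the block, 0 elsewhere.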
    with self.cached_session():
      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7.]])
      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])

      g1 = gradients_impl.gradients(loss1, x)[0]
      g2 = gradients_impl.gradients(loss2, x)[0]

      g1_val, g2_val = self.evaluate([g1, g2])
    self.assertAllEqual(g1_val, g2_val)

  def _testGradientVariableSize2DTape(self):
    # Regression test for bug in slice. A low-level bug in Eigen was causing
    # incorrect results for negative indices in multi-dimensional tensors.
    # See b/114318298.
    with backprop.GradientTape(persistent=True) as tape:
      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7.]])
      tape.watch(x)
      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])

    g1 = tape.gradient(loss1, x)
    g2 = tape.gradient(loss2, x)
    g1_val, g2_val = self.evaluate([g1, g2])
    self.assertAllEqual(g1_val, g2_val)

  def testGradientsAll(self):
    with ops.Graph().as_default():
      # Slice the middle square out of a 4x4 input.
      self._testGradientSlice([4, 4], [1, 1], [2, 2])

      # Slice the upper left square out of a 4x4 input.
      self._testGradientSlice([4, 4], [0, 0], [2, 2])

      # Slice a non-square input starting from (2, 1).
      self._testGradientSlice([4, 4], [2, 1], [1, 2])

      # Slice a 3D tensor.
      self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])

      # Use -1 as a slice dimension.
      self._testGradientVariableSize()

      # Use -1 as a slice dimension on a 2D tensor.
      self._testGradientVariableSize2D()

  def testGradientsAllTape(self):
    # Slice the middle square out of a 4x4 input.
    self._testGradientSliceTape([4, 4], [1, 1], [2, 2])

    # Slice the upper left square out of a 4x4 input.
    self._testGradientSliceTape([4, 4], [0, 0], [2, 2])

    # Slice a non-square input starting from (2, 1).
    self._testGradientSliceTape([4, 4], [2, 1], [1, 2])

    # Slice a 3D tensor.
    self._testGradientSliceTape([3, 3, 3], [0, 1, 0], [2, 1, 1])

    # Use -1 as a slice dimension.
    self._testGradientVariableSizeTape()

    # Use -1 as a slice dimension on a 2D tensor.
    self._testGradientVariableSize2DTape()

  def testNotIterable(self):
    # Tensor iteration is explicitly disabled, but only in graph mode.
    with ops.Graph().as_default():
      # NOTE(mrry): If we register __getitem__ as an overloaded
      # operator, Python will valiantly attempt to iterate over the
      # Tensor from 0 to infinity.  This test ensures that this
      # unintended behavior is prevented.
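      # (Without the guard, Python's sequence-protocol fallback would keep
      # requesting c[0], c[1], c[2], ..., since a symbolic Tensor lookup
      # never raises the IndexError that normally terminates iteration.)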
      c = constant_op.constant(5.0)
      with self.assertRaisesRegex(errors_impl.OperatorNotAllowedInGraphError,
                                  "iterating over `tf.Tensor`"):
        for _ in c:
          pass

  def testComputedShape(self):
    # NOTE(mrry): We cannot currently handle partially-known values,
    # because `tf.slice()` uses -1 to specify a wildcard size, and
    # this can't be handled using the
    # `tensor_util.constant_value_as_shape()` trick.
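    # For example, a size tensor whose value is unknown when the graph is
    # built could turn out to be the wildcard -1 at run time, so no fixed
    # output dimension can be inferred from it.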
    a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
    begin = constant_op.constant(0)
    size = constant_op.constant(1)
    b = array_ops.slice(a, [begin, 0], [size, 2])
    self.assertEqual([1, 2], b.get_shape())

    # Placeholders only make sense in a graph.
    with ops.Graph().as_default():
      a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      begin = array_ops.placeholder(dtypes.int32, shape=())
      c = array_ops.slice(a, [begin, 0], [-1, 2])
      self.assertEqual([None, 2], c.get_shape().as_list())

  def testSliceOfSlice(self):
    with self.session():
      a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
      b = a[1:, :]
      c = b[:-1, :]
      d = c[1, :]
      res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]
      self.assertAllEqual([0, 0, 0], self.evaluate(res))


if __name__ == "__main__":
  test.main()