# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for slice op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test


class SliceTest(test.TestCase):

  def testEmpty(self):
    inp = np.random.rand(4, 4).astype("f")
    for k in xrange(4):
      with self.cached_session():
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
        slice_t = a[2, k:k]
        slice_val = self.evaluate(slice_t)
      self.assertAllEqual(slice_val, inp[2, k:k])

  def testInt32(self):
    inp = np.random.rand(4, 4).astype("i")
    for k in xrange(4):
      with self.cached_session():
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
        slice_t = a[2, k:k]
        slice_val = self.evaluate(slice_t)
      self.assertAllEqual(slice_val, inp[2, k:k])

  def testSlicingWithInt64Index(self):
    with self.cached_session(force_gpu=test.is_gpu_available()):
      a = constant_op.constant([0, 1, 2], dtype=dtypes.int32)

      # Slice using int64 Tensor.
      i = constant_op.constant(1, dtype=dtypes.int64)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i+1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      # Slice using int64 integer.
      i = np.asarray(1).astype(np.int64)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i+1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      a_int32 = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
      slice_t = array_ops.slice(a_int32,
                                np.asarray([1]).astype(np.int64),
                                np.asarray([2]).astype(np.int64))
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)

      a_float32 = constant_op.constant([0, 1, 2], dtype=dtypes.float32)
      slice_t = array_ops.slice(a_float32,
                                np.asarray([1]).astype(np.int64),
                                np.asarray([2]).astype(np.int64))
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)

  def testSlicingInt64Tensor(self):
    with self.cached_session(force_gpu=test.is_gpu_available()):
      a = constant_op.constant([0, 1, 2], dtype=dtypes.int64)

      # Slice using int32 Tensor.
      i = constant_op.constant(1, dtype=dtypes.int32)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i + 1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      # Slice using int32 integer.
      i = np.asarray(1).astype(np.int32)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i + 1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      slice_t = array_ops.slice(a, [1], [2])
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)

  def testSelectAll(self):
    for _ in range(10):
      with self.cached_session():
        inp = np.random.rand(4, 4, 4, 4).astype("f")
        a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)

        slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
        slice_implicit_t = a[:, :, :, :]

        self.assertAllEqual(inp, self.evaluate(slice_explicit_t))
        self.assertAllEqual(inp, self.evaluate(slice_implicit_t))
        self.assertEqual(inp.shape, slice_explicit_t.get_shape())
        self.assertEqual(inp.shape, slice_implicit_t.get_shape())

  def testSingleDimension(self):
    for _ in range(10):
      with self.cached_session():
        inp = np.random.rand(10).astype("f")
        a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)

        hi = np.random.randint(0, 9)
        scalar_t = a[hi]
        scalar_val = self.evaluate(scalar_t)
        self.assertAllEqual(scalar_val, inp[hi])

        if hi > 0:
          lo = np.random.randint(0, hi)
        else:
          lo = 0
        slice_t = a[lo:hi]
        slice_val = self.evaluate(slice_t)
        self.assertAllEqual(slice_val, inp[lo:hi])

  def test3Dimension(self):
    with self.cached_session():
      input_shape = [8, 16, 16, 16, 8]
      total_input_size = 1
      for s in input_shape:
        total_input_size *= s
      inputs = [
          i * 1.0 / total_input_size for i in range(1, total_input_size + 1)
      ]
      a = constant_op.constant(inputs, shape=input_shape, dtype=dtypes.float32)

      filter_shape = [1, 1, 1, 8, 8]
      total_filter_size = 1
      for s in filter_shape:
        total_filter_size *= s
      filters = [
          i * 1.0 / total_filter_size for i in range(1, total_filter_size + 1)
      ]
      f = constant_op.constant(
          filters, shape=filter_shape, dtype=dtypes.float32)

      conv_t = nn_ops.conv3d(
          a, filter=f, strides=[1, 1, 1, 1, 1], padding="VALID")
      slice_t = array_ops.slice(conv_t, [0, 1, 1, 1, 0], [1, 1, 1, 1, 8])
      result = self.evaluate(slice_t)
      expected = [
          0.03028321, 0.03132677, 0.03237033, 0.03341389, 0.03445745, 0.035501,
          0.03654456, 0.03758812
      ]
      self.assertAllClose(expected, result.flatten(), rtol=1e-6)

  def testScalarInput(self):
    input_val = 0
    # Test with constant input; shape inference fails.
    with self.assertRaisesWithPredicateMatch(
        (ValueError, errors_impl.InvalidArgumentError), "out of range"):
      constant_op.constant(input_val)[:].get_shape()

    # Test with non-constant input; tracing the function fails.
    @def_function.function
    def func(input_t):
      slice_t = input_t[:]
      return slice_t

    with self.assertRaisesWithPredicateMatch(TypeError, "not subscriptable"):
      self.evaluate(func(input_val))

  def testInvalidIndex(self):
    input_val = [1, 2]
    # Test with constant input; shape inference fails.
    with self.assertRaisesWithPredicateMatch(
        (ValueError, errors_impl.InvalidArgumentError), "out of range"):
      constant_op.constant(input_val)[1:, 1:].get_shape()

    # Test with non-constant input; tracing the function fails.
    @def_function.function
    def func(input_t):
      slice_t = input_t[1:, 1:]
      return slice_t

    with self.assertRaisesWithPredicateMatch(
        TypeError, "must be integers or slices, not tuple"):
      self.evaluate(func(input_val))

  def _testSliceMatrixDim0(self, x, begin, size):
    tf_ans = self.evaluate(array_ops.slice(x, [begin, 0], [size, x.shape[1]]))
    np_ans = x[begin:begin + size, :]
    self.assertAllEqual(tf_ans, np_ans)

  def testSliceMatrixDim0(self):
    x = np.random.rand(8, 4).astype("f")
    self._testSliceMatrixDim0(x, 1, 2)
    self._testSliceMatrixDim0(x, 3, 3)
    y = np.random.rand(8, 7).astype("f")  # row stride of 7 * sizeof(float) bytes is not vector-aligned
    self._testSliceMatrixDim0(y, 1, 2)
    self._testSliceMatrixDim0(y, 3, 3)

  def testSingleElementAll(self):
    for _ in range(10):
      with self.cached_session():
        inp = np.random.rand(4, 4).astype("f")
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)

        x, y = np.random.randint(0, 3, size=2).tolist()
        slice_t = a[x, 0:y]
        slice_val = self.evaluate(slice_t)
      self.assertAllEqual(slice_val, inp[x, 0:y])

  def testSimple(self):
    with test_util.use_gpu():
      for dtype in [
          np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64,
          np.bool_, np.float16, np.float32, np.float64, np.complex64,
          np.complex128
      ]:
        inp = np.random.rand(4, 4).astype(dtype)
        a = constant_op.constant(
            [float(x) for x in inp.ravel(order="C")],
            shape=[4, 4],
            dtype=dtypes.float32)
        slice_t = array_ops.slice(a, [0, 0], [2, 2])
        slice2_t = a[:2, :2]
        slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
        self.assertAllEqual(slice_val, np.array(inp[:2, :2], dtype=np.float32))
        self.assertAllEqual(slice2_val, np.array(inp[:2, :2], dtype=np.float32))
        self.assertEqual(slice_val.shape, slice_t.get_shape())
        self.assertEqual(slice2_val.shape, slice2_t.get_shape())

  def testComplex(self):
    inp = np.random.rand(4, 10, 10, 4).astype("f")
    a = constant_op.constant(inp, dtype=dtypes.float32)

    x = np.random.randint(0, 9)
    z = np.random.randint(0, 9)
    if z > 0:
      y = np.random.randint(0, z)
    else:
      y = 0
    slice_t = a[:, x, y:z, :]
    self.assertAllEqual(slice_t, inp[:, x, y:z, :])

  def testRandom(self):
    # Random dims of rank 6
    input_shape = np.random.randint(0, 20, size=6)
    inp = np.random.rand(*input_shape).astype("f")
    a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
                             shape=input_shape,
                             dtype=dtypes.float32)
    indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
    sizes = [
        np.random.randint(0, input_shape[i] - indices[i] + 1) for i in range(6)
    ]
    slice_t = array_ops.slice(a, indices, sizes)
    slice2_t = a[indices[0]:indices[0] + sizes[0],
                 indices[1]:indices[1] + sizes[1],
                 indices[2]:indices[2] + sizes[2],
                 indices[3]:indices[3] + sizes[3],
                 indices[4]:indices[4] + sizes[4],
                 indices[5]:indices[5] + sizes[5]]

    slice_val, slice2_val = self.evaluate([slice_t, slice2_t])

    expected_val = inp[indices[0]:indices[0] + sizes[0],
                       indices[1]:indices[1] + sizes[1],
                       indices[2]:indices[2] + sizes[2],
                       indices[3]:indices[3] + sizes[3],
                       indices[4]:indices[4] + sizes[4],
                       indices[5]:indices[5] + sizes[5]]
    self.assertAllEqual(slice_val, expected_val)
    self.assertAllEqual(slice2_val, expected_val)
    self.assertEqual(expected_val.shape, slice_t.get_shape())
    self.assertEqual(expected_val.shape, slice2_t.get_shape())

  def testPartialShapeInference(self):
    z = array_ops.zeros((1, 2, 3))
    self.assertAllEqual(z.get_shape().as_list(), [1, 2, 3])

    m1 = array_ops.slice(z, [0, 0, 0], [-1, -1, -1])
    self.assertAllEqual(m1.get_shape().as_list(), [1, 2, 3])

    m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])
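    # Even though `size` here is not a Python literal, shape inference can
    # still evaluate the constant expression `constant_op.constant(1) + 0` and
    # resolve the -1 wildcard against the known input shape.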
    self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])

  def _testGradientSlice(self, input_shape, slice_begin, slice_size):
    with self.cached_session():
      num_inputs = np.prod(input_shape)
      num_grads = np.prod(slice_size)
      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
      a = constant_op.constant(
          [float(x) for x in inp.ravel(order="C")],
          shape=input_shape,
          dtype=dtypes.float32)
      slice_t = array_ops.slice(a, slice_begin, slice_size)
      grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
      grad_tensor = constant_op.constant(grads)
      grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]
      result = self.evaluate(grad)

    # Create a zero tensor of the input shape and place
    # the grads into the right location to compare against TensorFlow.
    np_ans = np.zeros(input_shape)
    slices = []
    for i in xrange(len(input_shape)):
      slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
    np_ans[tuple(slices)] = grads
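    # For example, in a hypothetical 1-D case with begin=[1] and size=[2] on a
    # length-4 input, incoming grads g = [g0, g1] scatter back as
    # [0, g0, g1, 0].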

    self.assertAllClose(np_ans, result)

  def _testGradientSliceTape(self, input_shape, slice_begin, slice_size):
    with backprop.GradientTape() as tape:
      num_inputs = np.prod(input_shape)
      num_grads = np.prod(slice_size)
      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
      a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
                               shape=input_shape,
                               dtype=dtypes.float32)
      tape.watch(a)
      slice_t = array_ops.slice(a, slice_begin, slice_size)
      grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
      grad_tensor = constant_op.constant(grads)
    grad = tape.gradient(slice_t, [a], grad_tensor)[0]
    result = self.evaluate(grad)

    # Create a zero tensor of the input shape and place
    # the grads into the right location to compare against TensorFlow.
    np_ans = np.zeros(input_shape)
    slices = []
    for i in xrange(len(input_shape)):
      slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
    np_ans[tuple(slices)] = grads

    self.assertAllClose(np_ans, result)

  def _testGradientVariableSize(self):
    with self.cached_session():
      inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
      out = array_ops.slice(inp, [1], [-1])
      grad_actual = self.evaluate(gradients_impl.gradients(out, inp)[0])
    self.assertAllClose([0., 1., 1.], grad_actual)

  def _testGradientVariableSizeTape(self):
    with backprop.GradientTape() as tape:
      inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
      tape.watch(inp)
      out = array_ops.slice(inp, [1], [-1])
    grad_actual = self.evaluate(tape.gradient(out, inp))
    self.assertAllClose([0., 1., 1.], grad_actual)

  def _testGradientVariableSize2D(self):
    # Regression test for bug in slice. A low-level bug in Eigen was causing
    # incorrect results for negative indices in multi-dimensional tensors.
    # See b/114318298.
    with self.cached_session():
      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])

      g1 = gradients_impl.gradients(loss1, x)[0]
      g2 = gradients_impl.gradients(loss2, x)[0]

      g1_val, g2_val = self.evaluate([g1, g2])
    self.assertAllEqual(g1_val, g2_val)

  def _testGradientVariableSize2DTape(self):
    # Regression test for bug in slice. A low-level bug in Eigen was causing
    # incorrect results for negative indices in multi-dimensional tensors.
    # See b/114318298.
    with backprop.GradientTape(persistent=True) as tape:
      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
      tape.watch(x)
      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])

    g1 = tape.gradient(loss1, x)
    g2 = tape.gradient(loss2, x)
    g1_val, g2_val = self.evaluate([g1, g2])
    self.assertAllEqual(g1_val, g2_val)

  def testGradientsAll(self):
    with ops.Graph().as_default():
      # Slice the middle square out of a 4x4 input
      self._testGradientSlice([4, 4], [1, 1], [2, 2])

      # Slice the upper left square out of a 4x4 input
      self._testGradientSlice([4, 4], [0, 0], [2, 2])

      # Slice a non-square input starting from (2,1)
      self._testGradientSlice([4, 4], [2, 1], [1, 2])

      # Slice a 3D tensor
      self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])

      # Use -1 as a slice dimension.
      self._testGradientVariableSize()

      # Use -1 as a slice dimension on a 2D tensor.
      self._testGradientVariableSize2D()

  def testGradientsAllTape(self):
    # Slice the middle square out of a 4x4 input
    self._testGradientSliceTape([4, 4], [1, 1], [2, 2])

    # Slice the upper left square out of a 4x4 input
    self._testGradientSliceTape([4, 4], [0, 0], [2, 2])

    # Slice a non-square input starting from (2,1)
    self._testGradientSliceTape([4, 4], [2, 1], [1, 2])

    # Slice a 3D tensor
    self._testGradientSliceTape([3, 3, 3], [0, 1, 0], [2, 1, 1])

    # Use -1 as a slice dimension.
    self._testGradientVariableSizeTape()

    # Use -1 as a slice dimension on a 2D tensor.
    self._testGradientVariableSize2DTape()

  def testNotIterable(self):
    # Tensor iteration is explicitly disabled only in graph mode.
    with ops.Graph().as_default():
      # NOTE(mrry): If we register __getitem__ as an overloaded
      # operator, Python will valiantly attempt to iterate over the
      # Tensor from 0 to infinity.  This test ensures that this
      # unintended behavior is prevented.
      c = constant_op.constant(5.0)
      with self.assertRaisesRegex(errors_impl.OperatorNotAllowedInGraphError,
                                  "iterating over `tf.Tensor`"):
        for _ in c:
          pass
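
    # For contrast: in eager mode, iterating over a non-scalar Tensor is
    # allowed, e.g. `for x in constant_op.constant([1, 2, 3])` yields scalar
    # tensors (not exercised by this graph-mode test).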

  def testComputedShape(self):
    # NOTE(mrry): We cannot currently handle partially-known values,
    # because `tf.slice()` uses -1 to specify a wildcard size, and
    # this can't be handled using the
    # `tensor_util.constant_value_as_shape()` trick.
    a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
    begin = constant_op.constant(0)
    size = constant_op.constant(1)
    b = array_ops.slice(a, [begin, 0], [size, 2])
    self.assertEqual([1, 2], b.get_shape())

    # Placeholders only make sense in a graph.
    with ops.Graph().as_default():
      a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      begin = array_ops.placeholder(dtypes.int32, shape=())
      c = array_ops.slice(a, [begin, 0], [-1, 2])
      self.assertEqual([None, 2], c.get_shape().as_list())

  def testSliceOfSlice(self):
    with self.session():
      a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
      b = a[1:, :]
      c = b[:-1, :]
      d = c[1, :]
      res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]
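      # d, c[1, :], a[2, :] and b[-2, :] all refer to the same row [7, 8, 9],
      # so the combination (2 - 1 + 1 - 2) of that row is identically zero.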
      self.assertAllEqual([0, 0, 0], self.evaluate(res))


if __name__ == "__main__":
  test.main()