# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Transpose op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools

import numpy as np

from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test


class TransposeTest(test.TestCase):

  def _np_transpose(self, x, perm):
    ret = np.copy(x)
    ret = ret.transpose(perm)
    return ret

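  # Note (illustrative addition): np.transpose and array_ops.transpose permute
  # indices the same way; for x of shape (2, 3, 5) and perm (1, 2, 0), the
  # result has shape (3, 5, 2) and result[i, j, k] == x[k, i, j]. The helpers
  # below rely on this agreement to use NumPy as the reference implementation.
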
  def _compareCpu(self, x, p, conjugate=False):
    if p is None:
      # A permutation of None reverses the dimensions, matching the default
      # behavior of array_ops.transpose.
      rank = x.ndim
      perm = (rank - 1) - np.arange(rank)
    else:
      perm = p
    np_ans = self._np_transpose(x, perm)
    if conjugate:
      np_ans = np.conj(np_ans)
    with self.cached_session(use_gpu=False):
      inx = ops.convert_to_tensor(x)
      y = array_ops.transpose(inx, p, conjugate=conjugate)
      tf_ans = self.evaluate(y)
      self.assertShapeEqual(np_ans, y)
      self.assertAllEqual(np_ans, tf_ans)

      jacob_t = None
      # Gradient check on CPU.
      if x.dtype in [np.float32, np.complex64]:
        jacob_t, jacob_n = gradient_checker_v2.compute_gradient(
            lambda x: array_ops.transpose(x, p, conjugate=conjugate), [inx])
        self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
      elif x.dtype in [np.float64, np.complex128]:
        jacob_t, jacob_n = gradient_checker_v2.compute_gradient(
            lambda x: array_ops.transpose(x, p, conjugate=conjugate), [inx])
        self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)

      return tf_ans, jacob_t

  def _compareGpu(self, x, p, conjugate=False):
    if p is None:
      rank = x.ndim
      perm = (rank - 1) - np.arange(rank)
    else:
      perm = p
    np_ans = self._np_transpose(x, perm)
    if conjugate:
      np_ans = np.conj(np_ans)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      y = array_ops.transpose(inx, p, conjugate=conjugate)
      tf_ans = self.evaluate(y)

      self.assertAllEqual(np_ans, tf_ans)
      self.assertShapeEqual(np_ans, y)

      jacob_t = None
      # Gradient check on GPU.
      if x.dtype == np.float32:
        jacob_t, jacob_n = gradient_checker_v2.compute_gradient(
            lambda x: array_ops.transpose(x, p, conjugate=conjugate), [inx])
        self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
      elif x.dtype == np.float64:
        jacob_t, jacob_n = gradient_checker_v2.compute_gradient(
            lambda x: array_ops.transpose(x, p, conjugate=conjugate), [inx])
        self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)

      return tf_ans, jacob_t

  def _compare(self, x, use_gpu=False):
    n = np.ndim(x)
    # Generate all permutations of [0, 1, ..., n-1] in random order and test
    # the first two of them.
    all_perm = np.random.permutation(
        [p for p in itertools.permutations(range(n))]).astype(np.int32)
    cs = [False, True] if x.dtype in [np.complex64, np.complex128] else [False]
    for c in cs:
      for p in all_perm[:2]:
        self._compareCpu(x, p, conjugate=c)
        if use_gpu:
          self._compareGpu(x, p, conjugate=c)
    # Test with the default permutation (p=None reverses the dimensions).
    for c in cs:
      self._compareCpu(x, None, conjugate=c)
      if use_gpu:
        self._compareGpu(x, None, conjugate=c)

  def _compare_cpu_gpu(self, x):
    n = np.ndim(x)
    # Test two randomly shuffled permutations of [0, 1, ..., n-1], or the
    # default permutation when the rank is at most 1.
    perms = itertools.permutations(range(n))
    for _ in range(2):
      p = np.random.permutation(next(perms)).astype(np.int32) if n > 1 else None
      tf_a_cpu, tf_g_cpu = self._compareCpu(x, p)
      tf_a_gpu, tf_g_gpu = self._compareGpu(x, p)
      assert tf_g_cpu is not None
      assert tf_g_gpu is not None
      if x.dtype == np.float32:
        self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-3, 1e-3)
        self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-3, 1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-6, 1e-6)
        self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-6, 1e-6)

  def _testBoth(self, x):
    self._compare(x, use_gpu=False)
    self._compare(x, use_gpu=True)

  def testRank1(self):
    self._compareCpu(np.arange(0., 2), [0])

  def test1D(self):
    vector = np.arange(0, 2).reshape((1, 1, 1, 2, 1))
    self._compare(vector, use_gpu=False)
    self._compare(vector, use_gpu=True)

  def test5DGPU(self):
    # If no GPU available, skip the test.
    if not test.is_gpu_available(cuda_only=True):
      return
    large_shapes = [[4, 10, 10, 10, 3], [4, 10, 10, 10, 8],
                    [4, 10, 10, 10, 13], [4, 3, 10, 10, 10],
                    [4, 8, 10, 10, 10], [4, 13, 10, 10, 10]] * 3
    perms = ([[0, 4, 1, 2, 3]] * 3 + [[0, 2, 3, 4, 1]] * 3 +
             [[4, 1, 2, 3, 0]] * 6 + [[1, 2, 3, 4, 0]] * 6)

    datatypes = [np.int8, np.float16, np.float32, np.float64, np.complex128]
    for datatype in datatypes:
      for input_shape, perm in zip(large_shapes, perms):
        with self.subTest(
            datatype=datatype, input_shape=input_shape, perm=perm):
          total_size = np.prod(input_shape)
          inp = np.arange(
              1, total_size + 1, dtype=datatype).reshape(input_shape)
          np_ans = self._np_transpose(inp, perm)
          with self.cached_session():
            inx = ops.convert_to_tensor(inp)
            y = array_ops.transpose(inx, perm)
            tf_ans = self.evaluate(y)
          self.assertAllEqual(np_ans, tf_ans)
          self.assertShapeEqual(np_ans, y)

  def test4DGPU(self):
    # If no GPU available, skip the test.
    if not test.is_gpu_available(cuda_only=True):
      return
    large_shapes = [[4, 10, 10, 3], [4, 10, 10, 8], [4, 10, 10, 13],
                    [4, 3, 10, 10], [4, 8, 10, 10], [4, 13, 10, 10]] * 3
    perms = ([[0, 3, 1, 2]] * 3 + [[0, 2, 3, 1]] * 3 + [[3, 1, 2, 0]] * 6 +
             [[1, 2, 3, 0]] * 3 + [[2, 3, 0, 1]] * 3)

    for input_shape, perm in zip(large_shapes, perms):
      with self.subTest(input_shape=input_shape, perm=perm):
        total_size = np.prod(input_shape)
        inp = np.arange(
            1, total_size + 1, dtype=np.float32).reshape(input_shape)
        np_ans = self._np_transpose(inp, perm)
        with self.cached_session():
          inx = ops.convert_to_tensor(inp)
          y = array_ops.transpose(inx, perm)
          tf_ans = self.evaluate(y)
        self.assertAllEqual(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, y)

    # Shapes related to Inception (taken from conv_ops_test.py).
    inception_shapes = [
        [4, 5, 5, 124], [4, 8, 8, 38], [4, 8, 8, 38], [4, 8, 8, 204],
        [4, 8, 8, 44], [4, 8, 8, 204], [4, 8, 8, 204], [4, 8, 8, 204],
        [4, 8, 8, 176], [4, 8, 8, 176], [4, 8, 8, 176], [4, 8, 8, 176],
        [4, 17, 17, 19], [4, 17, 17, 19], [4, 17, 17, 124], [4, 17, 17, 12],
        [4, 17, 17, 124], [4, 17, 17, 22], [4, 17, 17, 19], [4, 17, 17, 19],
        [4, 17, 17, 121], [4, 17, 17, 121], [4, 17, 17, 22], [4, 17, 17, 19],
        [4, 17, 17, 19], [4, 17, 17, 115], [4, 17, 17, 115], [4, 17, 17, 19],
        [4, 17, 17, 16], [4, 17, 17, 115], [4, 17, 17, 102], [4, 17, 17, 12],
        [4, 17, 17, 102], [4, 17, 17, 12], [4, 17, 17, 102], [4, 17, 17, 12],
        [4, 17, 17, 76], [4, 17, 17, 12], [4, 17, 17, 12], [4, 17, 17, 76],
        [4, 17, 17, 76], [4, 35, 35, 9], [4, 35, 35, 28], [4, 35, 35, 6],
        [4, 35, 35, 28], [4, 35, 35, 25], [4, 35, 35, 4], [4, 35, 35, 25],
        [4, 35, 35, 9], [4, 35, 35, 19], [4, 35, 35, 19], [4, 35, 35, 19],
        [4, 73, 73, 6], [4, 73, 73, 6], [4, 147, 147, 2]
    ]
    for input_shape in inception_shapes:
      with self.subTest(input_shape=input_shape):
        perm = [0, 3, 1, 2]
        total_size = np.prod(input_shape)
        inp = np.arange(
            1, total_size + 1, dtype=np.float32).reshape(input_shape)
        np_ans = self._np_transpose(inp, perm)
        with self.cached_session():
          inx = ops.convert_to_tensor(inp)
          y = array_ops.transpose(inx, perm)
          tf_ans = self.evaluate(y)
        self.assertAllEqual(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, y)

  def test3DGPU(self):
    # If no GPU available, skip the test.
    if not test.is_gpu_available(cuda_only=True):
      return

    datatypes = [np.int8, np.float16, np.float32, np.float64, np.complex128]
    large_shapes = [[4, 1000, 3], [4, 1000, 8], [4, 1000, 13], [4, 3, 1000],
                    [4, 8, 1000], [4, 13, 1000]] * 3
    perms = ([[0, 2, 1]] * 6 + [[2, 1, 0]] * 6 + [[1, 2, 0]] * 3 +
             [[2, 0, 1]] * 3)
    for datatype in datatypes:
      for input_shape, perm in zip(large_shapes, perms):
        with self.subTest(
            datatype=datatype, input_shape=input_shape, perm=perm):
          total_size = np.prod(input_shape)
          inp = np.arange(
              1, total_size + 1, dtype=datatype).reshape(input_shape)
          np_ans = self._np_transpose(inp, perm)
          with self.cached_session():
            inx = ops.convert_to_tensor(inp)
            y = array_ops.transpose(inx, perm)
            tf_ans = self.evaluate(y)
          self.assertAllEqual(np_ans, tf_ans)
          self.assertShapeEqual(np_ans, y)

  def testLargeSizeGPU(self):
    # If no GPU available, skip the test.
    if not test.is_gpu_available(cuda_only=True):
      return

    large_shapes = [[1000000, 31, 3], [3, 1000000, 31], [3, 31, 1000000],
                    [10000, 310, 3], [3, 10000, 310], [3, 310, 10000],
                    [2, 1000, 1000], [1000, 2, 1000], [1000, 1000, 2]]
    perms = [[0, 2, 1]] * 9

    for input_shape, perm in zip(large_shapes, perms):
      with self.subTest(input_shape=input_shape, perm=perm):
        total_size = np.prod(input_shape)
        inp = np.arange(
            1, total_size + 1, dtype=np.float32).reshape(input_shape)
        np_ans = self._np_transpose(inp, perm)
        with self.cached_session():
          inx = ops.convert_to_tensor(inp)
          y = array_ops.transpose(inx, perm)
          tf_ans = self.evaluate(y)
        self.assertAllEqual(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, y)

  def testRandomizedSmallDimLargeSizeGPU(self):
    # If no GPU available, skip the test.
    if not test.is_gpu_available(cuda_only=True):
      return

    # Draw 10 random shapes with large dimension sizes.
    # 40% prob to generate dim[0] size within [1, 2048]
    # 40% prob to generate dim[0] size within [2048, 4095]
    # 20% prob to generate dim[0] size within [4096, 100000]
    # 50% prob to use dim[1] as the small dim (<16)
    num_samples = 10
    total_size = 500000
    small_size_limit = 2048
    large_size_limit = 95905
    small_size_percentage = 0.4
    medium_size_percentage = 0.4
    large_size_percentage = 0.2
    perms = [[0, 2, 1]] * num_samples
    dim_zero_sizes = []
    dim_zero_sizes += list(
        np.random.randint(
            small_size_limit, size=int(small_size_percentage * num_samples)) +
        1)
    dim_zero_sizes += list(
        np.random.randint(
            small_size_limit, size=int(medium_size_percentage * num_samples)) +
        small_size_limit)
    dim_zero_sizes += list(
        np.random.randint(
            large_size_limit, size=int(large_size_percentage * num_samples)) +
        small_size_limit * 2)
    input_shapes = []
    small_dim_limit = 16
    for dim_zero_size in dim_zero_sizes:
      small_dim_size = np.random.randint(small_dim_limit - 1) + 1
      large_dim_size = int(
          total_size / dim_zero_size / small_dim_size) + small_dim_limit
      input_shapes += ([[dim_zero_size, small_dim_size, large_dim_size]]
                       if np.random.randint(2) else
                       [[dim_zero_size, large_dim_size, small_dim_size]])

    for input_shape, perm in zip(input_shapes, perms):
      # Generate input data with random ints from 0 to 9.
      with self.subTest(input_shape=input_shape, perm=perm):
        inp = np.random.randint(10, size=input_shape)
        np_ans = self._np_transpose(inp, perm)
        with self.cached_session():
          inx = ops.convert_to_tensor(inp)
          y = array_ops.transpose(inx, perm)
          tf_ans = self.evaluate(y)
        self.assertAllEqual(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, y)
        self._ClearCachedSession()

  def testNop(self):
    self._compareCpu(np.arange(0, 6).reshape([3, 2]).astype(np.float32), [0, 1])

  def testSimple(self):
    self._compareCpu(
        np.arange(0, 8).reshape([2, 4]).astype(np.float32),
        np.array([1, 0]).astype(np.int32))

  def testPermType(self):
    for perm_dtype in [np.int64, np.int32]:
      with self.subTest(perm_dtype=perm_dtype):
        x = np.arange(0, 8).reshape([2, 4]).astype(np.float32)
        p = np.array([1, 0]).astype(perm_dtype)
        np_ans = np.copy(x).transpose(p)
        with self.cached_session():
          inx = ops.convert_to_tensor(x)
          inp = constant_op.constant(p)
          y = array_ops.transpose(inx, inp)
          tf_ans = self.evaluate(y)
          self.assertShapeEqual(np_ans, y)
          self.assertAllEqual(np_ans, tf_ans)

  def testHalf(self):
    self._compare(np.arange(0, 21).reshape([3, 7]).astype(np.float16))
    self._compare(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float16))
    self._compare(
        np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float16))

  def testFloat(self):
    self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float32))
    self._compare_cpu_gpu(
        np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32))
    self._compare_cpu_gpu(
        np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float32))

  def testDouble(self):
    self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float64))
    self._compare_cpu_gpu(
        np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float64))
    self._compare_cpu_gpu(
        np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float64))

  def testComplex64(self):
    self._testBoth(np.array(1 + 2j).astype(np.complex64))
    self._testBoth((1 + 2j) * np.arange(0, 21).astype(np.complex64))
    self._testBoth(
        (1 + 2j) * np.arange(0, 21).reshape([3, 7]).astype(np.complex64))
    self._testBoth(
        (1 + 2j) * np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex64))
    self._testBoth(
        (1 + 2j) *
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex64))

  def testComplex128(self):
    self._testBoth(np.array(1 + 2j).astype(np.complex128))
    self._testBoth((1 + 2j) * np.arange(0, 21).astype(np.complex128))
    self._testBoth(
        (1 + 2j) * np.arange(0, 21).reshape([3, 7]).astype(np.complex128))
    self._testBoth(
        (1 + 2j) *
        np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex128))
    self._testBoth(
        (1 + 2j) *
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex128))

  def testInt8(self):
    self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int8))
    self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int8))
    self._testBoth(
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int8))

  def testInt16(self):
    self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int16))
    self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int16))
    self._testBoth(
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int16))

  def testInt32(self):
    self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int32))
    self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int32))
    self._testBoth(
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int32))

  def testInt64(self):
    self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int64))
    self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int64))
    self._testBoth(
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int64))

  def testTranspose2DAuto(self):
    x_np = [[1, 2, 3], [4, 5, 6]]
    for use_gpu in [False, True]:
      with self.subTest(use_gpu=use_gpu):
        with self.cached_session(use_gpu=use_gpu):
          x_tf = array_ops.transpose(x_np)
          self.assertAllEqual(x_tf, [[1, 4], [2, 5], [3, 6]])

  def testSingletonDims(self):
    # A singleton dimension is a dimension i with shape[i] == 1. Such dimensions
    # can be collapsed and expanded using reshape without changing the
    # underlying data storage. If all non-singleton dimensions remain in
    # ascending order, the shuffled singletons will be transposed by a reshape,
    # saving a memory allocation & copy. Since this gets a special code-path in
    # transpose_op.cc, we test that the codepath is exercised and the results
    # are as expected; we do not test that we save the memory allocation and
    # copy here.
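    # For example (an illustrative case, not from the original comment):
    # transposing shape [2, 1, 2] with perm [0, 2, 1] yields shape [2, 2, 1];
    # the non-singleton dimensions (0 and 2) keep their relative order, so the
    # output buffer is bitwise identical to the input and the op can lower to
    # a reshape.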
    for shape in [[2, 1, 2], [2, 1, 2, 1, 1, 2], [1, 2, 2, 1, 1, 1],
                  [1, 1, 1, 2, 2, 2], [2, 2, 1, 1, 1]]:
      with self.subTest(shape=shape):
        self._compare_cpu_gpu(
            np.arange(np.prod(shape)).reshape(shape).astype(np.float32))

  def testTransposeShapes(self):
    self.assertEqual(
        [],
        array_ops.transpose(
            constant_op.constant(1, dtype=dtypes.int32,
                                 shape=[])).get_shape().dims)
    self.assertEqual(
        [100],
        array_ops.transpose(
            constant_op.constant(1, dtype=dtypes.int32,
                                 shape=[100])).get_shape().dims)
    self.assertEqual(
        [37, 100],
        array_ops.transpose(
            constant_op.constant(1, dtype=dtypes.int32,
                                 shape=[100, 37])).get_shape().dims)
    self.assertEqual(
        [100, 37],
        array_ops.transpose(
            constant_op.constant(1, dtype=dtypes.int32, shape=[100, 37]),
            [0, 1]).get_shape().dims)
    self.assertEqual(
        [15, 37, 100],
        array_ops.transpose(
            constant_op.constant(1, dtype=dtypes.int32,
                                 shape=[100, 37, 15])).get_shape().dims)
    self.assertEqual(
        [15, 100, 37],
        array_ops.transpose(
            constant_op.constant(1, dtype=dtypes.int32, shape=[100, 37, 15]),
            [2, 0, 1]).get_shape().dims)

  def testTransposeDynamicShapes(self):
    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
    ])
    def transpose(x):
      y = array_ops.transpose(x)
      self.assertEqual(y.shape, tensor_shape.TensorShape(None))
      return y

    x = constant_op.constant([[1, 2, 3], [4, 5, 6]])  # Shape (2, 3).
    expected_transpose = constant_op.constant([[1, 4], [2, 5],
                                               [3, 6]])  # Shape (3, 2).
    self.assertAllEqual(expected_transpose, transpose(x))

    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
    ])
    def transpose_with_perm(x, perm):
      y = array_ops.transpose(x, perm)
      self.assertEqual(y.shape, tensor_shape.TensorShape(None))
      return y

    self.assertAllEqual(x, transpose_with_perm(x, [0, 1]))

  def testNullTensor(self):
    with self.cached_session():
      x = constant_op.constant([], dtype=dtypes.float32, shape=[1, 4, 0])
      xt = array_ops.transpose(x, [0, 2, 1])
      self.assertAllEqual(xt.shape, (1, 0, 4))

  def testScalar(self):
    with self.cached_session():
      x = constant_op.constant(42, dtype=dtypes.float32, shape=[])
      xt = array_ops.transpose(x)
      self.assertAllEqual(xt, x)

  def _testError(self, x, p, err):
    with self.cached_session():
      with self.assertRaisesOpError(err):
        self.evaluate(array_ops.transpose(x, p))

  def testError(self):
    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                r"must be rank 1"):
      array_ops.transpose(
          np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                r"3 is out of range"):
      array_ops.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
    self._testError(
        np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 1], "2 is missing")


if __name__ == "__main__":
  test.main()