# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
15"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
16
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test


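# Registers `fn` on `test_class` under a generated method name, guarding
# against accidental double definition. For example, the __main__ block below
# registers tests with names such as
# SelfAdjointEigTest.test_SelfAdjointEig_float32_5_5_True.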
def _AddTest(test_class, op_name, testcase_name, fn):
  test_name = "_".join(["test", op_name, testcase_name])
  if hasattr(test_class, test_name):
    raise RuntimeError("Test %s defined more than once" % test_name)
  setattr(test_class, test_name, fn)


class SelfAdjointEigTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testWrongDimensions(self):
    # The input to self_adjoint_eig should be a tensor of at least rank 2.
    scalar = constant_op.constant(1.)
    with self.assertRaises(ValueError):
      linalg_ops.self_adjoint_eig(scalar)
    vector = constant_op.constant([1., 2.])
    with self.assertRaises(ValueError):
      linalg_ops.self_adjoint_eig(vector)

  @test_util.run_deprecated_v1
  def testConcurrentExecutesWithoutError(self):
    all_ops = []
    with self.session(use_gpu=True):
      for compute_v_ in True, False:
        matrix1 = random_ops.random_normal([5, 5], seed=42)
        matrix2 = random_ops.random_normal([5, 5], seed=42)
        if compute_v_:
          e1, v1 = linalg_ops.self_adjoint_eig(matrix1)
          e2, v2 = linalg_ops.self_adjoint_eig(matrix2)
          all_ops += [e1, v1, e2, v2]
        else:
          e1 = linalg_ops.self_adjoint_eigvals(matrix1)
          e2 = linalg_ops.self_adjoint_eigvals(matrix2)
          all_ops += [e1, e2]
      # val[0:4] are (e1, v1, e2, v2) from the compute_v branch; val[4:6] are
      # (e1, e2) from self_adjoint_eigvals. Identical seeds must give
      # identical results within each branch.
      val = self.evaluate(all_ops)
      self.assertAllEqual(val[0], val[2])
      # The algorithm is slightly different for compute_v being True and
      # False, so require only approximate equality across the two branches.
      self.assertAllClose(val[2], val[4])
      self.assertAllEqual(val[4], val[5])
      self.assertAllEqual(val[1], val[3])

  def testMatrixThatFailsWhenFlushingDenormsToZero(self):
    # Test a 32x32 matrix which is known to fail if denormal floats are
    # flushed to zero.
    matrix = np.genfromtxt(
        test.test_src_dir_path(
            "python/kernel_tests/testdata/"
            "self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
    self.assertEqual(matrix.shape, (32, 32))
    matrix_tensor = constant_op.constant(matrix)
    with self.session(use_gpu=True):
      (e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor))
      self.assertEqual(e.size, 32)
      self.assertAllClose(
          np.matmul(v, v.transpose()), np.eye(32, dtype=np.float32), atol=2e-3)
      self.assertAllClose(matrix,
                          np.matmul(np.matmul(v, np.diag(e)), v.transpose()))


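# Sorts eigenvalues into ascending order and applies the same permutation to
# the eigenvector columns, keeping each column paired with its eigenvalue.
# Illustrative example: e = [2., 1.] with v = np.eye(2) sorts to e = [1., 2.]
# with the two columns of v swapped.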
def SortEigenDecomposition(e, v):
  if v.ndim < 2:
    return e, v
  else:
    perm = np.argsort(e, -1)
    return np.take(e, perm, -1), np.take(v, perm, -1)


def EquilibrateEigenVectorPhases(x, y):
  """Equilibrate the phases of the eigenvectors in the columns of `x` and `y`.

  Eigenvectors are only unique up to an arbitrary phase. This function rotates
  x such that it matches y. Precondition: The columns of x and y differ by a
  multiplicative complex phase factor only.

  Args:
    x: `np.ndarray` with eigenvectors
    y: `np.ndarray` with eigenvectors

  Returns:
    `np.ndarray` containing an equilibrated version of x.
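
  Example (illustrative): if a column of x equals the corresponding column
  of y multiplied by -1, the phase factor computed for that column is -1,
  and multiplying by it flips the column back to match y.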
  """
  phases = np.sum(np.conj(x) * y, -2, keepdims=True)
  phases /= np.abs(phases)
  return phases * x


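# Factory for parameterized tests: returns a test method specialized to
# (dtype_, shape_, compute_v_). The __main__ block below attaches one such
# method to SelfAdjointEigTest per parameter combination.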
def _GetSelfAdjointEigTest(dtype_, shape_, compute_v_):

  def CompareEigenVectors(self, x, y, tol):
    x = EquilibrateEigenVectorPhases(x, y)
    self.assertAllClose(x, y, atol=tol)

  def CompareEigenDecompositions(self, x_e, x_v, y_e, y_v, tol):
    num_batches = int(np.prod(x_e.shape[:-1]))
    n = x_e.shape[-1]
    x_e = np.reshape(x_e, [num_batches] + [n])
    x_v = np.reshape(x_v, [num_batches] + [n, n])
    y_e = np.reshape(y_e, [num_batches] + [n])
    y_v = np.reshape(y_v, [num_batches] + [n, n])
    for i in range(num_batches):
      x_ei, x_vi = SortEigenDecomposition(x_e[i, :], x_v[i, :, :])
      y_ei, y_vi = SortEigenDecomposition(y_e[i, :], y_v[i, :, :])
      self.assertAllClose(x_ei, y_ei, atol=tol, rtol=tol)
      CompareEigenVectors(self, x_vi, y_vi, tol)

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    np_dtype = dtype_.as_numpy_dtype
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
    if dtype_.is_complex:
      a += 1j * np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
    # a + conj(a.T) is self-adjoint by construction.
    a += np.conj(a.T)
    a = np.tile(a, batch_shape + (1, 1))
    if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
      atol = 1e-4
    else:
      atol = 1e-12
    np_e, np_v = np.linalg.eigh(a)
    with self.session(use_gpu=True):
      if compute_v_:
        tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a))

        # Check that V * diag(E) * V^H is close to A; adjoint_b=True takes
        # the conjugate transpose, which also covers the complex dtypes.
        a_ev = math_ops.matmul(
            math_ops.matmul(tf_v, array_ops.matrix_diag(tf_e)),
            tf_v,
            adjoint_b=True)
        self.assertAllClose(self.evaluate(a_ev), a, atol=atol)

        # Compare to numpy.linalg.eigh.
        CompareEigenDecompositions(self, np_e, np_v, self.evaluate(tf_e),
                                   self.evaluate(tf_v), atol)
      else:
        tf_e = linalg_ops.self_adjoint_eigvals(constant_op.constant(a))
        self.assertAllClose(
            np.sort(np_e, -1), np.sort(self.evaluate(tf_e), -1), atol=atol)

  return Test


class SelfAdjointEigGradTest(test.TestCase):
  pass  # Filled in below.


def _GetSelfAdjointEigGradTest(dtype_, shape_, compute_v_):

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    np_dtype = dtype_.as_numpy_dtype

    def RandomInput():
      a = np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      if dtype_.is_complex:
        a += 1j * np.random.uniform(
            low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      a += np.conj(a.T)
      a = np.tile(a, batch_shape + (1, 1))
      return a

    # Optimal stepsize for central difference is O(epsilon^{1/3}).
    epsilon = np.finfo(np_dtype).eps
    delta = 0.1 * epsilon**(1.0 / 3.0)
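    # Illustrative values: float32 has eps ~= 1.19e-7, so delta ~= 4.9e-4;
    # float64 has eps ~= 2.22e-16, so delta ~= 6.1e-7.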
    # Tolerances obtained by looking at actual differences using
    # np.linalg.norm(theoretical - numerical, np.inf) on a -mavx build, after
    # discarding one random input sample; the discard is reproduced here so
    # the random stream stays aligned with that measurement.
    _ = RandomInput()
    if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
      tol = 1e-2
    else:
      tol = 1e-7
    with self.session(use_gpu=True):
      def Compute(x):
        e, v = linalg_ops.self_adjoint_eig(x)
        # (Complex) eigenvectors are only unique up to an arbitrary phase.
        # We normalize the vectors so that the first component of each has
        # phase 0, making the function differentiated by the gradient checker
        # well defined.
        top_rows = v[..., 0:1, :]
        if dtype_.is_complex:
          angle = -math_ops.angle(top_rows)
          phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
        else:
          phase = math_ops.sign(top_rows)
        v *= phase
        return e, v

      if compute_v_:
        funcs = [lambda x: Compute(x)[0], lambda x: Compute(x)[1]]
      else:
        funcs = [linalg_ops.self_adjoint_eigvals]

      for f in funcs:
        theoretical, numerical = gradient_checker_v2.compute_gradient(
            f,
            [RandomInput()],
            delta=delta)
        self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test


if __name__ == "__main__":
  for compute_v in True, False:
    for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.complex64,
                  dtypes_lib.complex128):
      for size in 1, 2, 5, 10:
        # [(3, 2)] * (size < 10) appends the (3, 2) batch shape only for
        # sizes below 10.
        for batch_dims in [(), (3,)] + [(3, 2)] * (size < 10):
          shape = batch_dims + (size, size)
          name = "%s_%s_%s" % (dtype.name, "_".join(map(str, shape)), compute_v)
          _AddTest(SelfAdjointEigTest, "SelfAdjointEig", name,
                   _GetSelfAdjointEigTest(dtype, shape, compute_v))
          _AddTest(SelfAdjointEigGradTest, "SelfAdjointEigGrad", name,
                   _GetSelfAdjointEigGradTest(dtype, shape, compute_v))
  test.main()