• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15"""Tests for tensorflow.ops.linalg_ops.eig."""
16
17import numpy as np
18
19from tensorflow.python.framework import constant_op
20from tensorflow.python.framework import dtypes as dtypes_lib
21from tensorflow.python.framework import errors
22from tensorflow.python.framework import test_util
23from tensorflow.python.ops import array_ops
24from tensorflow.python.ops import gen_linalg_ops
25from tensorflow.python.ops import gradient_checker_v2
26from tensorflow.python.ops import linalg_ops
27from tensorflow.python.ops import math_ops
28from tensorflow.python.ops import random_ops
29from tensorflow.python.ops import sort_ops
30from tensorflow.python.platform import test
31
32
33def _AddTest(test_class, op_name, testcase_name, fn):
34  test_name = "_".join(["test", op_name, testcase_name])
35  if hasattr(test_class, test_name):
36    raise RuntimeError("Test %s defined more than once" % test_name)
37  setattr(test_class, test_name, fn)
38
39
class EigTest(test.TestCase):
  """Tests for linalg_ops.eig/eigvals: shape validation, concurrency,
  denormal-sensitive inputs, and dtype checking."""

  @test_util.run_deprecated_v1
  def testWrongDimensions(self):
    # The input to self_adjoint_eig should be a tensor of
    # at least rank 2.
    scalar = constant_op.constant(1.)
    with self.assertRaises(ValueError):
      linalg_ops.eig(scalar)
    vector = constant_op.constant([1., 2.])
    with self.assertRaises(ValueError):
      linalg_ops.eig(vector)

  @test_util.run_deprecated_v1
  def testConcurrentExecutesWithoutError(self):
    # Build eig (with and without eigenvectors) on two matrices created
    # with the same seed, then evaluate everything in a single run.
    all_ops = []
    with self.session():
      for compute_v_ in True, False:
        matrix1 = random_ops.random_normal([5, 5], seed=42)
        matrix2 = random_ops.random_normal([5, 5], seed=42)
        if compute_v_:
          e1, v1 = linalg_ops.eig(matrix1)
          e2, v2 = linalg_ops.eig(matrix2)
          all_ops += [e1, v1, e2, v2]
        else:
          e1 = linalg_ops.eigvals(matrix1)
          e2 = linalg_ops.eigvals(matrix2)
          all_ops += [e1, e2]
      # val layout: [e1, v1, e2, v2, e1_eigvals, e2_eigvals].
      val = self.evaluate(all_ops)
      self.assertAllEqual(val[0], val[2])
      # The algorithm is slightly different for compute_v being True and False,
      # so require approximate equality only here.
      self.assertAllClose(val[2], val[4])
      self.assertAllEqual(val[4], val[5])
      self.assertAllEqual(val[1], val[3])

  def testMatrixThatFailsWhenFlushingDenormsToZero(self):
    # Test a 32x32 matrix which is known to fail if denorm floats are flushed to
    # zero.
    matrix = np.genfromtxt(
        test.test_src_dir_path(
            "python/kernel_tests/linalg/testdata/"
            "self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
    self.assertEqual(matrix.shape, (32, 32))
    matrix_tensor = constant_op.constant(matrix)
    with self.session() as _:
      (e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor))
      self.assertEqual(e.size, 32)
      # Eigenvectors should be orthonormal: V * V^T ~= I.
      self.assertAllClose(
          np.matmul(v, v.transpose()), np.eye(32, dtype=np.float32), atol=2e-3)
      # Reconstruction: V * diag(E) * V^T should recover the input matrix.
      self.assertAllClose(matrix,
                          np.matmul(np.matmul(v, np.diag(e)), v.transpose()))

  def testMismatchedDtypes(self):
    # A float32 input must produce complex64 output; requesting complex128
    # via Tout is rejected by the kernel.
    tensor = constant_op.constant([[0, 1], [2, 3]], dtype=dtypes_lib.float32)
    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                "Invalid output dtype"):
      self.evaluate(
          gen_linalg_ops.eig(
              input=tensor,
              Tout=dtypes_lib.complex128,  # Expected dtype: complex64.
              compute_v=True))
102
103
def SortEigenValues(e):
  """Return eigenvalues sorted along the last axis by (real + imag)."""
  order = np.argsort(e.real + e.imag, axis=-1)
  return np.take(e, order, axis=-1)
107
108
def SortEigenDecomposition(e, v):
  """Sort eigenvalues by (real + imag), permuting eigenvector columns to match.

  If `v` has fewer than two dimensions, both inputs are returned unchanged.
  """
  if v.ndim < 2:
    return e, v
  order = np.argsort(e.real + e.imag, axis=-1)
  sorted_e = np.take(e, order, axis=-1)
  sorted_v = np.take(v, order, axis=-1)
  return sorted_e, sorted_v
114
115
def EquilibrateEigenVectorPhases(x, y):
  """Rotate the eigenvector columns of `x` to match the phases of `y`.

  Eigenvectors are only unique up to an arbitrary complex phase. Assuming
  each column of `x` differs from the corresponding column of `y` by a
  multiplicative phase factor only, this returns a copy of `x` rotated so
  that its columns line up with `y`.

  Args:
    x: `np.ndarray` with Eigenvectors
    y: `np.ndarray` with Eigenvectors

  Returns:
    `np.ndarray` containing an equilibrated version of x.
  """
  # Per-column inner product <x, y>; its argument is the phase difference.
  inner = np.sum(np.conj(x) * y, axis=-2, keepdims=True)
  unit_phases = inner / np.abs(inner)
  return unit_phases * x
133
134
def _GetEigTest(dtype_, shape_, compute_v_):
  """Returns a test method checking eig/eigvals for one dtype/shape combo.

  The returned `Test` compares TF's eigendecomposition against
  numpy.linalg.eig and checks the reconstruction V*diag(E)*V^(-1) ~= A.
  """

  def CompareEigenVectors(self, x, y, tol):
    # Remove the arbitrary per-column phase before comparing vectors.
    x = EquilibrateEigenVectorPhases(x, y)
    self.assertAllClose(x, y, atol=tol)

  def CompareEigenDecompositions(self, x_e, x_v, y_e, y_v, tol):
    # Flatten any batch dimensions, then compare each decomposition after
    # sorting eigenvalues (ordering is otherwise implementation-defined).
    num_batches = int(np.prod(x_e.shape[:-1]))
    n = x_e.shape[-1]
    x_e = np.reshape(x_e, [num_batches] + [n])
    x_v = np.reshape(x_v, [num_batches] + [n, n])
    y_e = np.reshape(y_e, [num_batches] + [n])
    y_v = np.reshape(y_v, [num_batches] + [n, n])
    for i in range(num_batches):
      x_ei, x_vi = SortEigenDecomposition(x_e[i, :], x_v[i, :, :])
      y_ei, y_vi = SortEigenDecomposition(y_e[i, :], y_v[i, :, :])
      self.assertAllClose(x_ei, y_ei, atol=tol, rtol=tol)
      CompareEigenVectors(self, x_vi, y_vi, tol)

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    np_dtype = dtype_.as_numpy_dtype

    def RandomInput():
      # Most matrices are diagonalizable
      a = np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      if dtype_.is_complex:
        a += 1j * np.random.uniform(
            low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      # Every batch entry is the same matrix (tiled), so batch results
      # can be checked against a single numpy decomposition.
      a = np.tile(a, batch_shape + (1, 1))
      return a

    # Looser tolerance for single-precision dtypes.
    if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
      atol = 1e-4
    else:
      atol = 1e-12

    a = RandomInput()
    np_e, np_v = np.linalg.eig(a)
    with self.session():
      if compute_v_:
        tf_e, tf_v = linalg_ops.eig(constant_op.constant(a))

        # Check that V*diag(E)*V^(-1) is close to A.
        a_ev = math_ops.matmul(
            math_ops.matmul(tf_v, array_ops.matrix_diag(tf_e)),
            linalg_ops.matrix_inverse(tf_v))
        self.assertAllClose(self.evaluate(a_ev), a, atol=atol)

        # Compare to numpy.linalg.eig.
        CompareEigenDecompositions(self, np_e, np_v, self.evaluate(tf_e),
                                   self.evaluate(tf_v), atol)
      else:
        tf_e = linalg_ops.eigvals(constant_op.constant(a))
        self.assertAllClose(
            SortEigenValues(np_e),
            SortEigenValues(self.evaluate(tf_e)),
            atol=atol)

  return Test
198
199
class EigGradTest(test.TestCase):
  """Gradient tests for eig/eigvals; methods are attached via _AddTest."""
  pass  # Filled in below
202
203
def _GetEigGradTest(dtype_, shape_, compute_v_):
  """Returns a test method gradient-checking eig/eigvals for one config.

  Compares the registered analytic gradient of eig against a central-difference
  numerical gradient via gradient_checker_v2.
  """

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    np_dtype = dtype_.as_numpy_dtype

    def RandomInput():
      # Most matrices are diagonalizable
      a = np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      if dtype_.is_complex:
        a += 1j * np.random.uniform(
            low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      a = np.tile(a, batch_shape + (1, 1))
      return a

    # Optimal stepsize for central difference is O(epsilon^{1/3}).
    epsilon = np.finfo(np_dtype).eps
    delta = 0.1 * epsilon**(1.0 / 3.0)
    # tolerance obtained by looking at actual differences using
    # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
    # after discarding one random input sample
    # NOTE: the discarded draw below is deliberate — it keeps the RNG state
    # aligned with the tolerances calibrated above; do not remove.
    _ = RandomInput()
    if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
      tol = 1e-2
    else:
      tol = 1e-7
    with self.session():

      def Compute(x):
        # Canonicalize the eig output so that the function being
        # differentiated is well-defined (unique) at the test point.
        e, v = linalg_ops.eig(x)

        # We sort eigenvalues by e.real+e.imag to have consistent
        # order between runs
        b_dims = len(e.shape) - 1
        idx = sort_ops.argsort(math_ops.real(e) + math_ops.imag(e), axis=-1)
        e = array_ops.gather(e, idx, batch_dims=b_dims)
        v = array_ops.gather(v, idx, batch_dims=b_dims)

        # (complex) Eigenvectors are only unique up to an arbitrary phase
        # We normalize the vectors such that the first component has phase 0.
        top_rows = v[..., 0:1, :]
        angle = -math_ops.angle(top_rows)
        phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
        v *= phase
        return e, v

      if compute_v_:
        funcs = [lambda x: Compute(x)[0], lambda x: Compute(x)[1]]
      else:
        funcs = [linalg_ops.eigvals]

      for f in funcs:
        theoretical, numerical = gradient_checker_v2.compute_gradient(
            f, [RandomInput()], delta=delta)
        self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test
264
265
if __name__ == "__main__":
  # Register one EigTest (and, for complex dtypes, one EigGradTest) per
  # combination of dtype, matrix size, batch shape, and compute_v flag.
  dtypes_to_test = [
      dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.complex64,
      dtypes_lib.complex128
  ]
  for compute_v in True, False:
    for dtype in dtypes_to_test:
      for size in 1, 2, 5, 10:
        # The extra (3, 2) batch shape is repeated 0 or 1 times via boolean
        # multiplication, i.e. it is only included for small matrices.
        # (Was `max(size, size) < 10`, which is just `size < 10`.)
        for batch_dims in [(), (3,)] + [(3, 2)] * (size < 10):
          shape = batch_dims + (size, size)
          name = "%s_%s_%s" % (dtype.name, "_".join(map(str, shape)), compute_v)
          _AddTest(EigTest, "Eig", name, _GetEigTest(dtype, shape, compute_v))

          # Gradient tests are only registered for complex dtypes.
          if dtype not in [dtypes_lib.float32, dtypes_lib.float64]:
            _AddTest(EigGradTest, "EigGrad", name,
                     _GetEigGradTest(dtype, shape, compute_v))
  test.main()
283