• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <cmath>
#include <vector>

#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"

26 namespace tensorflow {
27 namespace {
28 
29 using ops::Complex;
30 using ops::Const;
31 using ops::Div;
32 using ops::MatMul;
33 using ops::Placeholder;
34 using ops::Real;
35 using ops::Split;
36 using ops::Square;
37 using ops::Stack;
38 using ops::Sub;
39 using ops::Unstack;
40 
// Checks that for a simple elementwise op (Square) on float inputs, the
// numerically-estimated and symbolically-computed gradients agree closely.
TEST(GradientCheckerTest, BasicFloat) {
  Scope root = Scope::NewRootScope();
  const TensorShape kShape({2, 4, 3});
  auto input = Placeholder(root, DT_FLOAT, Placeholder::Shape(kShape));
  auto output = Square(root, input);
  float max_error;
  TF_ASSERT_OK((ComputeGradientError<float, float, float>(
      root, {input}, {kShape}, {output}, {kShape}, &max_error)));
  EXPECT_LT(max_error, 1e-4);
}
51 
// Same as BasicFloat but in double precision, so a much tighter error
// tolerance (1e-10) is expected to hold.
TEST(GradientCheckerTest, BasicDouble) {
  Scope root = Scope::NewRootScope();
  const TensorShape kShape({2, 4, 3});
  auto input = Placeholder(root, DT_DOUBLE, Placeholder::Shape(kShape));
  auto output = Square(root, input);
  double max_error;
  TF_ASSERT_OK((ComputeGradientError<double, double, double>(
      root, {input}, {kShape}, {output}, {kShape}, &max_error)));
  EXPECT_LT(max_error, 1e-10);
}
62 
// Exercises the gradient checker when both input and output are complex64;
// the error is still reported as a real (float) value.
TEST(GradientCheckerTest, BasicComplex64) {
  Scope root = Scope::NewRootScope();
  const TensorShape kShape({2, 4, 3});
  auto input = Placeholder(root, DT_COMPLEX64, Placeholder::Shape(kShape));
  auto output = Square(root, input);
  float max_error;
  TF_ASSERT_OK((ComputeGradientError<complex64, complex64, float>(
      root, {input}, {kShape}, {output}, {kShape}, &max_error)));
  EXPECT_LT(max_error, 1e-4);
}
73 
// Exercises the gradient checker when both input and output are complex128;
// the error is reported as a real (double) value with a tight tolerance.
TEST(GradientCheckerTest, BasicComplex128) {
  Scope root = Scope::NewRootScope();
  const TensorShape kShape({2, 4, 3});
  auto input = Placeholder(root, DT_COMPLEX128, Placeholder::Shape(kShape));
  auto output = Square(root, input);
  double max_error;
  TF_ASSERT_OK((ComputeGradientError<complex128, complex128, double>(
      root, {input}, {kShape}, {output}, {kShape}, &max_error)));
  EXPECT_LT(max_error, 1e-10);
}
84 
// Tests an op whose input is real and whose output is complex:
// Complex(x, x) builds x + xi from a float tensor.
TEST(GradientCheckerTest, FloatToComplex64) {
  Scope root = Scope::NewRootScope();
  const TensorShape kShape({2, 4, 3});
  auto input = Placeholder(root, DT_FLOAT, Placeholder::Shape(kShape));
  auto output = Complex(root, input, input);
  float max_error;
  TF_ASSERT_OK((ComputeGradientError<float, complex64, float>(
      root, {input}, {kShape}, {output}, {kShape}, &max_error)));
  EXPECT_LT(max_error, 1e-4);
}
96 
// Tests an op whose input is complex and whose output is real:
// Real(x) extracts the real part of a complex64 tensor.
TEST(GradientCheckerTest, Complex64ToFloat) {
  Scope root = Scope::NewRootScope();
  const TensorShape kShape({2, 4, 3});
  auto input = Placeholder(root, DT_COMPLEX64, Placeholder::Shape(kShape));
  auto output = Real(root, input);
  float max_error;
  TF_ASSERT_OK((ComputeGradientError<complex64, float, float>(
      root, {input}, {kShape}, {output}, {kShape}, &max_error)));
  EXPECT_LT(max_error, 1e-4);
}
108 
109 // When calculating gradients that are undefined, test we get NaN
110 // as the computed error rather than 0.
TEST(GradientCheckerTest,BasicNan)111 TEST(GradientCheckerTest, BasicNan) {
112   Scope scope = Scope::NewRootScope();
113   TensorShape shape({2, 4, 3});
114   auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape));
115   // y = x/(x-x) should always return NaN
116   auto y = Div(scope, x, Sub(scope, x, x));
117   float max_error;
118   TF_ASSERT_OK((ComputeGradientError<float, float, float>(
119       scope, {x}, {shape}, {y}, {shape}, &max_error)));
120   EXPECT_TRUE(std::isnan(max_error));
121 }
122 
// Verifies gradients through MatMul with one variable input (a placeholder)
// and one constant input; only the placeholder's gradient is checked.
TEST(GradientCheckerTest, MatMulGrad) {
  Scope root = Scope::NewRootScope();

  const TensorShape lhs_shape({4, 3});
  const TensorShape rhs_shape({3, 2});
  const TensorShape prod_shape({4, 2});

  auto lhs = Placeholder(root, DT_DOUBLE, Placeholder::Shape(lhs_shape));
  auto rhs = Const(root, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}, rhs_shape);
  auto prod = MatMul(root, lhs, rhs);
  double max_error;
  TF_ASSERT_OK((ComputeGradientError<double, double, double>(
      root, {lhs}, {lhs_shape}, {prod}, {prod_shape}, &max_error)));
  EXPECT_LT(max_error, 1e-10);
}
138 
// Split is an op with a single input and multiple outputs, so this
// exercises the checker's multi-output path.
TEST(GradientCheckerTest, SplitGrad) {
  Scope root = Scope::NewRootScope();
  const TensorShape in_shape({5, 2});
  auto in = Placeholder(root, DT_DOUBLE, Placeholder::Shape(in_shape));
  // Split the two columns apart along the second dimension.
  auto axis = Const(root, 1, {});
  auto parts = Split(root, axis, in, /* num_split */ 2);
  const TensorShape part_shape = TensorShape({5, 1});
  double max_error;
  TF_ASSERT_OK((ComputeGradientError<double, double, double>(
      root, {in}, {in_shape}, parts.output, {part_shape, part_shape},
      &max_error)));
  EXPECT_LT(max_error, 1e-10);
}
153 
// Stack is an op with multiple inputs and a single output, so this
// exercises the checker's multi-input path.
TEST(GradientCheckerTest, StackGrad) {
  Scope root = Scope::NewRootScope();
  const TensorShape elem_shape({1, 2, 3});
  std::vector<Output> inputs;
  inputs.push_back(
      Placeholder(root, DT_DOUBLE, Placeholder::Shape(elem_shape)));
  inputs.push_back(
      Placeholder(root, DT_DOUBLE, Placeholder::Shape(elem_shape)));
  auto stacked = Stack(root, inputs, Stack::Axis(0));
  // Stacking two {1,2,3} tensors along a new leading axis gives {2,1,2,3}.
  const TensorShape stacked_shape({2, 1, 2, 3});
  double max_error;
  TF_ASSERT_OK((ComputeGradientError<double, double, double>(
      root, inputs, {elem_shape, elem_shape}, {stacked}, {stacked_shape},
      &max_error)));
  EXPECT_LT(max_error, 1e-10);
}
168 
// Chaining Stack into Unstack tests the gradient checker with multiple
// inputs AND multiple outputs in the same graph.
TEST(GradientCheckerTest, StackUnstackGrad) {
  Scope root = Scope::NewRootScope();
  const TensorShape elem_shape({1, 2, 3});
  std::vector<Output> inputs;
  inputs.push_back(
      Placeholder(root, DT_DOUBLE, Placeholder::Shape(elem_shape)));
  inputs.push_back(
      Placeholder(root, DT_DOUBLE, Placeholder::Shape(elem_shape)));
  // Stack then immediately Unstack along the same axis; outputs should
  // mirror the inputs, and so should the gradients.
  auto stacked = Stack(root, inputs, Stack::Axis(0));
  auto unstacked = Unstack(root, stacked, 2, Unstack::Axis(0));
  double max_error;
  TF_ASSERT_OK((ComputeGradientError<double, double, double>(
      root, inputs, {elem_shape, elem_shape}, unstacked.output,
      {elem_shape, elem_shape}, &max_error)));
  EXPECT_LT(max_error, 1e-10);
}
184 
185 }  // namespace
186 }  // namespace tensorflow
187