/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"

namespace tensorflow {
namespace {

using ops::AvgPool;
using ops::AvgPool3D;
using ops::BiasAdd;
using ops::Conv2D;
using ops::Elu;
using ops::FractionalAvgPool;
using ops::FractionalMaxPool;
using ops::L2Loss;
using ops::LogSoftmax;
using ops::LRN;
using ops::MaxPool;
using ops::MaxPool3D;
using ops::MaxPoolV2;
using ops::Placeholder;
using ops::Relu;
using ops::Relu6;
using ops::Selu;
using ops::Softmax;
using ops::Softplus;
using ops::Softsign;

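// Test fixture for nn ops gradient functions. Each test builds a small graph
// and compares the registered symbolic gradient against a numeric
// finite-difference estimate via ComputeGradientError.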
class NNGradTest : public ::testing::Test {
 protected:
  NNGradTest() : scope_(Scope::NewRootScope()) {}

  // Checks the symbolic gradient of y with respect to x against a numeric
  // estimate, generating random inputs from the given shapes.
  void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
               const TensorShape& y_shape) {
    float max_error;
    TF_ASSERT_OK((ComputeGradientError<float, float, float>(
        scope_, {x}, {x_shape}, {y}, {y_shape}, &max_error)));
    EXPECT_LT(max_error, 1e-3);
  }

  // As above, but evaluates the gradient at a caller-provided initial value
  // of x, for ops that misbehave at random points.
  void RunTest(const Output& x, const Tensor& x_init_value, const Output& y,
               const TensorShape& y_shape) {
    float max_error;
    TF_ASSERT_OK((ComputeGradientError<float, float, float>(
        scope_, x, x_init_value, y, y_shape, &max_error)));
    EXPECT_LT(max_error, 1e-3);
  }

  // Variant that checks gradients for multiple inputs and outputs at once.
  void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
               const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
    TF_ASSERT_OK(scope_.status());
    float max_error;
    TF_ASSERT_OK((ComputeGradientError<float, float, float>(
        scope_, xs, x_shapes, ys, y_shapes, &max_error)));
    EXPECT_LT(max_error, 1e-3);
  }

  // Fills the tensor with random values such that every pair of elements is a
  // reasonable distance apart. This matters for max pooling operations: the
  // perturbations applied by the numeric gradient computation in the gradient
  // checker can otherwise change which element of a pool is the maximum.
  template <typename T>
  void SetRandomValuesForMaxPooling(Tensor* tensor) {
    auto tensor_flat = tensor->flat<T>();
    // First set the array to an increasing sequence of values spaced a
    // reasonable amount apart.
    T cur = 0;
    for (int64_t i = 0; i < tensor->NumElements(); i++) {
      tensor_flat(i) = cur;
      cur += 5e-2;
    }
    // Fisher-Yates shuffle the array.
    for (int64_t i = tensor->NumElements() - 1; i >= 1; i--) {
      // j <- random integer 0 <= j <= i
      size_t j = random::New64() % (i + 1);
      // Swap the values at i and j.
      T tmp = tensor_flat(i);
      tensor_flat(i) = tensor_flat(j);
      tensor_flat(j) = tmp;
    }
  }

  Scope scope_;
};

TEST_F(NNGradTest, SoftmaxGrad) {
  TensorShape shape({32, 10});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Softmax(scope_, x);
  RunTest(x, shape, y, shape);
}

TEST_F(NNGradTest, SoftmaxCrossEntropyWithLogitsGrad) {
  TensorShape logits_shape({5, 3});
  TensorShape loss_shape({5});

  auto logits = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(logits_shape));
  auto labels = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(logits_shape));
  auto y =
      tensorflow::ops::SoftmaxCrossEntropyWithLogits(scope_, logits, labels);
  // Note the reversal of the backprop and loss outputs. Issue #18734 has been
  // opened for this.
  RunTest({logits, labels}, {logits_shape, logits_shape}, {y.backprop, y.loss},
          {logits_shape, loss_shape});
}

TEST_F(NNGradTest, LogSoftmaxGrad) {
  TensorShape shape({5, 3});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = LogSoftmax(scope_, x);
  // Avoid numerical instability when computing finite differences.
  Tensor x_init_value =
      test::AsTensor<float>({-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f,
                             0.5f, 0.7f, 0.8f, -0.1f, 0.1f, 0.1f, 0.1f, 1.2f},
                            {5, 3});
  RunTest(x, x_init_value, y, shape);
}

TEST_F(NNGradTest, ReluGrad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Relu(scope_, x);
  // Avoid input values where the ReLU gradient is not well defined (around
  // zero).
  Tensor x_init_value = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
      {5, 2});
  RunTest(x, x_init_value, y, shape);
}

TEST_F(NNGradTest, Relu6Grad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Relu6(scope_, x);
  // Avoid input values where the ReLU gradient is not well defined (around
  // zero and six).
  Tensor x_init_value = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 6.1f, 6.3f, 6.5f, 6.7f, 6.9f},
      {5, 2});
  RunTest(x, x_init_value, y, shape);
}

TEST_F(NNGradTest, LeakyReluGrad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = ops::internal::LeakyRelu(scope_, x);
  // Avoid input values where the Leaky ReLU gradient is not well defined
  // (around zero).
  Tensor x_init_value = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
      {5, 2});
  RunTest(x, x_init_value, y, shape);
}

TEST_F(NNGradTest, LeakyReluGradGrad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  Tensor x_init_value = test::AsTensor<float>(
      {2.3f, 1.9f, 1.5f, 1.1f, 0.7f, 0.3f, -0.1f, -0.5f, -0.9f, -1.3f}, {5, 2});
  // Avoid feature values where the Leaky ReLU gradient is not well defined
  // (around zero).
  Tensor features = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
      {5, 2});
  auto y = ops::internal::LeakyReluGrad(scope_, x, features);
  RunTest(x, x_init_value, y, shape);
}

TEST_F(NNGradTest, EluGrad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Elu(scope_, x);
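  // Keep inputs away from zero, where Elu switches between its linear and
  // exponential regimes, for stable finite differences.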
  Tensor x_init_value = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
      {5, 2});
  RunTest(x, x_init_value, y, shape);
}

TEST_F(NNGradTest, SeluGrad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Selu(scope_, x);
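  // Selu is a scaled Elu, so likewise keep inputs away from the switch at
  // zero.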
  Tensor x_init_value = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
      {5, 2});
  RunTest(x, x_init_value, y, shape);
}

TEST_F(NNGradTest, L2LossGrad) {
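  // L2Loss reduces the entire input to a single scalar, sum(x^2) / 2.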
  TensorShape x_shape({5, 2});
  TensorShape y_shape({1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  auto y = L2Loss(scope_, x);
  RunTest(x, x_shape, y, y_shape);
}

TEST_F(NNGradTest, BiasAddGradHelper) {
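  // BiasAdd broadcasts the 1-D bias over the last dimension of x; the
  // gradient is checked with respect to both inputs.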
  TensorShape shape({4, 5});
  TensorShape bias_shape({5});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto bias = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(bias_shape));
  auto y = BiasAdd(scope_, x, bias);
  RunTest({x, bias}, {shape, bias_shape}, {y}, {shape});
}

TEST_F(NNGradTest, Conv2DGrad) {
  TensorShape shape({1, 2, 2, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
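  // A scalar 1x1 filter with unit strides makes the convolution a simple
  // scaling of the input.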
  Tensor filter = test::AsTensor<float>({0.5f}, {1, 1, 1, 1});
  const std::vector<int> strides{1, 1, 1, 1};
  auto y = Conv2D(scope_, x, filter, strides, "SAME");
  RunTest(x, shape, y, shape);
}

TEST_F(NNGradTest, MaxPoolGradHelper) {
  TensorShape x_shape({1, 2, 2, 1});
  TensorShape y_shape({1, 1, 1, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  // Set up the window and strides so that we only do one MaxPool.
  const std::vector<int> ksize{1, 2, 2, 1};
  const std::vector<int> strides{1, 2, 2, 1};
  auto y = MaxPool(scope_, x, ksize, strides, "VALID");
  Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
  SetRandomValuesForMaxPooling<float>(&x_init_value);
  RunTest(x, x_init_value, y, y_shape);
}

TEST_F(NNGradTest, MaxPoolGradV2Helper) {
  TensorShape x_shape({1, 2, 2, 1});
  TensorShape y_shape({1, 1, 1, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
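  // Unlike MaxPool, MaxPoolV2 takes ksize and strides as tensors rather than
  // as attributes.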
  // Set up the window and strides so that we only do one MaxPool.
  Tensor ksize = test::AsTensor<int>({1, 2, 2, 1}, {4});
  Tensor strides = test::AsTensor<int>({1, 2, 2, 1}, {4});
  auto y = MaxPoolV2(scope_, x, ksize, strides, "VALID");
  Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
  SetRandomValuesForMaxPooling<float>(&x_init_value);
  RunTest(x, x_init_value, y, y_shape);
}

TEST_F(NNGradTest, MaxPool3DGradHelper) {
  TensorShape x_shape({1, 3, 3, 3, 1});
  TensorShape y_shape({1, 1, 1, 1, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  // Set up the window and strides so that we only do one MaxPool3D.
  const std::vector<int> ksize{1, 3, 3, 3, 1};
  const std::vector<int> strides{1, 3, 3, 3, 1};
  auto y = MaxPool3D(scope_, x, ksize, strides, "VALID");
  Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
  SetRandomValuesForMaxPooling<float>(&x_init_value);
  RunTest(x, x_init_value, y, y_shape);
}

TEST_F(NNGradTest, AvgPoolGradHelper) {
  TensorShape x_shape({1, 2, 2, 1});
  TensorShape y_shape({1, 1, 1, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  // Set up the window and strides so that we only do one AvgPool.
  const std::vector<int> ksize{1, 2, 2, 1};
  const std::vector<int> strides{1, 2, 2, 1};
  auto y = AvgPool(scope_, x, ksize, strides, "SAME");
  RunTest(x, x_shape, y, y_shape);
}

TEST_F(NNGradTest, AvgPool3DGradHelper) {
  TensorShape x_shape({1, 3, 3, 3, 1});
  TensorShape y_shape({1, 1, 1, 1, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  // Set up the window and strides so that we only do one AvgPool3D.
  const std::vector<int> ksize{1, 3, 3, 3, 1};
  const std::vector<int> strides{1, 3, 3, 3, 1};
  auto y = AvgPool3D(scope_, x, ksize, strides, "SAME");
  RunTest(x, x_shape, y, y_shape);
}

TEST_F(NNGradTest, LRN) {
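  // LRN normalizes each element over a window of neighbors along the depth
  // (last) dimension.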
  TensorShape x_shape({1, 1, 2, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  auto y = LRN(scope_, x);
  RunTest(x, x_shape, y, x_shape);
}

TEST_F(NNGradTest, SoftplusGrad) {
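  // Softplus, log(1 + exp(x)), is smooth everywhere, so random inputs are
  // safe for finite differences.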
  TensorShape shape({3, 7});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Softplus(scope_, x);
  RunTest(x, shape, y, shape);
}

TEST_F(NNGradTest, SoftsignGrad) {
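  // Softsign, x / (1 + |x|), also has a continuous gradient, so no special
  // initial values are needed.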
  TensorShape shape({3, 7});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Softsign(scope_, x);
  RunTest(x, shape, y, shape);
}

TEST_F(NNGradTest, FractionalAvgPoolGradHelper) {
  TensorShape x_shape({1, 3, 7, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  // Force consistent pooling regions for unit testing.
  auto y = FractionalAvgPool(
      scope_, x, {1, 1.2, 1.9, 1},
      FractionalAvgPool::Deterministic(true).Overlapping(true).Seed(1).Seed2(
          2));
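  // With pooling ratios {1, 1.2, 1.9, 1}, the 1x3x7x1 input pools down to a
  // 1x2x3x1 output.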
  TensorShape y_shape({1, 2, 3, 1});
  RunTest(x, x_shape, y.output, y_shape);
}

TEST_F(NNGradTest, FractionalMaxPoolGradHelper) {
  TensorShape x_shape({1, 3, 7, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  // Force consistent pooling regions for unit testing.
  auto y = FractionalMaxPool(
      scope_, x, {1, 1.2, 1.9, 1},
      FractionalMaxPool::Deterministic(true).Overlapping(true).Seed(1).Seed2(
          2));
  Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
  SetRandomValuesForMaxPooling<float>(&x_init_value);
  TensorShape y_shape({1, 2, 3, 1});
  RunTest(x, x_init_value, y.output, y_shape);
}

}  // namespace
}  // namespace tensorflow