# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.logging_ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import string
import sys
import tempfile

from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class LoggingOpsTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testAssertDivideByZero(self):
    with self.cached_session() as sess:
      epsilon = ops.convert_to_tensor(1e-20)
      x = ops.convert_to_tensor(0.0)
      y = ops.convert_to_tensor(1.0)
      z = ops.convert_to_tensor(2.0)
      # assert(epsilon < y)
      # z / y
      with sess.graph.control_dependencies([
          control_flow_ops.Assert(
              math_ops.less(epsilon, y), ["Divide-by-zero"])
      ]):
        out = math_ops.div(z, y)
      self.assertAllEqual(2.0, self.evaluate(out))
      # assert(epsilon < x)
      # z / x
      #
      # This tests printing out multiple tensors
      with sess.graph.control_dependencies([
          control_flow_ops.Assert(
              math_ops.less(epsilon, x), ["Divide-by-zero", "less than x"])
      ]):
        out = math_ops.div(z, x)
      with self.assertRaisesOpError("less than x"):
        self.evaluate(out)


class PrintV2Test(test.TestCase):

  @test_util.run_in_graph_and_eager_modes()
  def testPrintOneTensor(self):
    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(tensor)
        self.evaluate(print_op)

      expected = "[0 1 2 ... 7 8 9]"
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintOneStringTensor(self):
    with self.cached_session():
      tensor = ops.convert_to_tensor([char for char in string.ascii_lowercase])
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(tensor)
        self.evaluate(print_op)

      expected = "[\"a\" \"b\" \"c\" ... \"x\" \"y\" \"z\"]"
      self.assertIn((expected + "\n"), printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintOneTensorVarySummarize(self):
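    # `summarize` controls how many leading and trailing entries of each
    # dimension are printed; summarize=-1 prints the tensor in full.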
    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(tensor, summarize=1)
        self.evaluate(print_op)

      expected = "[0 ... 9]"
      self.assertTrue((expected + "\n") in printed.contents())

    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(tensor, summarize=2)
        self.evaluate(print_op)

      expected = "[0 1 ... 8 9]"
      self.assertTrue((expected + "\n") in printed.contents())

    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(tensor, summarize=3)
        self.evaluate(print_op)

      expected = "[0 1 2 ... 7 8 9]"
      self.assertTrue((expected + "\n") in printed.contents())

    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(tensor, summarize=-1)
        self.evaluate(print_op)

      expected = "[0 1 2 3 4 5 6 7 8 9]"
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintOneVariable(self):
    with self.cached_session():
      var = variables.Variable(math_ops.range(10))
      if not context.executing_eagerly():
        variables.global_variables_initializer().run()
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(var)
        self.evaluate(print_op)
      expected = "[0 1 2 ... 7 8 9]"
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintTwoVariablesInStructWithAssignAdd(self):
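    # The assign_add is evaluated before the print, so var_one should be
    # printed as 3.14 (2.14 + 1.0).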
    with self.cached_session():
      var_one = variables.Variable(2.14)
      plus_one = var_one.assign_add(1.0)
      var_two = variables.Variable(math_ops.range(10))
      if not context.executing_eagerly():
        variables.global_variables_initializer().run()
      with self.captureWritesToStream(sys.stderr) as printed:
        self.evaluate(plus_one)
        print_op = logging_ops.print_v2(var_one, {"second": var_two})
        self.evaluate(print_op)
      expected = "3.14 {'second': [0 1 2 ... 7 8 9]}"
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintTwoTensors(self):
    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(tensor, tensor * 10)
        self.evaluate(print_op)
      expected = "[0 1 2 ... 7 8 9] [0 10 20 ... 70 80 90]"
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintPlaceholderGeneration(self):
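    # Braces supplied by the user should be printed literally rather than
    # being consumed as the placeholders print_v2 generates internally.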
    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2("{}6", {"{}": tensor * 10})
        self.evaluate(print_op)
      expected = "{}6 {'{}': [0 10 20 ... 70 80 90]}"
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintNoTensors(self):
    with self.cached_session():
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(23, [23, 5], {"6": 12})
        self.evaluate(print_op)
      expected = "23 [23, 5] {'6': 12}"
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintFloatScalar(self):
    with self.cached_session():
      tensor = ops.convert_to_tensor(434.43)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(tensor)
        self.evaluate(print_op)
      expected = "434.43"
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintStringScalar(self):
    with self.cached_session():
      tensor = ops.convert_to_tensor("scalar")
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(tensor)
        self.evaluate(print_op)
      expected = "scalar"
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintComplexTensorStruct(self):
    with self.cached_session():
      tensor = math_ops.range(10)
      small_tensor = constant_op.constant([0.3, 12.4, -16.1])
      big_tensor = math_ops.mul(tensor, 10)
      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(
            "first:", tensor, "middle:",
            {"small": small_tensor, "Big": big_tensor}, 10,
            [tensor * 2, tensor])
        self.evaluate(print_op)
      # Note that the keys in the dict will always be sorted,
      # so 'Big' comes before 'small'
      expected = ("first: [0 1 2 ... 7 8 9] "
                  "middle: {'Big': [0 10 20 ... 70 80 90], "
                  "'small': [0.3 12.4 -16.1]} "
                  "10 [[0 2 4 ... 14 16 18], [0 1 2 ... 7 8 9]]")
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintSparseTensor(self):
    with self.cached_session():
      ind = [[0, 0], [1, 0], [1, 3], [4, 1], [1, 4], [3, 2], [3, 3]]
      val = [0, 10, 13, 4, 14, 32, 33]
      shape = [5, 6]

      sparse = sparse_tensor.SparseTensor(
          constant_op.constant(ind, dtypes.int64),
          constant_op.constant(val, dtypes.int64),
          constant_op.constant(shape, dtypes.int64))

      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2(sparse)
        self.evaluate(print_op)
      expected = ("'SparseTensor(indices=[[0 0]\n"
                  " [1 0]\n"
                  " [1 3]\n"
                  " ...\n"
                  " [1 4]\n"
                  " [3 2]\n"
                  " [3 3]], values=[0 10 13 ... 14 32 33], shape=[5 6])'")
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintSparseTensorInDataStruct(self):
    with self.cached_session():
      ind = [[0, 0], [1, 0], [1, 3], [4, 1], [1, 4], [3, 2], [3, 3]]
      val = [0, 10, 13, 4, 14, 32, 33]
      shape = [5, 6]

      sparse = sparse_tensor.SparseTensor(
          constant_op.constant(ind, dtypes.int64),
          constant_op.constant(val, dtypes.int64),
          constant_op.constant(shape, dtypes.int64))

      with self.captureWritesToStream(sys.stderr) as printed:
        print_op = logging_ops.print_v2([sparse])
        self.evaluate(print_op)
      expected = ("['SparseTensor(indices=[[0 0]\n"
                  " [1 0]\n"
                  " [1 3]\n"
                  " ...\n"
                  " [1 4]\n"
                  " [3 2]\n"
                  " [3 3]], values=[0 10 13 ... 14 32 33], shape=[5 6])']")
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintOneTensorStdout(self):
    with self.cached_session():
      tensor = math_ops.range(10)
      with self.captureWritesToStream(sys.stdout) as printed:
        print_op = logging_ops.print_v2(
            tensor, output_stream=sys.stdout)
        self.evaluate(print_op)
      expected = "[0 1 2 ... 7 8 9]"
      self.assertTrue((expected + "\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintTensorsToFile(self):
    tmpfile_name = tempfile.mktemp(".printv2_test")
    tensor_0 = math_ops.range(0, 10)
    print_op_0 = logging_ops.print_v2(tensor_0,
                                      output_stream="file://"+tmpfile_name)
    self.evaluate(print_op_0)
    tensor_1 = math_ops.range(11, 20)
    print_op_1 = logging_ops.print_v2(tensor_1,
                                      output_stream="file://"+tmpfile_name)
    self.evaluate(print_op_1)
    try:
      f = open(tmpfile_name, "r")
      line_0 = f.readline()
      expected_0 = "[0 1 2 ... 7 8 9]"
      self.assertTrue(expected_0 in line_0)
      line_1 = f.readline()
      expected_1 = "[11 12 13 ... 17 18 19]"
      self.assertTrue(expected_1 in line_1)
      f.close()
      os.remove(tmpfile_name)
    except IOError as e:
      self.fail(e)

  @test_util.run_in_graph_and_eager_modes()
  def testInvalidOutputStreamRaisesError(self):
    with self.cached_session():
      tensor = math_ops.range(10)
      with self.assertRaises(ValueError):
        print_op = logging_ops.print_v2(
            tensor, output_stream="unknown")
        self.evaluate(print_op)

  @test_util.run_deprecated_v1
  def testPrintOpName(self):
    with self.cached_session():
      tensor = math_ops.range(10)
      print_op = logging_ops.print_v2(tensor, name="print_name")
      self.assertEqual(print_op.name, "print_name")

  @test_util.run_deprecated_v1
  def testNoDuplicateFormatOpGraphModeAfterExplicitFormat(self):
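    # print_v2 should reuse the StringFormat op created explicitly below
    # rather than adding a second one to the graph.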
    with self.cached_session():
      tensor = math_ops.range(10)
      formatted_string = string_ops.string_format("{}", tensor)
      print_op = logging_ops.print_v2(formatted_string)
      self.evaluate(print_op)
      graph_ops = ops.get_default_graph().get_operations()
      format_ops = [op for op in graph_ops if op.type == "StringFormat"]
      # Should be only 1 format_op for graph mode.
      self.assertEqual(len(format_ops), 1)

  def testPrintOneTensorEagerOnOpCreate(self):
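    # In eager mode the print executes as soon as the op is created, so no
    # explicit evaluate() call is needed.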
    with self.cached_session():
      with context.eager_mode():
        tensor = math_ops.range(10)
        expected = "[0 1 2 ... 7 8 9]"
        with self.captureWritesToStream(sys.stderr) as printed:
          logging_ops.print_v2(tensor)
        self.assertTrue((expected + "\n") in printed.contents())

  def testPrintsOrderedInDefun(self):
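    # Prints issued inside a defun should appear in program order.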
    with context.eager_mode():

      @function.defun
      def prints():
        logging_ops.print_v2("A")
        logging_ops.print_v2("B")
        logging_ops.print_v2("C")

      with self.captureWritesToStream(sys.stderr) as printed:
        prints()
      self.assertTrue(("A\nB\nC\n") in printed.contents())

  @test_util.run_in_graph_and_eager_modes()
  def testPrintInDefunWithoutExplicitEvalOfPrint(self):
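    # The print op is never fetched explicitly; it should still run as a side
    # effect every time the function is called.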
    @function.defun
    def f():
      tensor = math_ops.range(10)
      logging_ops.print_v2(tensor)
      return tensor

    expected = "[0 1 2 ... 7 8 9]"
    with self.captureWritesToStream(sys.stderr) as printed_one:
      x = f()
      self.evaluate(x)
    self.assertTrue((expected + "\n") in printed_one.contents())

    # We execute the function again to make sure it doesn't only print on the
    # first call.
    with self.captureWritesToStream(sys.stderr) as printed_two:
      y = f()
      self.evaluate(y)
    self.assertTrue((expected + "\n") in printed_two.contents())


class PrintGradientTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes
  def testPrintShape(self):
    inp = constant_op.constant(2.0, shape=[100, 32])
    inp_printed = logging_ops.Print(inp, [inp])
    self.assertEqual(inp.get_shape(), inp_printed.get_shape())

  def testPrintString(self):
    inp = constant_op.constant(2.0, shape=[100, 32])
    inp_printed = logging_ops.Print(inp, ["hello"])
    self.assertEqual(inp.get_shape(), inp_printed.get_shape())

  @test_util.run_deprecated_v1
  def testPrintGradient(self):
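    # Print behaves as an identity on its first input, so the gradient of wx
    # with respect to w should match with and without the Print op.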
    with self.cached_session():
      inp = constant_op.constant(2.0, shape=[100, 32], name="in")
      w = constant_op.constant(4.0, shape=[10, 100], name="w")
      wx = math_ops.matmul(w, inp, name="wx")
      wx_print = logging_ops.Print(wx, [w, w, w])
      wx_grad = gradients_impl.gradients(wx, w)[0]
      wx_print_grad = gradients_impl.gradients(wx_print, w)[0]
      wxg = self.evaluate(wx_grad)
      wxpg = self.evaluate(wx_print_grad)
    self.assertAllEqual(wxg, wxpg)


if __name__ == "__main__":
  test.main()