• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15
16"""Operations to emit summaries."""
17
18from __future__ import absolute_import
19from __future__ import division
20from __future__ import print_function
21
22import abc
23import collections
24import functools
25import os
26import re
27import threading
28
29import six
30
31from tensorflow.core.framework import graph_pb2
32from tensorflow.core.framework import summary_pb2
33from tensorflow.core.protobuf import config_pb2
34from tensorflow.python.eager import context
35from tensorflow.python.eager import profiler as _profiler
36from tensorflow.python.framework import constant_op
37from tensorflow.python.framework import dtypes
38from tensorflow.python.framework import ops
39from tensorflow.python.framework import smart_cond
40from tensorflow.python.framework import tensor_util
41from tensorflow.python.ops import array_ops
42from tensorflow.python.ops import control_flow_ops
43from tensorflow.python.ops import gen_resource_variable_ops
44from tensorflow.python.ops import gen_summary_ops
45from tensorflow.python.ops import math_ops
46from tensorflow.python.ops import resource_variable_ops
47from tensorflow.python.ops import summary_op_util
48from tensorflow.python.platform import tf_logging as logging
49from tensorflow.python.training import training_util
50from tensorflow.python.training.tracking import tracking
51from tensorflow.python.util import deprecation
52from tensorflow.python.util import tf_contextlib
53from tensorflow.python.util.tf_export import tf_export
54
55# Name for graph collection of summary writer init ops, which is only exposed
56# as a legacy API for tf.contrib.summary in TF 1.x.
57_SUMMARY_WRITER_INIT_COLLECTION_NAME = "_SUMMARY_WRITER_V2"
58
59
60class _SummaryState(threading.local):
61
62  def __init__(self):
63    super(_SummaryState, self).__init__()
64    self.is_recording = None
65    # TODO(slebedev): why a separate flag for DS and is it on by default?
66    self.is_recording_distribution_strategy = True
67    self.writer = None
68    self.step = None
69
70
71_summary_state = _SummaryState()
72
73
class _SummaryContextManager:
  """Context manager backing `SummaryWriter.as_default()`.

  On entry, installs the given writer (and optionally step) as the
  thread-local defaults; on exit, flushes the writer and restores the
  previous defaults.
  """
  # Note: implemented as a class rather than via @contextmanager so that
  # `set_as_default()` can be expressed as `as_default().__enter__()`; a
  # generator-based manager would run its `finally` block when GCed instead.

  def __init__(self, writer, step=None):
    self._writer = writer
    self._step = step
    self._prev_writer = None
    self._prev_step = None

  def __enter__(self):
    self._prev_writer = _summary_state.writer
    _summary_state.writer = self._writer
    if self._step is not None:
      self._prev_step = _summary_state.step
      _summary_state.step = self._step
    return self._writer

  def __exit__(self, *exc):
    # Flushes the summary writer in eager mode or in graph functions, but
    # not in legacy graph mode (you're on your own there).
    _summary_state.writer.flush()
    _summary_state.writer = self._prev_writer
    if self._step is not None:
      _summary_state.step = self._prev_step
    return False
102
103
def _should_record_summaries_internal(default_state):
  """Returns a boolean Tensor deciding whether summaries should be recorded.

  Recording happens only when all of the following hold: a default summary
  writer is set; the user-controlled condition (`_summary_state.is_recording`,
  usually set via `record_if()`) allows it; and the condition controlled by
  `tf.distribute` (`_summary_state.is_recording_distribution_strategy`,
  see tf.distribute.ReplicaContext) allows it.

  Args:
    default_state: bool. Value used for the user condition when a writer is
      set but the user never specified a recording condition.
  """
  if _summary_state.writer is None:
    return constant_op.constant(False)

  recording = _summary_state.is_recording
  if not callable(recording):
    # Short-circuit on a statically-known False so no cond ops are built.
    static_value = tensor_util.constant_value(recording)
    if static_value is not None and not static_value:
      return constant_op.constant(False)

  def _resolve(value):
    # Callables are evaluated on demand; plain values pass through.
    return value() if callable(value) else value

  ds_cond = _resolve(_summary_state.is_recording_distribution_strategy)
  user_cond = _resolve(recording)
  if user_cond is None:
    user_cond = default_state
  return math_ops.logical_and(ds_cond, user_cond)
133
134
135@tf_export("summary.should_record_summaries", v1=[])
136def should_record_summaries():
137  """Returns boolean Tensor which is True if summaries will be recorded.
138
139  If no default summary writer is currently registered, this always returns
140  False. Otherwise, this reflects the recording condition has been set via
141  `tf.summary.record_if()` (except that it may return False for some replicas
142  when using `tf.distribute.Strategy`). If no recording condition is active,
143  it defaults to True.
144  """
145  return _should_record_summaries_internal(default_state=True)
146
147
# Legacy symbol used by tf.contrib.summary.should_record_summaries.
def _legacy_contrib_should_record_summaries():
  """Legacy variant of `should_record_summaries`; defaults to False."""
  return _should_record_summaries_internal(default_state=False)
152
153
154@tf_export("summary.record_if", v1=[])
155@tf_contextlib.contextmanager
156def record_if(condition):
157  """Sets summary recording on or off per the provided boolean value.
158
159  The provided value can be a python boolean, a scalar boolean Tensor, or
160  or a callable providing such a value; if a callable is passed it will be
161  invoked on-demand to determine whether summary writing will occur.  Note that
162  when calling record_if() in an eager mode context, if you intend to provide a
163  varying condition like `step % 100 == 0`, you must wrap this in a
164  callable to avoid immediate eager evaluation of the condition.  In particular,
165  using a callable is the only way to have your condition evaluated as part of
166  the traced body of an @tf.function that is invoked from within the
167  `record_if()` context.
168
169  Args:
170    condition: can be True, False, a bool Tensor, or a callable providing such.
171
172  Yields:
173    Returns a context manager that sets this value on enter and restores the
174    previous value on exit.
175  """
176  old = _summary_state.is_recording
177  try:
178    _summary_state.is_recording = condition
179    yield
180  finally:
181    _summary_state.is_recording = old
182
183
# TODO(apassos) consider how to handle local step here.
def record_summaries_every_n_global_steps(n, global_step=None):
  """Sets the should_record_summaries Tensor to true if global_step % n == 0."""
  if global_step is None:
    global_step = training_util.get_or_create_global_step()
  with ops.device("cpu:0"):
    # Eager mode keeps the condition as a callable so it is re-evaluated on
    # demand; graph mode evaluates it once here, inside the CPU device scope.
    condition = lambda: math_ops.equal(global_step % n, 0)
    if not context.executing_eagerly():
      condition = condition()
  return record_if(condition)
194
195
def always_record_summaries():
  """Turns summary recording unconditionally on; returns the context manager."""
  return record_if(True)
199
200
def never_record_summaries():
  """Turns summary recording unconditionally off; returns the context manager."""
  return record_if(False)
204
205
206@tf_export("summary.experimental.get_step", v1=[])
207def get_step():
208  """Returns the default summary step for the current thread.
209
210  Returns:
211    The step set by `tf.summary.experimental.set_step()` if one has been set,
212    otherwise None.
213  """
214  return _summary_state.step
215
216
217@tf_export("summary.experimental.set_step", v1=[])
218def set_step(step):
219  """Sets the default summary step for the current thread.
220
221  For convenience, this function sets a default value for the `step` parameter
222  used in summary-writing functions elsewhere in the API so that it need not
223  be explicitly passed in every such invocation. The value can be a constant
224  or a variable, and can be retrieved via `tf.summary.experimental.get_step()`.
225
226  Note: when using this with @tf.functions, the step value will be captured at
227  the time the function is traced, so changes to the step outside the function
228  will not be reflected inside the function unless using a `tf.Variable` step.
229
230  Args:
231    step: An `int64`-castable default step value, or None to unset.
232  """
233  _summary_state.step = step
234
235
236@tf_export("summary.SummaryWriter", v1=[])
237@six.add_metaclass(abc.ABCMeta)
238class SummaryWriter(object):
239  """Interface representing a stateful summary writer object."""
240
241  def set_as_default(self, step=None):
242    """Enables this summary writer for the current thread.
243
244    For convenience, if `step` is not None, this function also sets a default
245    value for the `step` parameter used in summary-writing functions elsewhere
246    in the API so that it need not be explicitly passed in every such
247    invocation. The value can be a constant or a variable.
248
249    Note: when setting `step` in a @tf.function, the step value will be
250    captured at the time the function is traced, so changes to the step outside
251    the function will not be reflected inside the function unless using
252    a `tf.Variable` step.
253
254    Args:
255      step: An `int64`-castable default step value, or `None`. When not `None`,
256        the current step is modified to the given value. When `None`, the
257        current step is not modified.
258    """
259    self.as_default(step).__enter__()
260
261  def as_default(self, step=None):
262    """Returns a context manager that enables summary writing.
263
264    For convenience, if `step` is not None, this function also sets a default
265    value for the `step` parameter used in summary-writing functions elsewhere
266    in the API so that it need not be explicitly passed in every such
267    invocation. The value can be a constant or a variable.
268
269    Note: when setting `step` in a @tf.function, the step value will be
270    captured at the time the function is traced, so changes to the step outside
271    the function will not be reflected inside the function unless using
272    a `tf.Variable` step.
273
274    For example, `step` can be used as:
275
276    ```python
277    with writer_a.as_default(step=10):
278      tf.summary.scalar(tag, value)   # Logged to writer_a with step 10
279      with writer_b.as_default(step=20):
280        tf.summary.scalar(tag, value) # Logged to writer_b with step 20
281      tf.summary.scalar(tag, value)   # Logged to writer_a with step 10
282    ```
283
284    Args:
285      step: An `int64`-castable default step value, or `None`. When not `None`,
286        the current step is captured, replaced by a given one, and the original
287        one is restored when the context manager exits. When `None`, the current
288        step is not modified (and not restored when the context manager exits).
289
290    Returns:
291      The context manager.
292    """
293    return _SummaryContextManager(self, step)
294
295  def init(self):
296    """Initializes the summary writer."""
297    raise NotImplementedError()
298
299  def flush(self):
300    """Flushes any buffered data."""
301    raise NotImplementedError()
302
303  def close(self):
304    """Flushes and closes the summary writer."""
305    raise NotImplementedError()
306
307
class _ResourceSummaryWriter(SummaryWriter):
  """Implementation of SummaryWriter using a SummaryWriterInterface resource."""

  def __init__(self, create_fn, init_op_fn):
    self._resource = create_fn()
    self._init_op = init_op_fn(self._resource)
    self._closed = False
    if not context.executing_eagerly():
      # Legacy graph mode: expose the init op through a collection so that
      # summary_writer_initializer_op() can find and run it.
      ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, self._init_op)
    else:
      self._set_up_resource_deleter()

  # Extension point to be overridden by subclasses to customize deletion.

  def _set_up_resource_deleter(self):
    self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
        handle=self._resource, handle_device="cpu:0")

  def set_as_default(self, step=None):
    """See `SummaryWriter.set_as_default`."""
    if self._closed and context.executing_eagerly():
      raise RuntimeError("SummaryWriter is already closed")
    super().set_as_default(step)

  def as_default(self, step=None):
    """See `SummaryWriter.as_default`."""
    if self._closed and context.executing_eagerly():
      raise RuntimeError("SummaryWriter is already closed")
    return super().as_default(step)

  def init(self):
    """See `SummaryWriter.init`."""
    if self._closed and context.executing_eagerly():
      raise RuntimeError("SummaryWriter is already closed")
    return self._init_op

  def flush(self):
    """See `SummaryWriter.flush`."""
    # Flushing an already-closed eager writer is a silent no-op, not an error.
    if self._closed and context.executing_eagerly():
      return
    with ops.device("cpu:0"):
      return gen_summary_ops.flush_summary_writer(self._resource)

  def close(self):
    """See `SummaryWriter.close`."""
    # Closing twice in eager mode is a silent no-op.
    if self._closed and context.executing_eagerly():
      return
    try:
      with ops.control_dependencies([self.flush()]):
        with ops.device("cpu:0"):
          return gen_summary_ops.close_summary_writer(self._resource)
    finally:
      if context.executing_eagerly():
        self._closed = True
362
363
class _MultiMetaclass(
    type(_ResourceSummaryWriter), type(tracking.TrackableResource)):
  # Combined metaclass resolving the metaclass conflict between
  # `_ResourceSummaryWriter` (ABCMeta, via SummaryWriter) and
  # `tracking.TrackableResource`, so both can be inherited together below.
  pass
367
368
class _TrackableResourceSummaryWriter(
    _ResourceSummaryWriter,
    tracking.TrackableResource,
    metaclass=_MultiMetaclass):
  """A `_ResourceSummaryWriter` subclass that implements `TrackableResource`."""

  def __init__(self, create_fn, init_op_fn):
    # Resolve multiple inheritance via explicit calls to __init__() on parents.
    tracking.TrackableResource.__init__(self, device="/CPU:0")
    self._create_fn = create_fn
    self._init_op_fn = init_op_fn
    # Pass .resource_handle into _ResourceSummaryWriter parent class rather than
    # create_fn, to ensure it accesses the resource handle only through the
    # cached property so that everything is using a single resource handle.
    _ResourceSummaryWriter.__init__(
        self, create_fn=lambda: self.resource_handle, init_op_fn=init_op_fn)

  # Override for TrackableResource implementation.
  def _create_resource(self):
    """Creates the writer resource (called lazily via `resource_handle`)."""
    return self._create_fn()

  # Override for TrackableResource implementation.
  def _initialize(self):
    """Builds and returns the init op for the tracked resource handle."""
    return self._init_op_fn(self.resource_handle)

  # Override for TrackableResource implementation.
  def _destroy_resource(self):
    """Destroys the underlying resource, ignoring already-gone handles."""
    gen_resource_variable_ops.destroy_resource_op(
        self.resource_handle, ignore_lookup_error=True)

  def _set_up_resource_deleter(self):
    # Override to suppress ResourceSummaryWriter implementation; we don't need
    # the deleter since TrackableResource.__del__() handles it for us.
    pass
403
404
class _LegacyResourceSummaryWriter(SummaryWriter):
  """Legacy resource-backed SummaryWriter for tf.contrib.summary."""

  def __init__(self, resource, init_op_fn):
    self._resource = resource
    self._init_op_fn = init_op_fn
    init_op = self.init()
    if not context.executing_eagerly():
      # Legacy graph mode: register the init op for
      # summary_writer_initializer_op().
      ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, init_op)
    else:
      self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._resource, handle_device="cpu:0")

  def init(self):
    """See `SummaryWriter.init`."""
    # Unlike _ResourceSummaryWriter, builds a fresh init op on each call.
    return self._init_op_fn(self._resource)

  def flush(self):
    """See `SummaryWriter.flush`."""
    with ops.device("cpu:0"):
      return gen_summary_ops.flush_summary_writer(self._resource)

  def close(self):
    """See `SummaryWriter.close`."""
    # Flush first; the close op depends on the flush completing.
    with ops.control_dependencies([self.flush()]):
      with ops.device("cpu:0"):
        return gen_summary_ops.close_summary_writer(self._resource)
432
433
class _NoopSummaryWriter(SummaryWriter):
  """A summary writer that does nothing, for create_noop_writer()."""

  def set_as_default(self, step=None):
    # Intentionally does not install itself or touch the default step.
    pass

  @tf_contextlib.contextmanager
  def as_default(self, step=None):
    # Yields nothing; summaries written inside the context go nowhere.
    yield

  def init(self):
    pass

  def flush(self):
    pass

  def close(self):
    pass
452
453
454@tf_export(v1=["summary.initialize"])
455def initialize(
456    graph=None,  # pylint: disable=redefined-outer-name
457    session=None):
458  """Initializes summary writing for graph execution mode.
459
460  This operation is a no-op when executing eagerly.
461
462  This helper method provides a higher-level alternative to using
463  `tf.contrib.summary.summary_writer_initializer_op` and
464  `tf.contrib.summary.graph`.
465
466  Most users will also want to call `tf.compat.v1.train.create_global_step`
467  which can happen before or after this function is called.
468
469  Args:
470    graph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the writer.
471      This function will not write the default graph by default. When
472      writing to an event log file, the associated step will be zero.
473    session: So this method can call `tf.Session.run`. This defaults
474      to `tf.compat.v1.get_default_session`.
475
476  Raises:
477    RuntimeError: If  the current thread has no default
478      `tf.contrib.summary.SummaryWriter`.
479    ValueError: If session wasn't passed and no default session.
480  """
481  if context.executing_eagerly():
482    return
483  if _summary_state.writer is None:
484    raise RuntimeError("No default tf.contrib.summary.SummaryWriter found")
485  if session is None:
486    session = ops.get_default_session()
487    if session is None:
488      raise ValueError("session must be passed if no default session exists")
489  session.run(summary_writer_initializer_op())
490  if graph is not None:
491    data = _serialize_graph(graph)
492    x = array_ops.placeholder(dtypes.string)
493    session.run(graph_v1(x, 0), feed_dict={x: data})
494
495
496@tf_export("summary.create_file_writer", v1=[])
497def create_file_writer_v2(logdir,
498                          max_queue=None,
499                          flush_millis=None,
500                          filename_suffix=None,
501                          name=None,
502                          experimental_trackable=False):
503  """Creates a summary file writer for the given log directory.
504
505  Args:
506    logdir: a string specifying the directory in which to write an event file.
507    max_queue: the largest number of summaries to keep in a queue; will
508     flush once the queue gets bigger than this. Defaults to 10.
509    flush_millis: the largest interval between flushes. Defaults to 120,000.
510    filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
511    name: a name for the op that creates the writer.
512    experimental_trackable: a boolean that controls whether the returned writer
513      will be a `TrackableResource`, which makes it compatible with SavedModel
514      when used as a `tf.Module` property.
515
516  Returns:
517    A SummaryWriter object.
518  """
519  if logdir is None:
520    raise ValueError("logdir cannot be None")
521  inside_function = ops.inside_function()
522  with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"):
523    # Run init inside an init_scope() to hoist it out of tf.functions.
524    with ops.init_scope():
525      if context.executing_eagerly():
526        _check_create_file_writer_args(
527            inside_function,
528            logdir=logdir,
529            max_queue=max_queue,
530            flush_millis=flush_millis,
531            filename_suffix=filename_suffix)
532      logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
533      if max_queue is None:
534        max_queue = constant_op.constant(10)
535      if flush_millis is None:
536        flush_millis = constant_op.constant(2 * 60 * 1000)
537      if filename_suffix is None:
538        filename_suffix = constant_op.constant(".v2")
539
540      def create_fn():
541        # Use unique shared_name to prevent resource sharing in eager mode, but
542        # otherwise use a fixed shared_name to allow SavedModel TF 1.x loading.
543        if context.executing_eagerly():
544          shared_name = context.shared_name()
545        else:
546          shared_name = ops.name_from_scope_name(scope)  # pylint: disable=protected-access
547        return gen_summary_ops.summary_writer(
548            shared_name=shared_name, name=name)
549
550      init_op_fn = functools.partial(
551          gen_summary_ops.create_summary_file_writer,
552          logdir=logdir,
553          max_queue=max_queue,
554          flush_millis=flush_millis,
555          filename_suffix=filename_suffix)
556      if experimental_trackable:
557        return _TrackableResourceSummaryWriter(
558            create_fn=create_fn, init_op_fn=init_op_fn)
559      else:
560        return _ResourceSummaryWriter(
561            create_fn=create_fn, init_op_fn=init_op_fn)
562
563
def create_file_writer(logdir,
                       max_queue=None,
                       flush_millis=None,
                       filename_suffix=None,
                       name=None):
  """Creates a summary file writer in the current context under the given name.

  Args:
    logdir: a string, or None. If a string, creates a summary file writer
     which writes to the directory named by the string. If None, returns
     a mock object which acts like a summary writer but does nothing,
     useful to use as a context manager.
    max_queue: the largest number of summaries to keep in a queue; will
     flush once the queue gets bigger than this. Defaults to 10.
    flush_millis: the largest interval between flushes. Defaults to 120,000.
    filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
    name: Shared name for this SummaryWriter resource stored to default
      Graph. Defaults to the provided logdir prefixed with `logdir:`. Note: if a
      summary writer resource with this shared name already exists, the returned
      SummaryWriter wraps that resource and the other arguments have no effect.

  Returns:
    Either a summary writer or an empty object which can be used as a
    summary writer.
  """
  if logdir is None:
    return _NoopSummaryWriter()
  logdir = str(logdir)
  with ops.device("cpu:0"):
    # Fill in defaults; constants live on CPU alongside the writer resource.
    if max_queue is None:
      max_queue = constant_op.constant(10)
    if flush_millis is None:
      flush_millis = constant_op.constant(2 * 60 * 1000)
    if filename_suffix is None:
      filename_suffix = constant_op.constant(".v2")
    if name is None:
      name = "logdir:" + logdir
    resource = gen_summary_ops.summary_writer(shared_name=name)
    init_op_fn = functools.partial(
        gen_summary_ops.create_summary_file_writer,
        logdir=logdir,
        max_queue=max_queue,
        flush_millis=flush_millis,
        filename_suffix=filename_suffix)
    return _LegacyResourceSummaryWriter(
        resource=resource, init_op_fn=init_op_fn)
610
611
612@tf_export("summary.create_noop_writer", v1=[])
613def create_noop_writer():
614  """Returns a summary writer that does nothing.
615
616  This is useful as a placeholder in code that expects a context manager.
617  """
618  return _NoopSummaryWriter()
619
620
def _cleanse_string(name, pattern, value):
  """Validates a string `value` against `pattern`, then converts it to a
  string tensor. Non-string values are converted without validation."""
  is_string = isinstance(value, six.string_types)
  if is_string and not pattern.search(value):
    raise ValueError("%s (%s) must match %s" % (name, value, pattern.pattern))
  return ops.convert_to_tensor(value, dtypes.string)
625
626
def _nothing():
  """Convenient else branch for when summaries do not record.

  Returns a False constant so both branches of the surrounding smart_cond
  produce the same boolean type.
  """
  return constant_op.constant(False)
630
631
632@tf_export(v1=["summary.all_v2_summary_ops"])
633def all_v2_summary_ops():
634  """Returns all V2-style summary ops defined in the current default graph.
635
636  This includes ops from TF 2.0 tf.summary and TF 1.x tf.contrib.summary (except
637  for `tf.contrib.summary.graph` and `tf.contrib.summary.import_event`), but
638  does *not* include TF 1.x tf.summary ops.
639
640  Returns:
641    List of summary ops, or None if called under eager execution.
642  """
643  if context.executing_eagerly():
644    return None
645  return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
646
647
def summary_writer_initializer_op():
  """Graph-mode only. Returns the list of ops to create all summary writers.

  Returns:
    The initializer ops collected from the summary-writer init collection.

  Raises:
    RuntimeError: If in Eager mode.
  """
  if context.executing_eagerly():
    raise RuntimeError(
        "tf.contrib.summary.summary_writer_initializer_op is only "
        "supported in graph mode.")
  return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)
662
663
664_INVALID_SCOPE_CHARACTERS = re.compile(r"[^-_/.A-Za-z0-9]")
665
666
667@tf_export("summary.experimental.summary_scope", v1=[])
668@tf_contextlib.contextmanager
669def summary_scope(name, default_name="summary", values=None):
670  """Experimental context manager for use when defining a custom summary op.
671
672  This behaves similarly to `tf.name_scope`, except that it returns a generated
673  summary tag in addition to the scope name. The tag is structurally similar to
674  the scope name - derived from the user-provided name, prefixed with enclosing
675  name scopes if any - but we relax the constraint that it be uniquified, as
676  well as the character set limitation (so the user-provided name can contain
677  characters not legal for scope names; in the scope name these are removed).
678
679  This makes the summary tag more predictable and consistent for the user.
680
681  For example, to define a new summary op called `my_op`:
682
683  ```python
684  def my_op(name, my_value, step):
685    with tf.summary.summary_scope(name, "MyOp", [my_value]) as (tag, scope):
686      my_value = tf.convert_to_tensor(my_value)
687      return tf.summary.write(tag, my_value, step=step)
688  ```
689
690  Args:
691    name: string name for the summary.
692    default_name: Optional; if provided, used as default name of the summary.
693    values: Optional; passed as `values` parameter to name_scope.
694
695  Yields:
696    A tuple `(tag, scope)` as described above.
697  """
698  name = name or default_name
699  current_scope = ops.get_name_scope()
700  tag = current_scope + "/" + name if current_scope else name
701  # Strip illegal characters from the scope name, and if that leaves nothing,
702  # use None instead so we pick up the default name.
703  name = _INVALID_SCOPE_CHARACTERS.sub("", name) or None
704  with ops.name_scope(name, default_name, values, skip_on_eager=False) as scope:
705    yield tag, scope
706
707
708@tf_export("summary.write", v1=[])
709def write(tag, tensor, step=None, metadata=None, name=None):
710  """Writes a generic summary to the default SummaryWriter if one exists.
711
712  This exists primarily to support the definition of type-specific summary ops
713  like scalar() and image(), and is not intended for direct use unless defining
714  a new type-specific summary op.
715
716  Args:
717    tag: string tag used to identify the summary (e.g. in TensorBoard), usually
718      generated with `tf.summary.summary_scope`
719    tensor: the Tensor holding the summary data to write or a callable that
720      returns this Tensor. If a callable is passed, it will only be called when
721      a default SummaryWriter exists and the recording condition specified by
722      `record_if()` is met.
723    step: Explicit `int64`-castable monotonic step value for this summary. If
724      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
725      not be None.
726    metadata: Optional SummaryMetadata, as a proto or serialized bytes
727    name: Optional string name for this op.
728
729  Returns:
730    True on success, or false if no summary was written because no default
731    summary writer was available.
732
733  Raises:
734    ValueError: if a default writer exists, but no step was provided and
735      `tf.summary.experimental.get_step()` is None.
736  """
737  with ops.name_scope(name, "write_summary") as scope:
738    if _summary_state.writer is None:
739      return constant_op.constant(False)
740    if step is None:
741      step = get_step()
742    if metadata is None:
743      serialized_metadata = b""
744    elif hasattr(metadata, "SerializeToString"):
745      serialized_metadata = metadata.SerializeToString()
746    else:
747      serialized_metadata = metadata
748
749    def record():
750      """Record the actual summary and return True."""
751      if step is None:
752        raise ValueError("No step set via 'step' argument or "
753                         "tf.summary.experimental.set_step()")
754
755      # Note the identity to move the tensor to the CPU.
756      with ops.device("cpu:0"):
757        summary_tensor = tensor() if callable(tensor) else array_ops.identity(
758            tensor)
759        write_summary_op = gen_summary_ops.write_summary(
760            _summary_state.writer._resource,  # pylint: disable=protected-access
761            step,
762            summary_tensor,
763            tag,
764            serialized_metadata,
765            name=scope)
766        with ops.control_dependencies([write_summary_op]):
767          return constant_op.constant(True)
768
769    op = smart_cond.smart_cond(
770        should_record_summaries(), record, _nothing, name="summary_cond")
771    if not context.executing_eagerly():
772      ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
773    return op
774
775
@tf_export("summary.experimental.write_raw_pb", v1=[])
def write_raw_pb(tensor, step=None, name=None):
  """Writes a summary using raw `tf.compat.v1.Summary` protocol buffers.

  Experimental: this exists to support the usage of V1-style manual summary
  writing (via the construction of a `tf.compat.v1.Summary` protocol buffer)
  with the V2 summary writing API.

  Args:
    tensor: the string Tensor holding one or more serialized `Summary` protobufs
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    name: Optional string name for this op.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  with ops.name_scope(name, "write_raw_pb") as scope:
    # No default writer is active: report failure without raising.
    if _summary_state.writer is None:
      return constant_op.constant(False)
    if step is None:
      # Resolve the default step up front so a missing step surfaces at call
      # time rather than inside the conditionally-executed `record` closure.
      step = get_step()
      if step is None:
        raise ValueError("No step set via 'step' argument or "
                         "tf.summary.experimental.set_step()")

    def record():
      """Record the actual summary and return True."""
      # Note the identity to move the tensor to the CPU.
      with ops.device("cpu:0"):
        raw_summary_op = gen_summary_ops.write_raw_proto_summary(
            _summary_state.writer._resource,  # pylint: disable=protected-access
            step,
            array_ops.identity(tensor),
            name=scope)
        # Returning True behind a control dependency guarantees the write op
        # actually runs whenever the returned value is consumed.
        with ops.control_dependencies([raw_summary_op]):
          return constant_op.constant(True)

    with ops.device("cpu:0"):
      # Only run `record` when recording is enabled; `_nothing` yields False.
      op = smart_cond.smart_cond(
          should_record_summaries(), record, _nothing, name="summary_cond")
      if not context.executing_eagerly():
        # In graph mode, track the op so legacy collection-based runners can
        # fetch all pending summary writes.
        ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
      return op
826
827
def summary_writer_function(name, tensor, function, family=None):
  """Shared plumbing for the legacy contrib-style summary writers.

  Args:
    name: name of the summary
    tensor: main tensor to form the summary
    function: function taking a tag and a scope which writes the summary
    family: optional, the summary's family

  Returns:
    The result of writing the summary.
  """
  # Without a default writer there is nothing to do.
  if _summary_state.writer is None:
    return control_flow_ops.no_op()

  parent_scope = ops.get_name_scope()
  if parent_scope:
    # A trailing slash re-enters the existing name scope rather than
    # creating a fresh, uniquified one.
    parent_scope += "/"

  def _record():
    with ops.name_scope(parent_scope), summary_op_util.summary_scope(
        name, family, values=[tensor]) as (tag, scope):
      # The control dependency forces the write before True is produced.
      with ops.control_dependencies([function(tag, scope)]):
        return constant_op.constant(True)

  with ops.device("cpu:0"):
    op = smart_cond.smart_cond(
        _legacy_contrib_should_record_summaries(), _record, _nothing, name="")
    if not context.executing_eagerly():
      # Track the op for legacy collection-based summary running.
      ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
  return op
858
859
def generic(name, tensor, metadata=None, family=None, step=None):
  """Writes a tensor summary if possible."""

  def _write(tag, scope):
    # Accept a proto (anything with SerializeToString), an already-serialized
    # value, or no metadata at all.
    if metadata is not None and hasattr(metadata, "SerializeToString"):
      serialized_metadata = constant_op.constant(metadata.SerializeToString())
    elif metadata is None:
      serialized_metadata = constant_op.constant("")
    else:
      serialized_metadata = metadata
    # Identity moves the tensor onto the CPU before writing.
    return gen_summary_ops.write_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        array_ops.identity(tensor),
        tag,
        serialized_metadata,
        name=scope)

  return summary_writer_function(name, tensor, _write, family=family)
879
880
def scalar(name, tensor, family=None, step=None):
  """Writes a scalar summary if possible.

  Unlike `tf.contrib.summary.generic` this op may change the dtype
  depending on the writer, for both practical and efficiency concerns.

  Args:
    name: An arbitrary name for this summary.
    tensor: A `tf.Tensor` Must be one of the following types:
      `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`,
      `int8`, `uint16`, `half`, `uint32`, `uint64`.
    family: Optional, the summary's family.
    step: The `int64` monotonic step variable, which defaults
      to `tf.compat.v1.train.get_global_step`.

  Returns:
    The created `tf.Operation` or a `tf.no_op` if summary writing has
    not been enabled for this context.
  """

  def _write(tag, scope):
    # Identity moves the tensor onto the CPU before writing.
    return gen_summary_ops.write_scalar_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        name=scope)

  return summary_writer_function(name, tensor, _write, family=family)
911
912
def histogram(name, tensor, family=None, step=None):
  """Writes a histogram summary if possible."""

  def _write(tag, scope):
    # Identity moves the tensor onto the CPU before writing.
    return gen_summary_ops.write_histogram_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        name=scope)

  return summary_writer_function(name, tensor, _write, family=family)
926
927
def image(name, tensor, bad_color=None, max_images=3, family=None, step=None):
  """Writes an image summary if possible."""

  def _write(tag, scope):
    # Default "bad pixel" color is opaque red; only build the constant when
    # the caller did not supply one.
    color = (bad_color if bad_color is not None
             else constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8))
    # Identity moves the tensor onto the CPU before writing.
    return gen_summary_ops.write_image_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        color,
        max_images,
        name=scope)

  return summary_writer_function(name, tensor, _write, family=family)
945
946
def audio(name, tensor, sample_rate, max_outputs, family=None, step=None):
  """Writes an audio summary if possible."""

  def _write(tag, scope):
    # Identity moves the tensor onto the CPU before writing.
    return gen_summary_ops.write_audio_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        sample_rate=sample_rate,
        max_outputs=max_outputs,
        name=scope)

  return summary_writer_function(name, tensor, _write, family=family)
962
963
def graph_v1(param, step=None, name=None):
  """Writes a TensorFlow graph to the summary interface.

  The graph summary is, strictly speaking, not a summary. Conditions
  like `tf.summary.should_record_summaries` do not apply. Only
  a single graph can be associated with a particular run. If multiple
  graphs are written, then only the last one will be considered by
  TensorBoard.

  When not using eager execution mode, the user should consider passing
  the `graph` parameter to `tf.compat.v1.summary.initialize` instead of
  calling this function. Otherwise special care needs to be taken when
  using the graph to record the graph.

  Args:
    param: A `tf.Tensor` containing a serialized graph proto. When
      eager execution is enabled, this function will automatically
      coerce `tf.Graph`, `tf.compat.v1.GraphDef`, and string types.
    step: The global step variable. This doesn't have useful semantics
      for graph summaries, but is used anyway, due to the structure of
      event log files. This defaults to the global step.
    name: A name for the operation (optional).

  Returns:
    The created `tf.Operation` or a `tf.no_op` if summary writing has
    not been enabled for this context.

  Raises:
    TypeError: If `param` isn't already a `tf.Tensor` in graph mode.
  """
  # In graph mode we cannot coerce arbitrary objects into tensors here.
  if not context.executing_eagerly() and not isinstance(param, ops.Tensor):
    raise TypeError("graph() needs a tf.Tensor (e.g. tf.placeholder) in graph "
                    "mode, but was: %s" % type(param))
  writer = _summary_state.writer
  if writer is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    if isinstance(param, (ops.Graph, graph_pb2.GraphDef)):
      # Serialize graph objects to a string tensor ourselves.
      graph_tensor = ops.convert_to_tensor(
          _serialize_graph(param), dtypes.string)
    else:
      graph_tensor = array_ops.identity(param)
    return gen_summary_ops.write_graph_summary(
        writer._resource, _choose_step(step), graph_tensor, name=name)  # pylint: disable=protected-access
1007
1008
@tf_export("summary.graph", v1=[])
def graph(graph_data):
  """Writes a TensorFlow graph summary.

  Write an instance of `tf.Graph` or `tf.compat.v1.GraphDef` as summary only
  in an eager mode. Please prefer to use the trace APIs (`tf.summary.trace_on`,
  `tf.summary.trace_off`, and `tf.summary.trace_export`) when using
  `tf.function` which can automatically collect and record graphs from
  executions.

  Usage Example:
  ```py
  writer = tf.summary.create_file_writer("/tmp/mylogs")

  @tf.function
  def f():
    x = constant_op.constant(2)
    y = constant_op.constant(3)
    return x**y

  with writer.as_default():
    tf.summary.graph(f.get_concrete_function().graph)

  # Another example: in a very rare use case, when you are dealing with a TF v1
  # graph.
  graph = tf.Graph()
  with graph.as_default():
    c = tf.constant(30.0)
  with writer.as_default():
    tf.summary.graph(graph)
  ```

  Args:
    graph_data: The TensorFlow graph to write, as a `tf.Graph` or a
      `tf.compat.v1.GraphDef`.

  Returns:
    True on success, or False if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: `graph` summary API is invoked in a graph mode.
  """
  if not context.executing_eagerly():
    raise ValueError("graph() cannot be invoked inside a graph context.")
  writer = _summary_state.writer
  if writer is None:
    return constant_op.constant(False)
  with ops.device("cpu:0"):
    if not should_record_summaries():
      return constant_op.constant(False)

    # Only genuine graph objects are accepted; reject everything else.
    if not isinstance(graph_data, (ops.Graph, graph_pb2.GraphDef)):
      raise ValueError("'graph_data' is not tf.Graph or tf.compat.v1.GraphDef")

    serialized = ops.convert_to_tensor(
        _serialize_graph(graph_data), dtypes.string)
    gen_summary_ops.write_graph_summary(
        writer._resource,  # pylint: disable=protected-access
        # Graph does not have step. Set to 0.
        0,
        serialized,
    )
    return constant_op.constant(True)
1074
1075
def import_event(tensor, name=None):
  """Writes a `tf.compat.v1.Event` binary proto.

  This can be used to import existing event logs into a new summary writer sink.
  Please note that this is lower level than the other summary functions and
  will ignore the `tf.summary.should_record_summaries` setting.

  Args:
    tensor: A `tf.Tensor` of type `string` containing a serialized
      `tf.compat.v1.Event` proto.
    name: A name for the operation (optional).

  Returns:
    The created `tf.Operation`.
  """
  resource = _summary_state.writer._resource  # pylint: disable=protected-access
  return gen_summary_ops.import_event(resource, tensor, name=name)
1093
1094
@tf_export("summary.flush", v1=[])
def flush(writer=None, name=None):
  """Forces summary writer to send any buffered data to storage.

  This operation blocks until that finishes.

  Args:
    writer: The `tf.summary.SummaryWriter` to flush. If None, the current
      default writer will be used instead; if there is no current writer, this
      returns `tf.no_op`.
    name: Ignored legacy argument for a name for the operation.

  Returns:
    The created `tf.Operation`.
  """
  # Fall back to the default writer, and bail out if there is none.
  if writer is None:
    writer = _summary_state.writer
  if writer is None:
    return control_flow_ops.no_op()
  if isinstance(writer, SummaryWriter):
    return writer.flush()
  # Legacy fallback in case we were passed a raw resource tensor.
  with ops.device("cpu:0"):
    return gen_summary_ops.flush_summary_writer(writer, name=name)
1120
1121
def eval_dir(model_dir, name=None):
  """Construct a logdir for an eval summary writer."""
  # An unnamed eval goes in "eval"; a named one in "eval_<name>".
  subdir = "eval_" + name if name else "eval"
  return os.path.join(model_dir, subdir)
1125
1126
@deprecation.deprecated(date=None,
                        instructions="Renamed to create_file_writer().")
def create_summary_file_writer(*args, **kwargs):
  """Please use `tf.contrib.summary.create_file_writer`."""
  # Deprecated alias: warn at call time (in addition to the decorator's
  # deprecation notice) and delegate to the renamed API unchanged.
  logging.warning("Deprecation Warning: create_summary_file_writer was renamed "
                  "to create_file_writer")
  return create_file_writer(*args, **kwargs)
1134
1135
def _serialize_graph(arbitrary_graph):
  """Serializes a `tf.Graph` or a GraphDef-like proto to bytes."""
  if isinstance(arbitrary_graph, ops.Graph):
    # add_shapes=True attaches output-shape info to each node in the proto.
    arbitrary_graph = arbitrary_graph.as_graph_def(add_shapes=True)
  return arbitrary_graph.SerializeToString()
1141
1142
def _choose_step(step):
  """Returns `step` as a tensor, defaulting to the global step when None."""
  if step is None:
    return training_util.get_or_create_global_step()
  if isinstance(step, ops.Tensor):
    # Already a tensor; pass it through untouched.
    return step
  return ops.convert_to_tensor(step, dtypes.int64)
1149
1150
def _check_create_file_writer_args(inside_function, **kwargs):
  """Helper to check the validity of arguments to a create_file_writer() call.

  Args:
    inside_function: whether the create_file_writer() call is in a tf.function
    **kwargs: the arguments to check, as kwargs to give them names.

  Raises:
    ValueError: if the arguments are graph tensors.
  """
  for arg_name, arg in kwargs.items():
    # Graph (non-eager) tensors cannot be consumed by the lifted create call.
    is_graph_tensor = (tensor_util.is_tf_type(arg)
                       and not isinstance(arg, ops.EagerTensor))
    if not is_graph_tensor:
      continue
    if inside_function:
      raise ValueError(
          "Invalid graph Tensor argument \"%s=%s\" to create_file_writer() "
          "inside an @tf.function. The create call will be lifted into the "
          "outer eager execution context, so it cannot consume graph tensors "
          "defined inside the function body." % (arg_name, arg))
    raise ValueError(
        "Invalid graph Tensor argument \"%s=%s\" to eagerly executed "
        "create_file_writer()." % (arg_name, arg))
1173
1174
def run_metadata(name, data, step=None):
  """Writes entire RunMetadata summary.

  A RunMetadata can contain DeviceStats, partition graphs, and function graphs.
  Please refer to the proto for definition of each field.

  Args:
    name: A name for this summary. The summary tag used for TensorBoard will be
      this name prefixed by any active name scopes.
    data: A RunMetadata proto to write.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  metadata = summary_pb2.SummaryMetadata()
  # Plugin name is hard coded; please refer to go/tb-plugin-name-hardcode
  # for the rationale.
  metadata.plugin_data.plugin_name = "graph_run_metadata"
  metadata.plugin_data.content = b"1"  # version number = 1

  with summary_scope(name,
                     "graph_run_metadata_summary",
                     [data, step]) as (tag, _):
    with ops.device("cpu:0"):
      serialized = constant_op.constant(data.SerializeToString(),
                                        dtype=dtypes.string)
    return write(tag=tag, tensor=serialized, step=step, metadata=metadata)
1215
1216
def run_metadata_graphs(name, data, step=None):
  """Writes graphs from a RunMetadata summary.

  Args:
    name: A name for this summary. The summary tag used for TensorBoard will be
      this name prefixed by any active name scopes.
    data: A RunMetadata proto to write.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  metadata = summary_pb2.SummaryMetadata()
  # Plugin name is hard coded; please refer to go/tb-plugin-name-hardcode
  # for the rationale.
  metadata.plugin_data.plugin_name = "graph_run_metadata_graph"
  metadata.plugin_data.content = b"1"  # version number = 1

  # Keep only the graph-related fields of the RunMetadata proto.
  graphs_only = config_pb2.RunMetadata(
      function_graphs=data.function_graphs,
      partition_graphs=data.partition_graphs)

  with summary_scope(name,
                     "graph_run_metadata_graph_summary",
                     [graphs_only, step]) as (tag, _):
    with ops.device("cpu:0"):
      serialized = constant_op.constant(graphs_only.SerializeToString(),
                                        dtype=dtypes.string)
    return write(tag=tag, tensor=serialized, step=step, metadata=metadata)
1258
1259
# Holds the flags passed to trace_on() while a trace is active.
_TraceContext = collections.namedtuple("TraceContext", ("graph", "profiler"))
# Guards reads and writes of _current_trace_context across threads.
_current_trace_context_lock = threading.Lock()
# The active _TraceContext, or None when tracing is off.
_current_trace_context = None
1263
1264
@tf_export("summary.trace_on", v1=[])
def trace_on(graph=True, profiler=False):  # pylint: disable=redefined-outer-name
  """Starts a trace to record computation graphs and profiling information.

  Must be invoked in eager mode.

  When enabled, TensorFlow runtime will collection information that can later be
  exported and consumed by TensorBoard. The trace is activated across the entire
  TensorFlow runtime and affects all threads of execution.

  To stop the trace and export the collected information, use
  `tf.summary.trace_export`. To stop the trace without exporting, use
  `tf.summary.trace_off`.

  Args:
    graph: If True, enables collection of executed graphs. It includes ones from
        tf.function invocation and ones from the legacy graph mode. The default
        is True.
    profiler: If True, enables the advanced profiler. Enabling profiler
        implicitly enables the graph collection. The profiler may incur a high
        memory overhead. The default is False.

  """
  if ops.inside_function():
    logging.warn("Cannot enable trace inside a tf.function.")
    return
  if not context.executing_eagerly():
    logging.warn("Must enable trace in eager mode.")
    return

  global _current_trace_context
  with _current_trace_context_lock:
    if _current_trace_context:
      logging.warn("Trace already enabled")
      return

    if profiler:
      # Run-metadata collection implicitly covers graph collection.
      context.context().enable_run_metadata()
      _profiler.start()
    elif graph:
      context.context().enable_graph_collection()

    _current_trace_context = _TraceContext(graph=graph, profiler=profiler)
1308
1309
@tf_export("summary.trace_export", v1=[])
def trace_export(name, step=None, profiler_outdir=None):
  """Stops and exports the active trace as a Summary and/or profile file.

  Stops the trace and exports all metadata collected during the trace to the
  default SummaryWriter, if one has been set.

  Args:
    name: A name for the summary to be written.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    profiler_outdir: Output directory for profiler. It is required when profiler
      is enabled when trace was started. Otherwise, it is ignored.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  # TODO(stephanlee): See if we can remove profiler_outdir and infer it from
  # the SummaryWriter's logdir.
  global _current_trace_context

  if ops.inside_function():
    logging.warn("Cannot export trace inside a tf.function.")
    return
  if not context.executing_eagerly():
    logging.warn("Can only export trace while executing eagerly.")
    return

  # Read the trace flags under the lock; the export itself runs outside it.
  with _current_trace_context_lock:
    if _current_trace_context is None:
      raise ValueError("Must enable trace before export.")
    graph, profiler = _current_trace_context  # pylint: disable=redefined-outer-name
    if profiler and profiler_outdir is None:
      raise ValueError("Required profiler_outdir is not specified")

  collected = context.context().export_run_metadata()

  # Graph-only traces get the graph-only summary; anything involving the
  # profiler writes the full RunMetadata.
  if profiler or not graph:
    run_metadata(name, collected, step)
  else:
    run_metadata_graphs(name, collected, step)

  if profiler:
    _profiler.save(profiler_outdir, _profiler.stop())

  trace_off()
1358
1359
@tf_export("summary.trace_off", v1=[])
def trace_off():
  """Stops the current trace and discards any collected information."""
  global _current_trace_context
  # Atomically grab and clear the active context; cleanup happens unlocked.
  with _current_trace_context_lock:
    trace_context = _current_trace_context
    if trace_context is None:
      return  # tracing already off
    _current_trace_context = None

  if trace_context.graph:
    # Disabling run_metadata disables graph collection as well.
    context.context().disable_run_metadata()

  if trace_context.profiler:
    try:
      _profiler.stop()
    except _profiler.ProfilerNotRunningError:
      # The profiler may already have been stopped (e.g. by trace_export).
      pass
1379