• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15"""Reader class for tfdbg v2 debug events."""
16
17from __future__ import absolute_import
18from __future__ import division
19from __future__ import print_function
20
21import collections
22import os
23import threading
24
25import six
26
27from tensorflow.core.protobuf import debug_event_pb2
28from tensorflow.python.framework import errors
29from tensorflow.python.framework import tensor_util
30from tensorflow.python.lib.io import file_io
31from tensorflow.python.lib.io import tf_record
32from tensorflow.python.util import compat
33
34
35DebugEventWithOffset = collections.namedtuple(
36    "DebugEventWithOffset", "debug_event offset")
37
38
39class DebugEventsReader(object):
40  """Reader class for a tfdbg v2 DebugEvents directory."""
41
42  # Number of digests after which a read lock is released and re-acquired during
43  # serial reading of digests for SourceFiles, Execution, and
44  # GraphExecutionTrace. This allows us to avoid releasing and re-acquiring the
45  # lock too often (i.e., after each digest) and to minimize performance
46  # penalty.
47  _READER_RELEASE_PER = 100
48
49  _METADATA_SUFFIX = ".metadata"
50  _SOURCE_FILE_SUFFIX = ".source_files"
51  _STACK_FRAMES_SUFFIX = ".stack_frames"
52  _GRAPHS_SUFFIX = ".graphs"
53  _EXECUTION_SUFFIX = ".execution"
54  _GRAPH_EXECUTION_TRACES_SUFFIX = ".graph_execution_traces"
55
56  def __init__(self, dump_root):
57    if not file_io.is_directory(dump_root):
58      raise ValueError("Specified dump_root is not a directory: %s" % dump_root)
59    self._dump_root = dump_root
60    self._metadata_paths = self._load_metadata_files()
61
62    prefixes = [
63        metadata_path[:-len(self._METADATA_SUFFIX)]
64        for metadata_path in self._metadata_paths
65    ]
66    prefix = prefixes[0]  # This is the prefix of the main file set.
67    self._source_files_path = compat.as_bytes(prefix + self._SOURCE_FILE_SUFFIX)
68    self._stack_frames_path = compat.as_bytes(prefix +
69                                              self._STACK_FRAMES_SUFFIX)
70    self._graphs_path = compat.as_bytes(prefix + self._GRAPHS_SUFFIX)
71    self._execution_path = compat.as_bytes(prefix + self._EXECUTION_SUFFIX)
72    # There can be multiple .graph_execution_trace files each belonging
73    # to a file set generated on an individual host, in the case of
74    # a distributed TensorFlow job.
75    # This is different from the other debug event files in the file set.
76    self._graph_execution_traces_paths = [
77        compat.as_bytes(prefix + self._GRAPH_EXECUTION_TRACES_SUFFIX)
78        for prefix in prefixes
79    ]
80    self._readers = dict()  # A map from file path to reader.
81    # A map from file path to current reading offset.
82    self._reader_offsets = dict()
83    # Lock for reader creation.
84    self._readers_lock = threading.Lock()
85    # Locks for read operation on individual readers.
86    self._reader_read_locks = dict()
87
88    self._offsets = dict()
89
90  def _load_metadata_files(self):
91    """Load and parse metadata files in the dump root.
92
93    Check that all metadata files have a common tfdbg_run_id, and raise
94    a ValueError if their tfdbg_run_ids differ.
95
96    Returns:
97      A list of metadata file paths in ascending order of their starting
98        wall_time timestamp.
99    """
100
101    metadata_paths = file_io.get_matching_files(
102        os.path.join(self._dump_root, "*%s" % self._METADATA_SUFFIX))
103    if not metadata_paths:
104      raise ValueError("Cannot find any tfdbg metadata file in directory: %s" %
105                       self._dump_root)
106    wall_times = []
107    run_ids = []
108    tensorflow_versions = []
109    file_versions = []
110    for metadata_path in metadata_paths:
111      reader = tf_record.tf_record_random_reader(metadata_path)
112      try:
113        record = reader.read(0)[0]
114        debug_event = debug_event_pb2.DebugEvent.FromString(record)
115        wall_times.append(debug_event.wall_time)
116        run_ids.append(debug_event.debug_metadata.tfdbg_run_id)
117        tensorflow_versions.append(
118            debug_event.debug_metadata.tensorflow_version)
119        file_versions.append(debug_event.debug_metadata.file_version)
120      finally:
121        reader.close()
122    self._starting_wall_time = wall_times[0]
123    self._tfdbg_run_id = run_ids[0]
124    self._tensorflow_version = tensorflow_versions[0]
125    self._file_version = file_versions[0]
126    if len(metadata_paths) == 1:
127      # Fast path for a common case (only one DebugEvent file set.)
128      return metadata_paths
129
130    num_no_id = len([run_id for run_id in run_ids if not run_id])
131    if num_no_id:
132      paths_without_run_id = [
133          metadata_path
134          for metadata_path, run_id in zip(metadata_paths, run_ids)
135          if not run_id
136      ]
137      raise ValueError(
138          "Found %d tfdbg metadata files and %d of them do not "
139          "have tfdbg run ids. The metadata files without run ids are: %s" %
140          (len(run_ids), num_no_id, paths_without_run_id))
141    elif len(set(run_ids)) != 1:
142      raise ValueError(
143          "Unexpected: Found multiple (%d) tfdbg2 runs in directory %s" %
144          (len(set(run_ids)), self._dump_root))
145    # Return the metadata files in ascending order of their timestamps.
146    paths_and_timestamps = sorted(
147        zip(metadata_paths, wall_times), key=lambda t: t[1])
148    self._starting_wall_time = paths_and_timestamps[0][1]
149    return [path[0] for path in paths_and_timestamps]
150
151  def starting_wall_time(self):
152    """Get the starting timestamp of the instrumented TensorFlow program.
153
154    When there are multiple hosts (i.e., multiple tfdbg file sets), the earliest
155    timestamp among the file sets is returned. It is assumed to be the job that
156    starts first (e.g., the coordinator).
157
158    Returns:
159      Starting timestamp in seconds since the epoch, as a float.
160    """
161    return self._starting_wall_time
162
163  def tfdbg_run_id(self):
164    """Get the run ID of the instrumented TensorFlow program."""
165    return self._tfdbg_run_id
166
167  def tensorflow_version(self):
168    """Get the version string of TensorFlow that the debugged program ran on."""
169    return self._tensorflow_version
170
171  def tfdbg_file_version(self):
172    """Get the tfdbg file format version."""
173    return self._file_version
174
175  def __enter__(self):
176    return self
177
178  def __exit__(self, exception_type, exception_value, traceback):
179    del exception_type, exception_value, traceback  # Unused
180    self.close()
181
182  def _generic_iterator(self, file_path):
183    """A helper method that makes an iterator given a debug-events file path.
184
185    Repeated calls to this method create iterators that remember the last
186    successful reading position (offset) for each given `file_path`. So the
187    iterators are meant for incremental reading of the file.
188
189    Args:
190      file_path: Path to the file to create the iterator for.
191
192    Yields:
193      A tuple of (offset, debug_event_proto) on each `next()` call.
194    """
195    yield_count = 0
196    reader = self._get_reader(file_path)
197    read_lock = self._reader_read_locks[file_path]
198    read_lock.acquire()
199    try:
200      while True:
201        current_offset = self._reader_offsets[file_path]
202        try:
203          record, self._reader_offsets[file_path] = reader.read(current_offset)
204        except (errors.DataLossError, IndexError):
205          # We ignore partial read exceptions, because a record may be
206          # truncated. The PyRandomRecordReader throws an `IndexError` when
207          # offset goes out of bound.
208          break
209        yield DebugEventWithOffset(
210            debug_event=debug_event_pb2.DebugEvent.FromString(record),
211            offset=current_offset)
212        yield_count += 1
213        # The read lock must be periodically released to allow for concurrent
214        # random reads. But we do so at a number of reads, instead of after
215        # every single read, in order to minimize the performance penalty.
216        if yield_count % self._READER_RELEASE_PER == 0:
217          read_lock.release()
218          read_lock.acquire()
219    finally:
220      read_lock.release()
221
222  def _get_reader(self, file_path):
223    """Get a random-access reader for TFRecords file at file_path."""
224    file_path = compat.as_bytes(file_path)
225    # The following code uses the double-checked locking pattern to optimize
226    # the common case (where the reader is already initialized).
227    if file_path not in self._readers:  # 1st check, without lock.
228      with self._readers_lock:
229        if file_path not in self._readers:  # 2nd check, with lock.
230          self._readers[file_path] = tf_record.tf_record_random_reader(
231              file_path)
232          self._reader_read_locks[file_path] = threading.Lock()
233          self._reader_offsets[file_path] = 0
234    return self._readers[file_path]
235
236  def source_files_iterator(self):
237    return self._generic_iterator(self._source_files_path)
238
239  def stack_frames_iterator(self):
240    return self._generic_iterator(self._stack_frames_path)
241
242  def graphs_iterator(self):
243    return self._generic_iterator(self._graphs_path)
244
245  def read_source_files_event(self, offset):
246    """Read a DebugEvent proto at given offset from the .source_files file."""
247    with self._reader_read_locks[self._source_files_path]:
248      proto_string = self._get_reader(self._source_files_path).read(offset)[0]
249    return debug_event_pb2.DebugEvent.FromString(proto_string)
250
251  def read_graphs_event(self, offset):
252    """Read a DebugEvent proto at a given offset from the .graphs file.
253
254    Args:
255      offset: Offset to read the DebugEvent proto from.
256
257    Returns:
258      A DebugEventProto.
259
260    Raises:
261      `errors.DataLossError` if offset is at a wrong location.
262      `IndexError` if offset is out of range of the file.
263    """
264    return debug_event_pb2.DebugEvent.FromString(
265        self._get_reader(self._graphs_path).read(offset)[0])
266
267  def execution_iterator(self):
268    return self._generic_iterator(self._execution_path)
269
270  def read_execution_event(self, offset):
271    """Read a DebugEvent proto at a given offset from the .execution file.
272
273    Args:
274      offset: Offset to read the DebugEvent proto from.
275
276    Returns:
277      A DebugEventProto.
278
279    Raises:
280      `errors.DataLossError` if offset is at a wrong location.
281      `IndexError` if offset is out of range of the file.
282    """
283    with self._reader_read_locks[self._execution_path]:
284      proto_string = self._get_reader(self._execution_path).read(offset)[0]
285    return debug_event_pb2.DebugEvent.FromString(proto_string)
286
287  def graph_execution_traces_iterators(self):
288    return [
289        self._generic_iterator(path)
290        for path in self._graph_execution_traces_paths
291    ]
292
293  def read_graph_execution_traces_event(self, locator):
294    """Read DebugEvent at given offset from given .graph_execution_traces file.
295
296    Args:
297      locator: A (file_index, offset) tuple that locates the DebugEvent
298        containing the graph execution trace.
299
300    Returns:
301      A DebugEventProto.
302
303    Raises:
304      `errors.DataLossError` if offset is at a wrong location.
305      `IndexError` if offset is out of range of the file.
306    """
307    file_index, offset = locator
308    graph_execution_traces_path = self._graph_execution_traces_paths[file_index]
309    with self._reader_read_locks[graph_execution_traces_path]:
310      proto_string = self._get_reader(graph_execution_traces_path).read(
311          offset)[0]
312    return debug_event_pb2.DebugEvent.FromString(proto_string)
313
314  def close(self):
315    with self._readers_lock:
316      file_paths = list(self._readers.keys())
317      for file_path in file_paths:
318        self._readers[file_path].close()
319        del self._readers[file_path]
320
321
322class BaseDigest(object):
323  """Base class for digest.
324
325  Properties:
326    wall_time: A timestamp for the digest as a `float` (unit: s).
327    locator: A datum that allows tracng the digest to its original
328      location. It can be either of the two:
329       1. Bytes offset from the beginning of the file as a single integer,
330          for the case of all digests of the same kind coming from the same
331          file.
332       2. A tuple of a file index and a byte offset. This applies to case
333          in which the same type of debugger data may come from multple files,
334          e.g., graph execution traces.
335  """
336
337  def __init__(self, wall_time, locator):
338    self._wall_time = wall_time
339    self._locator = locator
340
341  @property
342  def wall_time(self):
343    return self._wall_time
344
345  @property
346  def locator(self):
347    return self._locator
348
349  def to_json(self):
350    return {"wall_time": self.wall_time}
351
352
353class ExecutionDigest(BaseDigest):
354  """Light-weight digest summarizing top-level execution event.
355
356  Use `DebugDataReader.read_execution(execution_digest)` to load the more
357  detailed data object concerning the execution event (`Execution`).
358
359  Properties:
360    op_type: Type name of the executed op. In the case of the eager execution of
361      an individual op, it is the name of the op (e.g., "MatMul").
362      In the case of the execution of a tf.function (FuncGraph), this is the
363      internally-generated name of the function (e.g.,
364      "__inference_my_func_123").
365    output_tensor_device_ids: IDs of the devices on which the output tensors of
366      the execution reside. For no-output execution, this is `None`.
367  """
368
369  def __init__(self,
370               wall_time,
371               locator,
372               op_type,
373               output_tensor_device_ids=None):
374    super(ExecutionDigest, self).__init__(wall_time, locator)
375    self._op_type = op_type
376    self._output_tensor_device_ids = _tuple_or_none(output_tensor_device_ids)
377
378  @property
379  def op_type(self):
380    return self._op_type
381
382  @property
383  def output_tensor_device_ids(self):
384    return self._output_tensor_device_ids
385
386  def to_json(self):
387    output = super(ExecutionDigest, self).to_json()
388    output.update({
389        "op_type": self.op_type,
390        "output_tensor_device_ids": self.output_tensor_device_ids,
391    })
392    return output
393
394
395def _tuple_or_none(data):
396  return tuple(data) if data else None
397
398
399class Execution(ExecutionDigest):
400  """Detailed data relating to a top-level execution event.
401
402  The execution is of an individual op or a tf.function, which may have any
403  number of output tensors.
404
405  Properties (beyond the base class `ExecutionDigest`):
406    host_name: Name of the host on which the execution happened.
407    stack_frame_ids: Reference IDs for stack frames, ordered from bottommost to
408      topmost. Use `DebugDataReader.read_execution_stack_trace()` to load the
409      detailed stack frames (filepath, lineno and function name).
410    tensor_debug_mode: TensorDebugMode enum value, as an `int`.
411    graph_id: ID of the executed FuncGraph (applicable only the execution of a
412      tf.function). `None` for the eager execution of an individual op.
413    input_tensor_ids: IDs of the input (eager) tensor(s) for this execution, if
414      any. If the eager execution has no input tensor, this is `None`. Else,
415      this is a `tuple` of `int`s.
416    output_tensor_ids: IDs of the output (eager) tensor(s) from this execution,
417      if any. If the eager execution produces no output tensor, this is `None`.
418      Else, this is a `tuple` of `int`s.
419    debug_tensor_values: Values of the debug tensor(s), applicable only to
420      non-FULL_TENSOR tensor debug mode. A tuple of list of numbers. Each
421      element of the tuple corresponds to an output tensor of the execution.
422      See documentation of the various TensorDebugModes for the semantics of the
423      numbers. If the eager execution produces no output tensor, this is
424      `None`. Else, this is a `tuple` of `list`s.
425  """
426
427  def __init__(self,
428               execution_digest,
429               host_name,
430               stack_frame_ids,
431               tensor_debug_mode,
432               graph_id=None,
433               input_tensor_ids=None,
434               output_tensor_ids=None,
435               debug_tensor_values=None):
436    super(Execution, self).__init__(
437        execution_digest.wall_time,
438        execution_digest.locator,
439        execution_digest.op_type,
440        output_tensor_device_ids=execution_digest.output_tensor_device_ids)
441    self._host_name = host_name
442    self._stack_frame_ids = tuple(stack_frame_ids)
443    self._tensor_debug_mode = tensor_debug_mode
444    self._graph_id = graph_id
445    self._input_tensor_ids = _tuple_or_none(input_tensor_ids)
446    self._output_tensor_ids = _tuple_or_none(output_tensor_ids)
447    self._debug_tensor_values = _tuple_or_none(debug_tensor_values)
448
449  @property
450  def host_name(self):
451    return self._host_name
452
453  @property
454  def stack_frame_ids(self):
455    return self._stack_frame_ids
456
457  @property
458  def tensor_debug_mode(self):
459    return self._tensor_debug_mode
460
461  @property
462  def graph_id(self):
463    return self._graph_id
464
465  @property
466  def input_tensor_ids(self):
467    return self._input_tensor_ids
468
469  @property
470  def num_outputs(self):
471    return len(self._output_tensor_ids) if self._output_tensor_ids else 0
472
473  @property
474  def output_tensor_ids(self):
475    return self._output_tensor_ids
476
477  @property
478  def debug_tensor_values(self):
479    return self._debug_tensor_values
480
481  def to_json(self):
482    output = super(Execution, self).to_json()
483    output.update({
484        "host_name": self.host_name,
485        "stack_frame_ids": self.stack_frame_ids,
486        "tensor_debug_mode": self.tensor_debug_mode,
487        "graph_id": self.graph_id,
488        "input_tensor_ids": self.input_tensor_ids,
489        "output_tensor_ids": self.output_tensor_ids,
490        "debug_tensor_values": self.debug_tensor_values,
491    })
492    return output
493
494
495class DebuggedGraph(object):
496  """Data object representing debugging information about a tf.Graph.
497
498  Includes `FuncGraph`s.
499
500  Properties:
501    name: Name of the graph (if any). May be `None` for non-function graphs.
502    graph_id: Debugger-generated ID for the graph.
503    inner_graph_ids: A list of the debugger-generated IDs for the graphs
504      enclosed by this graph.
505    outer_graph_id: If this graph is nested within an outer graph, ID of the
506      outer graph. If this is an outermost graph, `None`.
507  """
508
509  def __init__(self,
510               name,
511               graph_id,
512               outer_graph_id=None):
513    self._name = name
514    self._graph_id = graph_id
515    self._outer_graph_id = outer_graph_id
516    self._inner_graph_ids = []
517    # A dictionary from op name to GraphOpCreationDigest.
518    self._op_by_name = dict()
519    # A dictionary mapping op to immediate downstream consumers.
520    self._op_consumers = collections.defaultdict(list)
521
522  def add_inner_graph_id(self, inner_graph_id):
523    """Add the debugger-generated ID of a graph nested within this graph.
524
525    Args:
526      inner_graph_id: The debugger-generated ID of the nested inner graph.
527    """
528    assert isinstance(inner_graph_id, six.string_types)
529    self._inner_graph_ids.append(inner_graph_id)
530
531  def add_op(self, graph_op_creation_digest):
532    """Add an op creation data object.
533
534    Args:
535      graph_op_creation_digest: A GraphOpCreationDigest data object describing
536        the creation of an op inside this graph.
537    """
538    if graph_op_creation_digest.op_name in self._op_by_name:
539      raise ValueError(
540          "Duplicate op name: %s (op type: %s)" %
541          (graph_op_creation_digest.op_name, graph_op_creation_digest.op_type))
542    self._op_by_name[
543        graph_op_creation_digest.op_name] = graph_op_creation_digest
544
545  def add_op_consumer(self, src_op_name, src_slot, dst_op_name, dst_slot):
546    """Add a consuming op for this op.
547
548    Args:
549      src_op_name: Name of the op of which the output tensor is being consumed.
550      src_slot: 0-based output slot of the op being consumed.
551      dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")
552      dst_slot: 0-based input slot of the consuming op that receives the tensor
553        from this op.
554    """
555    self._op_consumers[src_op_name].append((src_slot, dst_op_name, dst_slot))
556
557  @property
558  def name(self):
559    return self._name
560
561  @property
562  def graph_id(self):
563    return self._graph_id
564
565  @property
566  def outer_graph_id(self):
567    return self._outer_graph_id
568
569  @property
570  def inner_graph_ids(self):
571    return self._inner_graph_ids
572
573  def get_tensor_id(self, op_name, output_slot):
574    """Get the ID of a symbolic tensor in this graph."""
575    return self._op_by_name[op_name].output_tensor_ids[output_slot]
576
577  def get_op_creation_digest(self, op_name):
578    """Get the GraphOpCreationDigest for a op in the graph."""
579    return self._op_by_name[op_name]
580
581  def get_op_consumers(self, src_op_name):
582    """Get all the downstream consumers of this op.
583
584    Only data (non-control) edges are tracked.
585
586    Args:
587      src_op_name: Name of the op providing the tensor being consumed.
588
589    Returns:
590      A list of (src_slot, dst_op_name, dst_slot) tuples. In each item of
591      the list:
592        src_slot: 0-based output slot of the op of which the output tensor
593          is being consumed.
594        dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")
595        dst_slot: 0-based input slot of the consuming op that receives
596          the tensor from this op.
597    """
598    return self._op_consumers[src_op_name]
599
600  def to_json(self):
601    return {
602        "name": self.name,
603        "graph_id": self.graph_id,
604        "outer_graph_id": self._outer_graph_id,
605        "inner_graph_ids": self._inner_graph_ids,
606    }
607
608
609class DebuggedDevice(object):
610  """Debugger data regarding a device involved in the debugged program.
611
612  Properties:
613    device_name: Name of the device, as a str.
614    device_id: An integer ID for the device, unique for each device within
615      the scope of the debugged TensorFlow program.
616  """
617
618  def __init__(self,
619               device_name,
620               device_id):
621    self._device_name = device_name
622    self._device_id = device_id
623
624  @property
625  def device_name(self):
626    return self._device_name
627
628  @property
629  def device_id(self):
630    return self._device_id
631
632  def to_json(self):
633    return {
634        "device_name": self._device_name,
635        "device_id": self._device_id,
636    }
637
638
639class GraphOpCreationDigest(BaseDigest):
640  """Data object describing the creation of an op inside a graph.
641
642  For size efficiency, this digest object does not contain any stack frames or
643  any references to them. To obtain the stack frames, use
644  `DataReader.read_graph_op_creation_stack_trace()`.
645
646  Properties (beyond the base class):
647    graph_id: Debugger-generated ID of the immediately-enclosing graph.
648    op_type: Type name of the op (e.g., "MatMul").
649    op_name: Name of the op (e.g., "dense_1/MatMul").
650    output_tensor_ids: Debugger-generated IDs for the output(s) of the op.
651      If the op produces no output tensor, this is `None`. Else, this is a
652      `tuple` of `int`s.
653    input_names: Names of the input tensors to the op.
654    device_name: The name of the device that the op is placed on (if available).
655    host_name: Name of the host on which the op is created.
656    stack_frame_ids: IDs of the frames of the stack trace at which the op
657      is created.
658  """
659
660  def __init__(self,
661               wall_time,
662               locator,
663               graph_id,
664               op_type,
665               op_name,
666               output_tensor_ids,
667               host_name,
668               stack_frame_ids,
669               input_names=None,
670               device_name=None):
671    super(GraphOpCreationDigest, self).__init__(wall_time, locator)
672    self._graph_id = graph_id
673    self._op_type = op_type
674    self._op_name = op_name
675    self._output_tensor_ids = _tuple_or_none(output_tensor_ids)
676    self._host_name = host_name
677    self._stack_frame_ids = stack_frame_ids
678    self._input_names = _tuple_or_none(input_names)
679    self._device_name = device_name
680
681  @property
682  def graph_id(self):
683    return self._graph_id
684
685  @property
686  def op_type(self):
687    return self._op_type
688
689  @property
690  def op_name(self):
691    return self._op_name
692
693  @property
694  def output_tensor_ids(self):
695    return self._output_tensor_ids
696
697  @property
698  def num_outputs(self):
699    return len(self._output_tensor_ids) if self.output_tensor_ids else 0
700
701  @property
702  def input_names(self):
703    return self._input_names
704
705  @property
706  def device_name(self):
707    return self._device_name
708
709  @property
710  def host_name(self):
711    return self._host_name
712
713  @property
714  def stack_frame_ids(self):
715    return self._stack_frame_ids
716
717  def to_json(self):
718    output = super(GraphOpCreationDigest, self).to_json()
719    output.update({
720        "graph_id": self.graph_id,
721        "op_type": self.op_type,
722        "op_name": self.op_name,
723        "output_tensor_ids": self.output_tensor_ids,
724        "host_name": self.host_name,
725        "stack_frame_ids": self.stack_frame_ids,
726        "input_names": self.input_names,
727        "device_name": self.device_name,
728    })
729    return output
730
731
732class GraphExecutionTraceDigest(BaseDigest):
733  """Light-weight summary of a intra-graph tensor execution event.
734
735  Use `DebugDataReader.read_graph_execution_trace()` on this object to read more
736  detailed data (`GraphExecutionTrace`).
737
738  Properties (beyond the base class):
739    op_type: Type name of the executed op (e.g., "Conv2D").
740    op_name: Name of the op (e.g., "conv_2d_3/Conv2D").
741    output_slot: Output slot index of the tensor.
742    graph_id: The debugger-generated ID of the innermost (immediately-enclosing)
743      graph.
744  """
745
746  def __init__(self, wall_time, locator, op_type, op_name, output_slot,
747               graph_id):
748    super(GraphExecutionTraceDigest, self).__init__(wall_time, locator)
749    self._op_type = op_type
750    self._op_name = op_name
751    self._output_slot = output_slot
752    self._graph_id = graph_id
753
754  @property
755  def op_type(self):
756    return self._op_type
757
758  @property
759  def op_name(self):
760    return self._op_name
761
762  @property
763  def output_slot(self):
764    return self._output_slot
765
766  @property
767  def graph_id(self):
768    return self._graph_id
769
770  def to_json(self):
771    output = super(GraphExecutionTraceDigest, self).to_json()
772    output.update({
773        "op_type": self.op_type,
774        "op_name": self.op_name,
775        "output_slot": self.output_slot,
776        "graph_id": self.graph_id,
777    })
778    return output
779
780
781class GraphExecutionTrace(GraphExecutionTraceDigest):
782  """Detailed data object describing an intra-graph tensor execution.
783
784  Attributes (in addition to GraphExecutionTraceDigest):
785    graph_ids: The debugger-generated IDs of the graphs that enclose the
786      executed op (tensor), ordered from the outermost to the innermost.
787    graph_id: The debugger-generated ID of the innermost (immediately-enclosing)
788      graph.
789    tensor_debug_mode: TensorDebugMode enum value.
790    debug_tensor_value: Debug tensor values (only for non-FULL_TENSOR
791      tensor_debug_mode). A list of numbers. See the documentation of the
792      TensorDebugModes for the semantics of the numbers.
793    device_name: Device on which the tensor resides (if available)
794  """
795
796  def __init__(self,
797               graph_execution_trace_digest,
798               graph_ids,
799               tensor_debug_mode,
800               debug_tensor_value=None,
801               device_name=None):
802    super(GraphExecutionTrace,
803          self).__init__(graph_execution_trace_digest.wall_time,
804                         graph_execution_trace_digest.locator,
805                         graph_execution_trace_digest.op_type,
806                         graph_execution_trace_digest.op_name,
807                         graph_execution_trace_digest.output_slot,
808                         graph_execution_trace_digest.graph_id)
809    self._graph_ids = tuple(graph_ids)
810    self._tensor_debug_mode = tensor_debug_mode
811    self._debug_tensor_value = debug_tensor_value
812    self._device_name = device_name
813
814  @property
815  def graph_ids(self):
816    return self._graph_ids
817
818  @property
819  def graph_id(self):
820    return self._graph_ids[-1]
821
822  @property
823  def tensor_debug_mode(self):
824    return self._tensor_debug_mode
825
826  @property
827  def debug_tensor_value(self):
828    return _tuple_or_none(self._debug_tensor_value)
829
830  @property
831  def device_name(self):
832    return self._device_name
833
834  def to_json(self):
835    output = super(GraphExecutionTrace, self).to_json()
836    output.update({
837        "graph_ids": self.graph_ids,
838        "tensor_debug_mode": self.tensor_debug_mode,
839        "debug_tensor_value": self.debug_tensor_value,
840        "device_name": self.device_name,
841    })
842    return output
843
844
845def _parse_tensor_value(tensor_proto, return_list=False):
846  """Helper method for reading a tensor value from a tensor proto.
847
848  The rationale for the distinction between `True` and `False value of
849  `return_list` is as follows:
850  - `return_list=True` is used for TensorDebugMode values other than
851    FULL_TENSOR, e.g., CONCISE_HEALTH, SHAPE and FULL_HEATLH. Under
852    those modes, the value is guaranteed (by contract) to be a 1D float64
853    tensor.
854  - `return_list=False` is used for the FULL_HEALTH TensorDebugMode
855    specifically. Instead, we use `numpy.ndarray` to maximally preserve
856    the shape, dtype and value information regarding the underlying tensor
857    value. Under that mode, we don't use a python list to represent the
858    tensor value because that can lead to loss of information (e.g., both
859    float16 and float32 dtypes get mapped to Python floats).
860
861  Args:
862    tensor_proto: The TensorProto instance from which the tensor value will be
863      loaded.
864    return_list: Whether the return value will be a nested Python list that
865      comes out from `numpy.ndarray.tolist()`.
866
867  Returns:
868    If parsing is successful, the tensor value as a `numpy.ndarray` or the
869      nested Python list converted from it.
870    If parsing fails, `None`.
871  """
872  try:
873    ndarray = tensor_util.MakeNdarray(tensor_proto)
874    return ndarray.tolist() if return_list else ndarray
875  except TypeError:
876    # Depending on tensor_debug_mode, certain dtype of tensors don't
877    # have logged debug tensor values.
878    return None
879
880
881def _execution_digest_from_debug_event_proto(debug_event, locator):
882  """Convert a DebugEvent proto into an ExecutionDigest data object."""
883  return ExecutionDigest(
884      debug_event.wall_time,
885      locator,
886      debug_event.execution.op_type,
887      output_tensor_device_ids=(debug_event.execution.output_tensor_device_ids
888                                or None))
889
890
891def _execution_from_debug_event_proto(debug_event, locator):
892  """Convert a DebugEvent proto into an Execution data object."""
893  execution_proto = debug_event.execution
894
895  debug_tensor_values = None
896  if (execution_proto.tensor_debug_mode ==
897      debug_event_pb2.TensorDebugMode.FULL_TENSOR):
898    pass  # TODO(cais): Build tensor store.
899  elif (execution_proto.tensor_debug_mode !=
900        debug_event_pb2.TensorDebugMode.NO_TENSOR):
901    debug_tensor_values = []
902    for tensor_proto in execution_proto.tensor_protos:
903      # TODO(cais): Refactor into a helper method.
904      debug_tensor_values.append(
905          _parse_tensor_value(tensor_proto, return_list=True))
906  return Execution(
907      _execution_digest_from_debug_event_proto(debug_event, locator),
908      execution_proto.code_location.host_name,
909      tuple(execution_proto.code_location.stack_frame_ids),
910      execution_proto.tensor_debug_mode,
911      graph_id=execution_proto.graph_id,
912      input_tensor_ids=tuple(execution_proto.input_tensor_ids),
913      output_tensor_ids=tuple(execution_proto.output_tensor_ids),
914      debug_tensor_values=_tuple_or_none(debug_tensor_values))
915
916
917class DebugDataReader(object):
918  """A reader that reads structured debugging data in the tfdbg v2 format.
919
920  The set of data read by an object of this class concerns the execution history
921  of a tfdbg2-instrumented TensorFlow program.
922
923  Note:
924    - An object of this class incrementally reads data from files that belong to
925      the tfdbg v2 DebugEvent file set. Calling `update()` triggers the reading
926      from the last-successful reading positions in the files.
927    - This object can be used as a context manager. Its `__exit__()` call
928      closes the file readers cleanly.
929  """
930
931  def __init__(self, dump_root):
932    self._reader = DebugEventsReader(dump_root)
933
934    # TODO(cais): Implement pagination for memory constraints.
935    self._execution_digests = []
936
937    # Mapping (host_name, file_path) tuple to offset in the .source_files file.
938    self._host_name_file_path_to_offset = collections.OrderedDict()
939    # A dict mapping id to (host_name, file_path, lineno, func) tuple.
940    self._stack_frame_by_id = dict()
941    # Stores unprocessed stack frame IDs. This is necessary to handle the
942    # case in which reading of the .stack_frames file gets ahead of the reading
943    # of the .source_files file.
944    self._unprocessed_stack_frames = dict()
945    # A dict mapping id to DebuggedDevice objects.
946    self._device_by_id = dict()
947    # A dict mapping id to DebuggedGraph objects.
948    self._graph_by_id = dict()
949    self._graph_op_digests = []
950    # TODO(cais): Implement pagination for memory constraints.
951    self._graph_execution_trace_digests = []
952
953    self._monitors = []
954
955  def _add_monitor(self, monitor):
956    self._monitors.append(monitor)
957
958  def _load_source_files(self):
959    """Incrementally read the .source_files DebugEvent file."""
960    source_files_iter = self._reader.source_files_iterator()
961    for debug_event, offset in source_files_iter:
962      source_file = debug_event.source_file
963      self._host_name_file_path_to_offset[
964          (source_file.host_name, source_file.file_path)] = offset
965
966  def _load_stack_frames(self):
967    """Incrementally read the .stack_frames file.
968
969    This must be called after _load_source_files().
970    It assumes that the following contract is honored by the writer of the tfdbg
971    v2 data file set:
972      - Before a stack frame is written to the .stack_frames file, the
973        corresponding source file information must have been written to the
974        .source_files file first.
975    """
976    stack_frames_iter = self._reader.stack_frames_iterator()
977    for debug_event, _ in stack_frames_iter:
978      stack_frame_with_id = debug_event.stack_frame_with_id
979      file_line_col = stack_frame_with_id.file_line_col
980      self._unprocessed_stack_frames[stack_frame_with_id.id] = file_line_col
981    # We do the processing in a separate stage, because the reading in the
982    # .source_files file may sometimes get ahead of the .source_files file.
983    unprocessed_stack_frame_ids = tuple(self._unprocessed_stack_frames.keys())
984    for stack_frame_id in unprocessed_stack_frame_ids:
985      file_line_col = self._unprocessed_stack_frames[stack_frame_id]
986      if len(self._host_name_file_path_to_offset) > file_line_col.file_index:
987        host_name, file_path = list(self._host_name_file_path_to_offset.keys())[
988            file_line_col.file_index]
989        self._stack_frame_by_id[stack_frame_id] = (
990            host_name, file_path, file_line_col.line, file_line_col.func)
991      del self._unprocessed_stack_frames[stack_frame_id]
992
993  def _load_graphs(self):
994    """Incrementally read the .graphs file.
995
996    Compiles the DebuggedGraph and GraphOpCreation data.
997    """
998    graphs_iter = self._reader.graphs_iterator()
999    for debug_event, offset in graphs_iter:
1000      if debug_event.graph_op_creation.ByteSize():
1001        op_creation_proto = debug_event.graph_op_creation
1002        op_digest = GraphOpCreationDigest(
1003            debug_event.wall_time,
1004            offset,
1005            op_creation_proto.graph_id,
1006            op_creation_proto.op_type,
1007            op_creation_proto.op_name,
1008            tuple(op_creation_proto.output_tensor_ids),
1009            op_creation_proto.code_location.host_name,
1010            tuple(op_creation_proto.code_location.stack_frame_ids),
1011            input_names=tuple(op_creation_proto.input_names))
1012        self._graph_op_digests.append(op_digest)
1013        debugged_graph = self._graph_by_id[op_creation_proto.graph_id]
1014        debugged_graph.add_op(op_digest)
1015        for dst_slot, input_name in enumerate(op_creation_proto.input_names):
1016          src_op_name, src_slot = input_name.split(":")
1017          debugged_graph.add_op_consumer(src_op_name, int(src_slot),
1018                                         op_creation_proto.op_name, dst_slot)
1019
1020      elif debug_event.debugged_graph.ByteSize():
1021        graph_proto = debug_event.debugged_graph
1022        graph = DebuggedGraph(
1023            graph_proto.graph_name or None,
1024            graph_proto.graph_id,
1025            outer_graph_id=graph_proto.outer_context_id or None)
1026        self._graph_by_id[graph_proto.graph_id] = graph
1027        if graph_proto.outer_context_id:
1028          self._graph_by_id[
1029              graph_proto.outer_context_id].add_inner_graph_id(graph.graph_id)
1030      elif debug_event.debugged_device.ByteSize():
1031        device_proto = debug_event.debugged_device
1032        self._device_by_id[device_proto.device_id] = DebuggedDevice(
1033            device_proto.device_name, device_proto.device_id)
1034
1035  def _load_graph_execution_traces(self):
1036    """Incrementally load the .graph_execution_traces file."""
1037    for i, traces_iter in enumerate(
1038        self._reader.graph_execution_traces_iterators()):
1039      for debug_event, offset in traces_iter:
1040        self._graph_execution_trace_digests.append(
1041            self._graph_execution_trace_digest_from_debug_event_proto(
1042                debug_event, (i, offset)))
1043        if self._monitors:
1044          graph_execution_trace = (
1045              self._graph_execution_trace_from_debug_event_proto(
1046                  debug_event, (i, offset)))
1047          for monitor in self._monitors:
1048            monitor.on_graph_execution_trace(
1049                len(self._graph_execution_trace_digests) - 1,
1050                graph_execution_trace)
1051
1052  def _graph_execution_trace_digest_from_debug_event_proto(
1053      self, debug_event, locator):
1054    trace_proto = debug_event.graph_execution_trace
1055    op_name = trace_proto.op_name
1056    op_type = self._lookup_op_type(trace_proto.tfdbg_context_id, op_name)
1057    return GraphExecutionTraceDigest(
1058        debug_event.wall_time, locator, op_type, op_name,
1059        trace_proto.output_slot,
1060        debug_event.graph_execution_trace.tfdbg_context_id)
1061
1062  def _graph_execution_trace_from_debug_event_proto(self, debug_event, locator):
1063    """Convert a DebugEvent proto into a GraphExecutionTrace data object."""
1064    trace_proto = debug_event.graph_execution_trace
1065    graph_ids = [trace_proto.tfdbg_context_id]
1066    # Walk up the chain of outer contexts (graphs), so as to include all of
1067    # their IDs
1068    while True:
1069      graph = self.graph_by_id(graph_ids[0])
1070      if graph.outer_graph_id:
1071        graph_ids.insert(0, graph.outer_graph_id)
1072      else:
1073        break
1074
1075    if (trace_proto.tensor_debug_mode ==
1076        debug_event_pb2.TensorDebugMode.FULL_TENSOR):
1077      debug_tensor_value = None
1078    else:
1079      debug_tensor_value = _parse_tensor_value(
1080          trace_proto.tensor_proto, return_list=True)
1081    return GraphExecutionTrace(
1082        self._graph_execution_trace_digest_from_debug_event_proto(
1083            debug_event, locator),
1084        graph_ids=graph_ids,
1085        tensor_debug_mode=trace_proto.tensor_debug_mode,
1086        debug_tensor_value=debug_tensor_value,
1087        device_name=trace_proto.device_name or None)
1088
1089  def _lookup_op_type(self, graph_id, op_name):
1090    """Lookup the type of an op by name and the immediately enclosing graph.
1091
1092    Args:
1093      graph_id: Debugger-generated ID of the immediately-enclosing graph.
1094      op_name: Name of the op.
1095
1096    Returns:
1097      Op type as a str.
1098    """
1099    return self._graph_by_id[graph_id].get_op_creation_digest(op_name).op_type
1100
1101  def _load_execution(self):
1102    """Incrementally read the .execution file."""
1103    execution_iter = self._reader.execution_iterator()
1104    for debug_event, offset in execution_iter:
1105      self._execution_digests.append(
1106          _execution_digest_from_debug_event_proto(debug_event, offset))
1107      if self._monitors:
1108        execution = _execution_from_debug_event_proto(debug_event, offset)
1109        for monitor in self._monitors:
1110          monitor.on_execution(len(self._execution_digests) - 1, execution)
1111
1112  def update(self):
1113    """Perform incremental read of the file set."""
1114    self._load_source_files()
1115    self._load_stack_frames()
1116    self._load_graphs()
1117    self._load_graph_execution_traces()
1118    self._load_execution()
1119
1120  def source_file_list(self):
1121    """Get a list of source files known to the debugger data reader.
1122
1123    Returns:
1124      A tuple of `(host_name, file_path)` tuples.
1125    """
1126    return tuple(self._host_name_file_path_to_offset.keys())
1127
1128  def source_lines(self, host_name, file_path):
1129    """Read the line-by-line content of a source file.
1130
1131    Args:
1132      host_name: Host name on which the source file is located.
1133      file_path: File path at which the source file is located.
1134
1135    Returns:
1136      Lines of the source file as a `list` of `str`s.
1137    """
1138    offset = self._host_name_file_path_to_offset[(host_name, file_path)]
1139    return list(self._reader.read_source_files_event(offset).source_file.lines)
1140
1141  def starting_wall_time(self):
1142    """Wall timestamp for when the debugged TensorFlow program started.
1143
1144    Returns:
1145      Stating wall time as seconds since the epoch, as a `float`.
1146    """
1147    return self._reader.starting_wall_time()
1148
1149  def tensorflow_version(self):
1150    """TensorFlow version used in the debugged TensorFlow program.
1151
1152    Note: this is not necessarily the same as the version of TensorFlow used to
1153    load the DebugEvent file set.
1154
1155    Returns:
1156      TensorFlow version used by the debugged program, as a `str`.
1157    """
1158    return self._reader.tensorflow_version()
1159
1160  def tfdbg_run_id(self):
1161    """Get the debugger run ID of the debugged TensorFlow program."""
1162    return self._reader.tfdbg_run_id()
1163
1164  def outermost_graphs(self):
1165    """Get the number of outer most graphs read so far."""
1166    return [graph for graph in self._graph_by_id.values()
1167            if not graph.outer_graph_id]
1168
1169  def graph_by_id(self, graph_id):
1170    """Get a DebuggedGraph object by its ID."""
1171    return self._graph_by_id[graph_id]
1172
1173  def device_name_by_id(self, device_id):
1174    """Get the name of a device by the debugger-generated ID of the device."""
1175    return self._device_by_id[device_id].device_name
1176
1177  def device_name_map(self):
1178    """Get a map mapping device IDs to device names."""
1179    return {device_id: self._device_by_id[device_id].device_name
1180            for device_id in self._device_by_id}
1181
1182  def graph_op_digests(self, op_type=None):
1183    """Get the list of the digests for graph-op creation so far.
1184
1185    Args:
1186      op_type: Optional op type to filter the creation events with.
1187
1188    Returns:
1189      A list of `GraphOpCreationDigest` objects.
1190    """
1191    if op_type is not None:
1192      return [digest for digest in self._graph_op_digests
1193              if digest.op_type == op_type]
1194    else:
1195      return self._graph_op_digests
1196
1197  def graph_execution_traces(self, digest=False, begin=None, end=None):
1198    """Get all the intra-graph execution tensor traces read so far.
1199
1200    Args:
1201      digest: Whether the results will be returned in the more light-weight
1202        digest form.
1203      begin: Optional beginning index for the requested traces or their digests.
1204        Python-style negative indices are supported.
1205      end: Optional ending index for the requested traces or their digests.
1206        Python-style negative indices are supported.
1207
1208    Returns:
1209      If `digest`: a `list` of `GraphExecutionTraceDigest` objects.
1210      Else: a `list` of `GraphExecutionTrace` objects.
1211    """
1212    digests = self._graph_execution_trace_digests
1213    if begin is not None or end is not None:
1214      begin = begin or 0
1215      end = end or len(digests)
1216      digests = digests[begin:end]
1217    if digest:
1218      return digests
1219    else:
1220      return [self.read_graph_execution_trace(digest) for digest in digests]
1221
1222  def num_graph_execution_traces(self):
1223    """Get the number of graph execution traces read so far."""
1224    return len(self._graph_execution_trace_digests)
1225
1226  def executions(self, digest=False, begin=None, end=None):
1227    """Get `Execution`s or `ExecutionDigest`s this reader has read so far.
1228
1229    Args:
1230      digest: Whether the results are returned in a digest form, i.e.,
1231        `ExecutionDigest` format, instead of the more detailed `Execution`
1232        format.
1233      begin: Optional beginning index for the requested execution data objects
1234        or their digests. Python-style negative indices are supported.
1235      end: Optional ending index for the requested execution data objects or
1236        their digests. Python-style negative indices are supported.
1237
1238    Returns:
1239      If `digest`: a `list` of `ExecutionDigest` objects.
1240      Else: a `list` of `Execution` objects.
1241    """
1242    digests = self._execution_digests
1243    if begin is not None or end is not None:
1244      begin = begin or 0
1245      end = end or len(digests)
1246      digests = digests[begin:end]
1247    if digest:
1248      return digests
1249    else:
1250      # TODO(cais): Optimizer performance removing repeated file open/close.
1251      return [self.read_execution(digest) for digest in digests]
1252
1253  def num_executions(self):
1254    """Get the number of execution events read so far."""
1255    return len(self._execution_digests)
1256
1257  def read_execution(self, execution_digest):
1258    """Read a detailed Execution object."""
1259    debug_event = self._reader.read_execution_event(execution_digest.locator)
1260    return _execution_from_debug_event_proto(debug_event,
1261                                             execution_digest.locator)
1262
1263  def read_graph_execution_trace(self, graph_execution_trace_digest):
1264    """Read the detailed graph execution trace.
1265
1266    Args:
1267      graph_execution_trace_digest: A `GraphExecutionTraceDigest` object.
1268
1269    Returns:
1270      The corresponding `GraphExecutionTrace` object.
1271    """
1272    debug_event = self._reader.read_graph_execution_traces_event(
1273        graph_execution_trace_digest.locator)
1274    return self._graph_execution_trace_from_debug_event_proto(
1275        debug_event, graph_execution_trace_digest.locator)
1276
1277  def read_execution_stack_trace(self, execution):
1278    """Read the stack trace of a given Execution object.
1279
1280    Args:
1281      execution: The Execution object of interest.
1282
1283    Returns:
1284      1. The host name.
1285      2. The stack trace, as a list of (file_path, lineno, func) tuples.
1286    """
1287    host_name = self._stack_frame_by_id[execution.stack_frame_ids[0]][0]
1288    return (host_name, [
1289        self._stack_frame_by_id[frame_id][1:]
1290        for frame_id in execution.stack_frame_ids])
1291
1292  def read_graph_op_creation_stack_trace(self, graph_op_creation_digest):
1293    """Read the stack trace of a given graph op creation object.
1294
1295    Args:
1296      graph_op_creation_digest: The GraphOpCreationDigest object of interest.
1297
1298    Returns:
1299      A tuple consisting of:
1300        1. The host name.
1301        2. The stack trace, as a list of (file_path, lineno, func) tuples.
1302    """
1303    return graph_op_creation_digest.host_name, [
1304        self._stack_frame_by_id[frame_id][1:]
1305        for frame_id in graph_op_creation_digest.stack_frame_ids
1306    ]
1307
1308  # TODO(cais): Add graph_execution_digests() with an ExecutionDigest
1309  #   as a kwarg, to establish the association between top-level and intra-graph
1310  #   execution events.
1311
1312  def execution_to_tensor_values(self, execution):
1313    """Read the full tensor values from an Execution or ExecutionDigest.
1314
1315    Args:
1316      execution: An `ExecutionDigest` or `ExeuctionDigest` object.
1317
1318    Returns:
1319      A list of numpy arrays representing the output tensor values of the
1320        execution event.
1321    """
1322    debug_event = self._reader.read_execution_event(execution.locator)
1323    return [_parse_tensor_value(tensor_proto)
1324            for tensor_proto in debug_event.execution.tensor_protos]
1325
1326  def graph_execution_trace_to_tensor_value(self, trace):
1327    """Read full tensor values from an Execution or ExecutionDigest.
1328
1329    Args:
1330      trace: An `GraphExecutionTraceDigest` or `GraphExecutionTrace` object.
1331
1332    Returns:
1333      A numpy array representing the output tensor value of the intra-graph
1334        tensor execution event.
1335    """
1336    debug_event = self._reader.read_graph_execution_traces_event(trace.locator)
1337    return _parse_tensor_value(debug_event.graph_execution_trace.tensor_proto)
1338
1339  def symbolic_tensor_id(self, graph_id, op_name, output_slot):
1340    """Get the ID of a symbolic tensor.
1341
1342    Args:
1343      graph_id: The ID of the immediately-enclosing graph.
1344      op_name: Name of the op.
1345      output_slot: Output slot as an int.
1346
1347    Returns:
1348      The ID of the symbolic tensor as an int.
1349    """
1350    return self._graph_by_id[graph_id].get_tensor_id(op_name, output_slot)
1351
1352  def graph_execution_trace_to_tensor_id(self, trace):
1353    """Get symbolic tensor ID from a GraphExecutoinTraceDigest object."""
1354    return self.symbolic_tensor_id(
1355        trace.graph_id, trace.op_name, trace.output_slot)
1356
1357  def __enter__(self):
1358    return self
1359
1360  def __exit__(self, exception_type, exception_value, traceback):
1361    del exception_type, exception_value, traceback  # Unused
1362    self._reader.close()
1363