# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CLI Backend for the Analyzer Part of the Debugger.

The analyzer performs post hoc analysis of dumped intermediate tensors and
graph structure information from debugged Session.run() calls.
"""
20from __future__ import absolute_import
21from __future__ import division
22from __future__ import print_function
23
24import argparse
25import copy
26import re
27
28from six.moves import xrange  # pylint: disable=redefined-builtin
29
30from tensorflow.python.debug.cli import cli_config
31from tensorflow.python.debug.cli import cli_shared
32from tensorflow.python.debug.cli import command_parser
33from tensorflow.python.debug.cli import debugger_cli_common
34from tensorflow.python.debug.cli import evaluator
35from tensorflow.python.debug.cli import ui_factory
36from tensorflow.python.debug.lib import debug_graphs
37from tensorflow.python.debug.lib import source_utils
38
# Shorthand for building rich (attributed) text lines.
RL = debugger_cli_common.RichLine

# String constants for the depth-dependent hanging indent at the beginning
# of each line.
HANG_UNFINISHED = "|  "  # Used for unfinished recursion depths.
HANG_FINISHED = "   "
HANG_SUFFIX = "|- "

# String constant for displaying depth and op type.
DEPTH_TEMPLATE = "(%d) "
OP_TYPE_TEMPLATE = "[%s] "

# String constants for control inputs/outputs, etc.
CTRL_LABEL = "(Ctrl) "
ELLIPSIS = "..."

# Valid values of the list_tensors command's --sort_by flag.
SORT_TENSORS_BY_TIMESTAMP = "timestamp"
SORT_TENSORS_BY_DUMP_SIZE = "dump_size"
SORT_TENSORS_BY_OP_TYPE = "op_type"
SORT_TENSORS_BY_TENSOR_NAME = "tensor_name"
60
def _add_main_menu(output,
                   node_name=None,
                   enable_list_tensors=True,
                   enable_node_info=True,
                   enable_print_tensor=True,
                   enable_list_inputs=True,
                   enable_list_outputs=True):
  """Attach the main menu to the screen output of a command.

  Args:
    output: (debugger_cli_common.RichTextLines) the output object to modify.
    node_name: (str or None) name of the node involved (if any). If None,
      the menu items node_info, print_tensor, list_inputs and list_outputs
      are disabled regardless of the corresponding enable_* arguments.
    enable_list_tensors: (bool) whether the list_tensor menu item will be
      enabled.
    enable_node_info: (bool) whether the node_info item will be enabled.
    enable_print_tensor: (bool) whether the print_tensor item will be enabled.
    enable_list_inputs: (bool) whether the item list_inputs will be enabled.
    enable_list_outputs: (bool) whether the item list_outputs will be enabled.
  """

  menu = debugger_cli_common.Menu()

  menu.append(
      debugger_cli_common.MenuItem(
          "list_tensors", "list_tensors", enabled=enable_list_tensors))

  # Node-specific items: (caption, command template, enabled flag).
  node_specific_items = (
      ("node_info", "node_info -a -d -t %s", enable_node_info),
      ("print_tensor", "print_tensor %s", enable_print_tensor),
      ("list_inputs", "list_inputs -c -r %s", enable_list_inputs),
      ("list_outputs", "list_outputs -c -r %s", enable_list_outputs),
  )
  for caption, command_template, enabled in node_specific_items:
    if node_name:
      menu.append(
          debugger_cli_common.MenuItem(
              caption, command_template % node_name, enabled=enabled))
    else:
      # Without a node name these commands have no target; disable them.
      menu.append(debugger_cli_common.MenuItem(caption, None, enabled=False))

  # Items that are always available.
  for caption in ("run_info", "help"):
    menu.append(debugger_cli_common.MenuItem(caption, caption))

  output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
129
class DebugAnalyzer(object):
  """Analyzer for debug data from dump directories."""

  # Column headings of the table rendered by the list_tensors command.
  _TIMESTAMP_COLUMN_HEAD = "t (ms)"
  _DUMP_SIZE_COLUMN_HEAD = "Size (B)"
  _OP_TYPE_COLUMN_HEAD = "Op type"
  _TENSOR_NAME_COLUMN_HEAD = "Tensor name"

  # Op types to be omitted when generating descriptions of graph structure.
  _GRAPH_STRUCT_OP_TYPE_BLACKLIST = (
      "_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval")
141
142  def __init__(self, debug_dump, config):
143    """DebugAnalyzer constructor.
144
145    Args:
146      debug_dump: A DebugDumpDir object.
147      config: A `cli_config.CLIConfig` object that carries user-facing
148        configurations.
149    """
150
151    self._debug_dump = debug_dump
152    self._evaluator = evaluator.ExpressionEvaluator(self._debug_dump)
153
154    # Initialize tensor filters state.
155    self._tensor_filters = {}
156
157    self._build_argument_parsers(config)
158    config.set_callback("graph_recursion_depth",
159                        self._build_argument_parsers)
160
161    # TODO(cais): Implement list_nodes.
162
  def _build_argument_parsers(self, config):
    """Build argument parsers for DebugAnalyzer.

    Populates self._arg_parsers, a dict mapping command handler name to
    `ArgumentParser` instance. Also registered as a config callback for
    "graph_recursion_depth", because the -d/--depth defaults of list_inputs
    and list_outputs are read from the config.

    Args:
      config: A `cli_config.CLIConfig` object.
    """
    # Argument parsers for command handlers, keyed by command name.
    self._arg_parsers = {}

    # Parser for list_tensors.
    ap = argparse.ArgumentParser(
        description="List dumped intermediate tensors.",
        usage=argparse.SUPPRESS)
    ap.add_argument(
        "-f",
        "--tensor_filter",
        dest="tensor_filter",
        type=str,
        default="",
        help="List only Tensors passing the filter of the specified name")
    ap.add_argument(
        "-fenn",
        "--filter_exclude_node_names",
        dest="filter_exclude_node_names",
        type=str,
        default="",
        help="When applying the tensor filter, exclude node with names "
        "matching the regular expression. Applicable only if --tensor_filter "
        "or -f is used.")
    ap.add_argument(
        "-n",
        "--node_name_filter",
        dest="node_name_filter",
        type=str,
        default="",
        help="filter node name by regex.")
    ap.add_argument(
        "-t",
        "--op_type_filter",
        dest="op_type_filter",
        type=str,
        default="",
        help="filter op type by regex.")
    ap.add_argument(
        "-s",
        "--sort_by",
        dest="sort_by",
        type=str,
        default=SORT_TENSORS_BY_TIMESTAMP,
        help=("the field to sort the data by: (%s | %s | %s | %s)" %
              (SORT_TENSORS_BY_TIMESTAMP, SORT_TENSORS_BY_DUMP_SIZE,
               SORT_TENSORS_BY_OP_TYPE, SORT_TENSORS_BY_TENSOR_NAME)))
    ap.add_argument(
        "-r",
        "--reverse",
        dest="reverse",
        action="store_true",
        help="sort the data in reverse (descending) order")
    self._arg_parsers["list_tensors"] = ap

    # Parser for node_info.
    ap = argparse.ArgumentParser(
        description="Show information about a node.", usage=argparse.SUPPRESS)
    ap.add_argument(
        "node_name",
        type=str,
        help="Name of the node or an associated tensor, e.g., "
        "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
    ap.add_argument(
        "-a",
        "--attributes",
        dest="attributes",
        action="store_true",
        help="Also list attributes of the node.")
    ap.add_argument(
        "-d",
        "--dumps",
        dest="dumps",
        action="store_true",
        help="Also list dumps available from the node.")
    ap.add_argument(
        "-t",
        "--traceback",
        dest="traceback",
        action="store_true",
        help="Also include the traceback of the node's creation "
        "(if available in Python).")
    self._arg_parsers["node_info"] = ap

    # Parser for list_inputs.
    ap = argparse.ArgumentParser(
        description="Show inputs to a node.", usage=argparse.SUPPRESS)
    ap.add_argument(
        "node_name",
        type=str,
        help="Name of the node or an output tensor from the node, e.g., "
        "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
    ap.add_argument(
        "-c", "--control", action="store_true", help="Include control inputs.")
    ap.add_argument(
        "-d",
        "--depth",
        dest="depth",
        type=int,
        # Default recursion depth is user-configurable.
        default=config.get("graph_recursion_depth"),
        help="Maximum depth of recursion used when showing the input tree.")
    ap.add_argument(
        "-r",
        "--recursive",
        dest="recursive",
        action="store_true",
        help="Show inputs to the node recursively, i.e., the input tree.")
    ap.add_argument(
        "-t",
        "--op_type",
        action="store_true",
        help="Show op types of input nodes.")
    self._arg_parsers["list_inputs"] = ap

    # Parser for list_outputs.
    ap = argparse.ArgumentParser(
        description="Show the nodes that receive the outputs of given node.",
        usage=argparse.SUPPRESS)
    ap.add_argument(
        "node_name",
        type=str,
        help="Name of the node or an output tensor from the node, e.g., "
        "hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
    ap.add_argument(
        "-c", "--control", action="store_true", help="Include control inputs.")
    ap.add_argument(
        "-d",
        "--depth",
        dest="depth",
        type=int,
        # Default recursion depth is user-configurable.
        default=config.get("graph_recursion_depth"),
        help="Maximum depth of recursion used when showing the output tree.")
    ap.add_argument(
        "-r",
        "--recursive",
        dest="recursive",
        action="store_true",
        help="Show recipients of the node recursively, i.e., the output "
        "tree.")
    ap.add_argument(
        "-t",
        "--op_type",
        action="store_true",
        help="Show op types of recipient nodes.")
    self._arg_parsers["list_outputs"] = ap

    # Parser for print_tensor. Shared with other debugger CLIs, hence built
    # by the command_parser helper instead of inline.
    self._arg_parsers["print_tensor"] = (
        command_parser.get_print_tensor_argparser(
            "Print the value of a dumped tensor."))

    # Parser for print_source.
    ap = argparse.ArgumentParser(
        description="Print a Python source file with overlaid debug "
        "information, including the nodes (ops) or Tensors created at the "
        "source lines.",
        usage=argparse.SUPPRESS)
    ap.add_argument(
        "source_file_path",
        type=str,
        help="Path to the source file.")
    ap.add_argument(
        "-t",
        "--tensors",
        dest="tensors",
        action="store_true",
        help="Label lines with dumped Tensors, instead of ops.")
    ap.add_argument(
        "-m",
        "--max_elements_per_line",
        type=int,
        default=10,
        help="Maximum number of elements (ops or Tensors) to show per source "
             "line.")
    ap.add_argument(
        "-b",
        "--line_begin",
        type=int,
        default=1,
        help="Print source beginning at line number (1-based.)")
    self._arg_parsers["print_source"] = ap

    # Parser for list_source.
    ap = argparse.ArgumentParser(
        description="List source files responsible for constructing nodes and "
        "tensors present in the run().",
        usage=argparse.SUPPRESS)
    ap.add_argument(
        "-p",
        "--path_filter",
        type=str,
        default="",
        help="Regular expression filter for file path.")
    ap.add_argument(
        "-n",
        "--node_name_filter",
        type=str,
        default="",
        help="Regular expression filter for node name.")
    self._arg_parsers["list_source"] = ap

    # Parser for eval.
    ap = argparse.ArgumentParser(
        description="""Evaluate an arbitrary expression. Can use tensor values
        from the current debug dump. The debug tensor names should be enclosed
        in pairs of backticks. Expressions with spaces should be enclosed in
        a pair of double quotes or a pair of single quotes. By default, numpy
        is imported as np and can be used in the expressions. E.g.,
          1) eval np.argmax(`Softmax:0`),
          2) eval 'np.sum(`Softmax:0`, axis=1)',
          3) eval "np.matmul((`output/Identity:0`/`Softmax:0`).T, `Softmax:0`)".
        """,
        usage=argparse.SUPPRESS)
    ap.add_argument(
        "expression",
        type=str,
        help="""Expression to be evaluated.
        1) in the simplest case, use <node_name>:<output_slot>, e.g.,
          hidden_0/MatMul:0.

        2) if the default debug op "DebugIdentity" is to be overridden, use
          <node_name>:<output_slot>:<debug_op>, e.g.,
          hidden_0/MatMul:0:DebugNumericSummary.

        3) if the tensor of the same name exists on more than one device, use
          <device_name>:<node_name>:<output_slot>[:<debug_op>], e.g.,
          /job:worker/replica:0/task:0/gpu:0:hidden_0/MatMul:0
          /job:worker/replica:0/task:2/cpu:0:hidden_0/MatMul:0:DebugNanCount.

        4) if the tensor is executed multiple times in a given `Session.run`
        call, specify the execution index with a 0-based integer enclose in a
        pair of brackets at the end, e.g.,
          RNN/tanh:0[0]
          /job:worker/replica:0/task:0/gpu:0:RNN/tanh:0[0].""")
    ap.add_argument(
        "-a",
        "--all",
        dest="print_all",
        action="store_true",
        help="Print the tensor in its entirety, i.e., do not use ellipses "
        "(may be slow for large results).")
    ap.add_argument(
        "-w",
        "--write_path",
        default="",
        help="Path of the numpy file to write the evaluation result to, "
        "using numpy.save()")
    self._arg_parsers["eval"] = ap
419
420  def add_tensor_filter(self, filter_name, filter_callable):
421    """Add a tensor filter.
422
423    A tensor filter is a named callable of the signature:
424      filter_callable(dump_datum, tensor),
425
426    wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying
427    metadata about the dumped tensor, including tensor name, timestamps, etc.
428    tensor is the value of the dumped tensor as an numpy.ndarray object.
429    The return value of the function is a bool.
430    This is the same signature as the input argument to
431    debug_data.DebugDumpDir.find().
432
433    Args:
434      filter_name: (str) name of the filter. Cannot be empty.
435      filter_callable: (callable) a filter function of the signature described
436        as above.
437
438    Raises:
439      ValueError: If filter_name is an empty str.
440      TypeError: If filter_name is not a str.
441                 Or if filter_callable is not callable.
442    """
443
444    if not isinstance(filter_name, str):
445      raise TypeError("Input argument filter_name is expected to be str, "
446                      "but is not.")
447
448    # Check that filter_name is not an empty str.
449    if not filter_name:
450      raise ValueError("Input argument filter_name cannot be empty.")
451
452    # Check that filter_callable is callable.
453    if not callable(filter_callable):
454      raise TypeError(
455          "Input argument filter_callable is expected to be callable, "
456          "but is not.")
457
458    self._tensor_filters[filter_name] = filter_callable
459
460  def get_tensor_filter(self, filter_name):
461    """Retrieve filter function by name.
462
463    Args:
464      filter_name: Name of the filter set during add_tensor_filter() call.
465
466    Returns:
467      The callable associated with the filter name.
468
469    Raises:
470      ValueError: If there is no tensor filter of the specified filter name.
471    """
472
473    if filter_name not in self._tensor_filters:
474      raise ValueError("There is no tensor filter named \"%s\"" % filter_name)
475
476    return self._tensor_filters[filter_name]
477
  def get_help(self, handler_name):
    """Return the help string of a registered command handler.

    Args:
      handler_name: (str) name of the command handler, e.g., "list_tensors".

    Returns:
      (str) the formatted help text from the handler's ArgumentParser.

    Raises:
      KeyError: If no argument parser exists for handler_name.
    """
    return self._arg_parsers[handler_name].format_help()
480
481  def list_tensors(self, args, screen_info=None):
482    """Command handler for list_tensors.
483
484    List tensors dumped during debugged Session.run() call.
485
486    Args:
487      args: Command-line arguments, excluding the command prefix, as a list of
488        str.
489      screen_info: Optional dict input containing screen information such as
490        cols.
491
492    Returns:
493      Output text lines as a RichTextLines object.
494
495    Raises:
496      ValueError: If `--filter_exclude_node_names` is used without `-f` or
497        `--tensor_filter` being used.
498    """
499
500    # TODO(cais): Add annotations of substrings for dumped tensor names, to
501    # facilitate on-screen highlighting/selection of node names.
502    _ = screen_info
503
504    parsed = self._arg_parsers["list_tensors"].parse_args(args)
505
506    output = []
507
508    filter_strs = []
509    if parsed.op_type_filter:
510      op_type_regex = re.compile(parsed.op_type_filter)
511      filter_strs.append("Op type regex filter: \"%s\"" % parsed.op_type_filter)
512    else:
513      op_type_regex = None
514
515    if parsed.node_name_filter:
516      node_name_regex = re.compile(parsed.node_name_filter)
517      filter_strs.append("Node name regex filter: \"%s\"" %
518                         parsed.node_name_filter)
519    else:
520      node_name_regex = None
521
522    output = debugger_cli_common.RichTextLines(filter_strs)
523    output.append("")
524
525    if parsed.tensor_filter:
526      try:
527        filter_callable = self.get_tensor_filter(parsed.tensor_filter)
528      except ValueError:
529        output = cli_shared.error("There is no tensor filter named \"%s\"." %
530                                  parsed.tensor_filter)
531        _add_main_menu(output, node_name=None, enable_list_tensors=False)
532        return output
533
534      data_to_show = self._debug_dump.find(
535          filter_callable,
536          exclude_node_names=parsed.filter_exclude_node_names)
537    else:
538      if parsed.filter_exclude_node_names:
539        raise ValueError(
540            "The flag --filter_exclude_node_names is valid only when "
541            "the flag -f or --tensor_filter is used.")
542
543      data_to_show = self._debug_dump.dumped_tensor_data
544
545    # TODO(cais): Implement filter by lambda on tensor value.
546
547    max_timestamp_width, max_dump_size_width, max_op_type_width = (
548        self._measure_tensor_list_column_widths(data_to_show))
549
550    # Sort the data.
551    data_to_show = self._sort_dump_data_by(
552        data_to_show, parsed.sort_by, parsed.reverse)
553
554    output.extend(
555        self._tensor_list_column_heads(parsed, max_timestamp_width,
556                                       max_dump_size_width, max_op_type_width))
557
558    dump_count = 0
559    for dump in data_to_show:
560      if node_name_regex and not node_name_regex.match(dump.node_name):
561        continue
562
563      if op_type_regex:
564        op_type = self._debug_dump.node_op_type(dump.node_name)
565        if not op_type_regex.match(op_type):
566          continue
567
568      rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0
569      dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
570      dumped_tensor_name = "%s:%d" % (dump.node_name, dump.output_slot)
571      op_type = self._debug_dump.node_op_type(dump.node_name)
572
573      line = "[%.3f]" % rel_time
574      line += " " * (max_timestamp_width - len(line))
575      line += dump_size_str
576      line += " " * (max_timestamp_width + max_dump_size_width - len(line))
577      line += op_type
578      line += " " * (max_timestamp_width + max_dump_size_width +
579                     max_op_type_width - len(line))
580      line += dumped_tensor_name
581
582      output.append(
583          line,
584          font_attr_segs=[(
585              len(line) - len(dumped_tensor_name), len(line),
586              debugger_cli_common.MenuItem("", "pt %s" % dumped_tensor_name))])
587      dump_count += 1
588
589    if parsed.tensor_filter:
590      output.prepend([
591          "%d dumped tensor(s) passing filter \"%s\":" %
592          (dump_count, parsed.tensor_filter)
593      ])
594    else:
595      output.prepend(["%d dumped tensor(s):" % dump_count])
596
597    _add_main_menu(output, node_name=None, enable_list_tensors=False)
598    return output
599
600  def _measure_tensor_list_column_widths(self, data):
601    """Determine the maximum widths of the timestamp and op-type column.
602
603    This method assumes that data is sorted in the default order, i.e.,
604    by ascending timestamps.
605
606    Args:
607      data: (list of DebugTensorDaum) the data based on which the maximum
608        column widths will be determined.
609
610    Returns:
611      (int) maximum width of the timestamp column. 0 if data is empty.
612      (int) maximum width of the dump size column. 0 if data is empty.
613      (int) maximum width of the op type column. 0 if data is empty.
614    """
615
616    max_timestamp_width = 0
617    if data:
618      max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0
619      max_timestamp_width = len("[%.3f] " % max_rel_time_ms) + 1
620    max_timestamp_width = max(max_timestamp_width,
621                              len(self._TIMESTAMP_COLUMN_HEAD) + 1)
622
623    max_dump_size_width = 0
624    for dump in data:
625      dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
626      if len(dump_size_str) + 1 > max_dump_size_width:
627        max_dump_size_width = len(dump_size_str) + 1
628    max_dump_size_width = max(max_dump_size_width,
629                              len(self._DUMP_SIZE_COLUMN_HEAD) + 1)
630
631    max_op_type_width = 0
632    for dump in data:
633      op_type = self._debug_dump.node_op_type(dump.node_name)
634      if len(op_type) + 1 > max_op_type_width:
635        max_op_type_width = len(op_type) + 1
636    max_op_type_width = max(max_op_type_width,
637                            len(self._OP_TYPE_COLUMN_HEAD) + 1)
638
639    return max_timestamp_width, max_dump_size_width, max_op_type_width
640
641  def _sort_dump_data_by(self, data, sort_by, reverse):
642    """Sort a list of DebugTensorDatum in specified order.
643
644    Args:
645      data: (list of DebugTensorDatum) the data to be sorted.
646      sort_by: The field to sort data by.
647      reverse: (bool) Whether to use reversed (descending) order.
648
649    Returns:
650      (list of DebugTensorDatum) in sorted order.
651
652    Raises:
653      ValueError: given an invalid value of sort_by.
654    """
655
656    if sort_by == SORT_TENSORS_BY_TIMESTAMP:
657      return sorted(
658          data,
659          reverse=reverse,
660          key=lambda x: x.timestamp)
661    elif sort_by == SORT_TENSORS_BY_DUMP_SIZE:
662      return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes)
663    elif sort_by == SORT_TENSORS_BY_OP_TYPE:
664      return sorted(
665          data,
666          reverse=reverse,
667          key=lambda x: self._debug_dump.node_op_type(x.node_name))
668    elif sort_by == SORT_TENSORS_BY_TENSOR_NAME:
669      return sorted(
670          data,
671          reverse=reverse,
672          key=lambda x: "%s:%d" % (x.node_name, x.output_slot))
673    else:
674      raise ValueError("Unsupported key to sort tensors by: %s" % sort_by)
675
  def _tensor_list_column_heads(self, parsed, max_timestamp_width,
                                max_dump_size_width, max_op_type_width):
    """Generate a line containing the column heads of the tensor list.

    Each column head is a clickable menu item that re-runs list_tensors
    sorted by that column; clicking the column that is already the ascending
    sort key appends -r, toggling to descending order.

    Args:
      parsed: Parsed arguments (by argparse) of the list_tensors command.
      max_timestamp_width: (int) maximum width of the timestamp column.
      max_dump_size_width: (int) maximum width of the dump size column.
      max_op_type_width: (int) maximum width of the op type column.

    Returns:
      A RichTextLines object.
    """

    # Rebuild the list_tensors command with the active filter flags so the
    # clickable column heads preserve them.
    base_command = "list_tensors"
    if parsed.tensor_filter:
      base_command += " -f %s" % parsed.tensor_filter
    if parsed.op_type_filter:
      base_command += " -t %s" % parsed.op_type_filter
    if parsed.node_name_filter:
      base_command += " -n %s" % parsed.node_name_filter

    # Timestamp column head.
    attr_segs = {0: []}
    row = self._TIMESTAMP_COLUMN_HEAD
    command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TIMESTAMP)
    if parsed.sort_by == SORT_TENSORS_BY_TIMESTAMP and not parsed.reverse:
      command += " -r"
    attr_segs[0].append(
        (0, len(row), [debugger_cli_common.MenuItem(None, command), "bold"]))
    row += " " * (max_timestamp_width - len(row))

    # Dump size column head; padding widths are cumulative from the left.
    prev_len = len(row)
    row += self._DUMP_SIZE_COLUMN_HEAD
    command = "%s -s %s" % (base_command, SORT_TENSORS_BY_DUMP_SIZE)
    if parsed.sort_by == SORT_TENSORS_BY_DUMP_SIZE and not parsed.reverse:
      command += " -r"
    attr_segs[0].append((prev_len, len(row),
                         [debugger_cli_common.MenuItem(None, command), "bold"]))
    row += " " * (max_dump_size_width + max_timestamp_width - len(row))

    # Op type column head.
    prev_len = len(row)
    row += self._OP_TYPE_COLUMN_HEAD
    command = "%s -s %s" % (base_command, SORT_TENSORS_BY_OP_TYPE)
    if parsed.sort_by == SORT_TENSORS_BY_OP_TYPE and not parsed.reverse:
      command += " -r"
    attr_segs[0].append((prev_len, len(row),
                         [debugger_cli_common.MenuItem(None, command), "bold"]))
    row += " " * (
        max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)
    )

    # Tensor name column head.
    # NOTE(review): this MenuItem uses "" as its caption whereas the other
    # columns use None — confirm whether the difference is intentional.
    prev_len = len(row)
    row += self._TENSOR_NAME_COLUMN_HEAD
    command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TENSOR_NAME)
    if parsed.sort_by == SORT_TENSORS_BY_TENSOR_NAME and not parsed.reverse:
      command += " -r"
    attr_segs[0].append((prev_len, len(row),
                         [debugger_cli_common.MenuItem("", command), "bold"]))
    row += " " * (
        max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)
    )

    return debugger_cli_common.RichTextLines([row], font_attr_segs=attr_segs)
739
740  def node_info(self, args, screen_info=None):
741    """Command handler for node_info.
742
743    Query information about a given node.
744
745    Args:
746      args: Command-line arguments, excluding the command prefix, as a list of
747        str.
748      screen_info: Optional dict input containing screen information such as
749        cols.
750
751    Returns:
752      Output text lines as a RichTextLines object.
753    """
754
755    # TODO(cais): Add annotation of substrings for node names, to facilitate
756    # on-screen highlighting/selection of node names.
757    _ = screen_info
758
759    parsed = self._arg_parsers["node_info"].parse_args(args)
760
761    # Get a node name, regardless of whether the input is a node name (without
762    # output slot attached) or a tensor name (with output slot attached).
763    node_name, unused_slot = debug_graphs.parse_node_or_tensor_name(
764        parsed.node_name)
765
766    if not self._debug_dump.node_exists(node_name):
767      output = cli_shared.error(
768          "There is no node named \"%s\" in the partition graphs" % node_name)
769      _add_main_menu(
770          output,
771          node_name=None,
772          enable_list_tensors=True,
773          enable_node_info=False,
774          enable_list_inputs=False,
775          enable_list_outputs=False)
776      return output
777
778    # TODO(cais): Provide UI glossary feature to explain to users what the
779    # term "partition graph" means and how it is related to TF graph objects
780    # in Python. The information can be along the line of:
781    # "A tensorflow graph defined in Python is stripped of unused ops
782    # according to the feeds and fetches and divided into a number of
783    # partition graphs that may be distributed among multiple devices and
784    # hosts. The partition graphs are what's actually executed by the C++
785    # runtime during a run() call."
786
787    lines = ["Node %s" % node_name]
788    font_attr_segs = {
789        0: [(len(lines[-1]) - len(node_name), len(lines[-1]), "bold")]
790    }
791    lines.append("")
792    lines.append("  Op: %s" % self._debug_dump.node_op_type(node_name))
793    lines.append("  Device: %s" % self._debug_dump.node_device(node_name))
794    output = debugger_cli_common.RichTextLines(
795        lines, font_attr_segs=font_attr_segs)
796
797    # List node inputs (non-control and control).
798    inputs = self._exclude_blacklisted_ops(
799        self._debug_dump.node_inputs(node_name))
800    ctrl_inputs = self._exclude_blacklisted_ops(
801        self._debug_dump.node_inputs(node_name, is_control=True))
802    output.extend(self._format_neighbors("input", inputs, ctrl_inputs))
803
804    # List node output recipients (non-control and control).
805    recs = self._exclude_blacklisted_ops(
806        self._debug_dump.node_recipients(node_name))
807    ctrl_recs = self._exclude_blacklisted_ops(
808        self._debug_dump.node_recipients(node_name, is_control=True))
809    output.extend(self._format_neighbors("recipient", recs, ctrl_recs))
810
811    # Optional: List attributes of the node.
812    if parsed.attributes:
813      output.extend(self._list_node_attributes(node_name))
814
815    # Optional: List dumps available from the node.
816    if parsed.dumps:
817      output.extend(self._list_node_dumps(node_name))
818
819    if parsed.traceback:
820      output.extend(self._render_node_traceback(node_name))
821
822    _add_main_menu(output, node_name=node_name, enable_node_info=False)
823    return output
824
825  def _exclude_blacklisted_ops(self, node_names):
826    """Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_BLACKLIST.
827
828    Args:
829      node_names: An iterable of node or graph element names.
830
831    Returns:
832      A list of node names that are not blacklisted.
833    """
834    return [node_name for node_name in node_names
835            if self._debug_dump.node_op_type(
836                debug_graphs.get_node_name(node_name)) not in
837            self._GRAPH_STRUCT_OP_TYPE_BLACKLIST]
838
839  def _render_node_traceback(self, node_name):
840    """Render traceback of a node's creation in Python, if available.
841
842    Args:
843      node_name: (str) name of the node.
844
845    Returns:
846      A RichTextLines object containing the stack trace of the node's
847      construction.
848    """
849
850    lines = [RL(""), RL(""), RL("Traceback of node construction:", "bold")]
851
852    try:
853      node_stack = self._debug_dump.node_traceback(node_name)
854      for depth, (file_path, line, function_name, text) in enumerate(
855          node_stack):
856        lines.append("%d: %s" % (depth, file_path))
857
858        attribute = debugger_cli_common.MenuItem(
859            "", "ps %s -b %d" % (file_path, line)) if text else None
860        line_number_line = RL("  ")
861        line_number_line += RL("Line:     %d" % line, attribute)
862        lines.append(line_number_line)
863
864        lines.append("  Function: %s" % function_name)
865        lines.append("  Text:     " + (("\"%s\"" % text) if text else "None"))
866        lines.append("")
867    except KeyError:
868      lines.append("(Node unavailable in the loaded Python graph)")
869    except LookupError:
870      lines.append("(Unavailable because no Python graph has been loaded)")
871
872    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
873
874  def list_inputs(self, args, screen_info=None):
875    """Command handler for inputs.
876
877    Show inputs to a given node.
878
879    Args:
880      args: Command-line arguments, excluding the command prefix, as a list of
881        str.
882      screen_info: Optional dict input containing screen information such as
883        cols.
884
885    Returns:
886      Output text lines as a RichTextLines object.
887    """
888
889    # Screen info not currently used by this handler. Include this line to
890    # mute pylint.
891    _ = screen_info
892    # TODO(cais): Use screen info to format the output lines more prettily,
893    # e.g., hanging indent of long node names.
894
895    parsed = self._arg_parsers["list_inputs"].parse_args(args)
896
897    output = self._list_inputs_or_outputs(
898        parsed.recursive,
899        parsed.node_name,
900        parsed.depth,
901        parsed.control,
902        parsed.op_type,
903        do_outputs=False)
904
905    node_name = debug_graphs.get_node_name(parsed.node_name)
906    _add_main_menu(output, node_name=node_name, enable_list_inputs=False)
907
908    return output
909
  def print_tensor(self, args, screen_info=None):
    """Command handler for print_tensor.

    Print value of a given dumped tensor.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    parsed = self._arg_parsers["print_tensor"].parse_args(args)

    # Derive numpy print options (e.g., line width) from the screen size.
    np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
        screen_info)

    # Determine if any range-highlighting is required.
    highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)

    # Split e.g. "node:0[1:10]" into the tensor name and the slicing spec.
    tensor_name, tensor_slicing = (
        command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))

    node_name, output_slot = debug_graphs.parse_node_or_tensor_name(tensor_name)
    # Only validate node existence if partition graphs are available;
    # otherwise there is no graph to check against.
    if (self._debug_dump.loaded_partition_graphs() and
        not self._debug_dump.node_exists(node_name)):
      output = cli_shared.error(
          "Node \"%s\" does not exist in partition graphs" % node_name)
      _add_main_menu(
          output,
          node_name=None,
          enable_list_tensors=True,
          enable_print_tensor=False)
      return output

    watch_keys = self._debug_dump.debug_watch_keys(node_name)
    # If the user gave a bare node name (no ":<slot>"), try to infer the
    # output slot from the watch keys, where the slot is the field after the
    # first colon.
    if output_slot is None:
      output_slots = set()
      for watch_key in watch_keys:
        output_slots.add(int(watch_key.split(":")[1]))

      if len(output_slots) == 1:
        # There is only one dumped tensor from this node, so there is no
        # ambiguity. Proceed to show the only dumped tensor.
        output_slot = list(output_slots)[0]
      else:
        # There are more than one dumped tensors from this node. Indicate as
        # such.
        # TODO(cais): Provide an output screen with command links for
        # convenience.
        lines = [
            "Node \"%s\" generated debug dumps from %s output slots:" %
            (node_name, len(output_slots)),
            "Please specify the output slot: %s:x." % node_name
        ]
        output = debugger_cli_common.RichTextLines(lines)
        _add_main_menu(
            output,
            node_name=node_name,
            enable_list_tensors=True,
            enable_print_tensor=False)
        return output

    # Find debug dump data that match the tensor name (node name + output
    # slot).
    matching_data = []
    for watch_key in watch_keys:
      debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
      for datum in debug_tensor_data:
        if datum.output_slot == output_slot:
          matching_data.append(datum)

    if not matching_data:
      # No dump for this tensor.
      output = cli_shared.error("Tensor \"%s\" did not generate any dumps." %
                                parsed.tensor_name)
    elif len(matching_data) == 1:
      # There is only one dump for this tensor.
      # NOTE(review): a non-positive parsed.number is treated as "unspecified"
      # here (the multi-dump branch below suggests the default is negative) —
      # confirm against the argument parser's default for -n/--number.
      if parsed.number <= 0:
        output = cli_shared.format_tensor(
            matching_data[0].get_tensor(),
            matching_data[0].watch_key,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options,
            include_numeric_summary=parsed.numeric_summary,
            write_path=parsed.write_path)
      else:
        output = cli_shared.error(
            "Invalid number (%d) for tensor %s, which generated one dump." %
            (parsed.number, parsed.tensor_name))

      _add_main_menu(output, node_name=node_name, enable_print_tensor=False)
    else:
      # There are more than one dumps for this tensor.
      if parsed.number < 0:
        # No dump number specified: list all dumps with clickable shortcuts.
        lines = [
            "Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name,
                                                   len(matching_data))
        ]
        font_attr_segs = {}

        for i, datum in enumerate(matching_data):
          # Timestamp displayed in milliseconds relative to the first dump.
          rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
          lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key))
          command = "print_tensor %s -n %d" % (parsed.tensor_name, i)
          # Make the watch key at the end of the line a clickable menu item.
          font_attr_segs[len(lines) - 1] = [(
              len(lines[-1]) - len(datum.watch_key), len(lines[-1]),
              debugger_cli_common.MenuItem(None, command))]

        lines.append("")
        lines.append(
            "You can use the -n (--number) flag to specify which dump to "
            "print.")
        lines.append("For example:")
        lines.append("  print_tensor %s -n 0" % parsed.tensor_name)

        output = debugger_cli_common.RichTextLines(
            lines, font_attr_segs=font_attr_segs)
      elif parsed.number >= len(matching_data):
        output = cli_shared.error(
            "Specified number (%d) exceeds the number of available dumps "
            "(%d) for tensor %s" %
            (parsed.number, len(matching_data), parsed.tensor_name))
      else:
        # A valid dump number was specified: print that particular dump.
        output = cli_shared.format_tensor(
            matching_data[parsed.number].get_tensor(),
            matching_data[parsed.number].watch_key + " (dump #%d)" %
            parsed.number,
            np_printoptions,
            print_all=parsed.print_all,
            tensor_slicing=tensor_slicing,
            highlight_options=highlight_options,
            write_path=parsed.write_path)
      _add_main_menu(output, node_name=node_name, enable_print_tensor=False)

    return output
1051
  def list_outputs(self, args, screen_info=None):
    """Command handler for list_outputs.

    Show output recipients of a given node.

    Args:
      args: Command-line arguments, excluding the command prefix, as a list of
        str.
      screen_info: Optional dict input containing screen information such as
        cols.

    Returns:
      Output text lines as a RichTextLines object.
    """

    # Screen info not currently used by this handler. Include this line to
    # mute pylint.
    _ = screen_info
    # TODO(cais): Use screen info to format the output lines more prettily,
    # e.g., hanging indent of long node names.

    parsed = self._arg_parsers["list_outputs"].parse_args(args)

    output = self._list_inputs_or_outputs(
        parsed.recursive,
        parsed.node_name,
        parsed.depth,
        parsed.control,
        parsed.op_type,
        do_outputs=True)

    node_name = debug_graphs.get_node_name(parsed.node_name)
    _add_main_menu(output, node_name=node_name, enable_list_outputs=False)

    return output
1087
1088  def evaluate_expression(self, args, screen_info=None):
1089    parsed = self._arg_parsers["eval"].parse_args(args)
1090
1091    eval_res = self._evaluator.evaluate(parsed.expression)
1092
1093    np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
1094        screen_info)
1095    return cli_shared.format_tensor(
1096        eval_res,
1097        "from eval of expression '%s'" % parsed.expression,
1098        np_printoptions,
1099        print_all=parsed.print_all,
1100        include_numeric_summary=True,
1101        write_path=parsed.write_path)
1102
1103  def _reconstruct_print_source_command(self,
1104                                        parsed,
1105                                        line_begin,
1106                                        max_elements_per_line_increase=0):
1107    return "ps %s %s -b %d -m %d" % (
1108        parsed.source_file_path, "-t" if parsed.tensors else "", line_begin,
1109        parsed.max_elements_per_line + max_elements_per_line_increase)
1110
  def print_source(self, args, screen_info=None):
    """Print the content of a source file."""
    del screen_info  # Unused.

    parsed = self._arg_parsers["print_source"].parse_args(args)

    # Map 1-based line numbers in the file to the graph elements (ops or
    # dumped tensors, depending on the --tensors flag) created at those lines.
    source_annotation = source_utils.annotate_source(
        self._debug_dump,
        parsed.source_file_path,
        do_dumped_tensors=parsed.tensors)

    source_lines, line_num_width = source_utils.load_source(
        parsed.source_file_path)

    labeled_source_lines = []
    actual_initial_scroll_target = 0
    for i, line in enumerate(source_lines):
      # Prefix each source line with a yellow "L<n>" label, padded to the
      # width of the largest line number.
      annotated_line = RL("L%d" % (i + 1), cli_shared.COLOR_YELLOW)
      annotated_line += " " * (line_num_width - len(annotated_line))
      annotated_line += line
      labeled_source_lines.append(annotated_line)

      # Remember the output row of the requested begin line so the UI can
      # scroll to it initially.
      if i + 1 == parsed.line_begin:
        actual_initial_scroll_target = len(labeled_source_lines) - 1

      if i + 1 in source_annotation:
        sorted_elements = sorted(source_annotation[i + 1])
        for k, element in enumerate(sorted_elements):
          # Cap the number of elements shown per source line; the rest are
          # collapsed into one "Omitted" line with a clickable "+5" item that
          # re-runs the command with a raised cap.
          if k >= parsed.max_elements_per_line:
            omitted_info_line = RL("    (... Omitted %d of %d %s ...) " % (
                len(sorted_elements) - parsed.max_elements_per_line,
                len(sorted_elements),
                "tensor(s)" if parsed.tensors else "op(s)"))
            omitted_info_line += RL(
                "+5",
                debugger_cli_common.MenuItem(
                    None,
                    self._reconstruct_print_source_command(
                        parsed, i + 1, max_elements_per_line_increase=5)))
            labeled_source_lines.append(omitted_info_line)
            break

          label = RL(" " * 4)
          # Elements with dumps become clickable "pt" shortcuts; elements
          # without dumps are merely colored blue.
          if self._debug_dump.debug_watch_keys(
              debug_graphs.get_node_name(element)):
            attribute = debugger_cli_common.MenuItem("", "pt %s" % element)
          else:
            attribute = cli_shared.COLOR_BLUE

          label += RL(element, attribute)
          labeled_source_lines.append(label)

    output = debugger_cli_common.rich_text_lines_from_rich_line_list(
        labeled_source_lines,
        annotations={debugger_cli_common.INIT_SCROLL_POS_KEY:
                     actual_initial_scroll_target})
    _add_main_menu(output, node_name=None)
    return output
1169
1170  def _make_source_table(self, source_list, is_tf_py_library):
1171    """Make a table summarizing the source files that create nodes and tensors.
1172
1173    Args:
1174      source_list: List of source files and related information as a list of
1175        tuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps,
1176        first_line).
1177      is_tf_py_library: (`bool`) whether this table is for files that belong
1178        to the TensorFlow Python library.
1179
1180    Returns:
1181      The table as a `debugger_cli_common.RichTextLines` object.
1182    """
1183    path_head = "Source file path"
1184    num_nodes_head = "#(nodes)"
1185    num_tensors_head = "#(tensors)"
1186    num_dumps_head = "#(tensor dumps)"
1187
1188    if is_tf_py_library:
1189      # Use color to mark files that are guessed to belong to TensorFlow Python
1190      # library.
1191      color = cli_shared.COLOR_GRAY
1192      lines = [RL("TensorFlow Python library file(s):", color)]
1193    else:
1194      color = cli_shared.COLOR_WHITE
1195      lines = [RL("File(s) outside TensorFlow Python library:", color)]
1196
1197    if not source_list:
1198      lines.append(RL("[No files.]"))
1199      lines.append(RL())
1200      return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
1201
1202    path_column_width = max(
1203        max(len(item[0]) for item in source_list), len(path_head)) + 1
1204    num_nodes_column_width = max(
1205        max(len(str(item[2])) for item in source_list),
1206        len(num_nodes_head)) + 1
1207    num_tensors_column_width = max(
1208        max(len(str(item[3])) for item in source_list),
1209        len(num_tensors_head)) + 1
1210
1211    head = RL(path_head + " " * (path_column_width - len(path_head)), color)
1212    head += RL(num_nodes_head + " " * (
1213        num_nodes_column_width - len(num_nodes_head)), color)
1214    head += RL(num_tensors_head + " " * (
1215        num_tensors_column_width - len(num_tensors_head)), color)
1216    head += RL(num_dumps_head, color)
1217
1218    lines.append(head)
1219
1220    for (file_path, _, num_nodes, num_tensors, num_dumps,
1221         first_line_num) in source_list:
1222      path_attributes = [color]
1223      if source_utils.is_extension_uncompiled_python_source(file_path):
1224        path_attributes.append(
1225            debugger_cli_common.MenuItem(None, "ps %s -b %d" %
1226                                         (file_path, first_line_num)))
1227
1228      line = RL(file_path, path_attributes)
1229      line += " " * (path_column_width - len(line))
1230      line += RL(
1231          str(num_nodes) + " " * (num_nodes_column_width - len(str(num_nodes))),
1232          color)
1233      line += RL(
1234          str(num_tensors) + " " *
1235          (num_tensors_column_width - len(str(num_tensors))), color)
1236      line += RL(str(num_dumps), color)
1237      lines.append(line)
1238    lines.append(RL())
1239
1240    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
1241
1242  def list_source(self, args, screen_info=None):
1243    """List Python source files that constructed nodes and tensors."""
1244    del screen_info  # Unused.
1245
1246    parsed = self._arg_parsers["list_source"].parse_args(args)
1247    source_list = source_utils.list_source_files_against_dump(
1248        self._debug_dump,
1249        path_regex_whitelist=parsed.path_filter,
1250        node_name_regex_whitelist=parsed.node_name_filter)
1251
1252    top_lines = [
1253        RL("List of source files that created nodes in this run", "bold")]
1254    if parsed.path_filter:
1255      top_lines.append(
1256          RL("File path regex filter: \"%s\"" % parsed.path_filter))
1257    if parsed.node_name_filter:
1258      top_lines.append(
1259          RL("Node name regex filter: \"%s\"" % parsed.node_name_filter))
1260    top_lines.append(RL())
1261    output = debugger_cli_common.rich_text_lines_from_rich_line_list(top_lines)
1262    if not source_list:
1263      output.append("[No source file information.]")
1264      return output
1265
1266    output.extend(self._make_source_table(
1267        [item for item in source_list if not item[1]], False))
1268    output.extend(self._make_source_table(
1269        [item for item in source_list if item[1]], True))
1270    _add_main_menu(output, node_name=None)
1271    return output
1272
  def _list_inputs_or_outputs(self,
                              recursive,
                              node_name,
                              depth,
                              control,
                              op_type,
                              do_outputs=False):
    """Helper function used by list_inputs and list_outputs.

    Format a list of lines to display the inputs or output recipients of a
    given node.

    Args:
      recursive: Whether the listing is to be done recursively, as a boolean.
      node_name: The name of the node in question, as a str.
      depth: Maximum recursion depth, applies only if recursive == True, as an
        int.
      control: Whether control inputs or control recipients are included, as a
        boolean.
      op_type: Whether the op types of the nodes are to be included, as a
        boolean.
      do_outputs: Whether recipients, instead of input nodes are to be
        listed, as a boolean.

    Returns:
      Input or recipient tree formatted as a RichTextLines object.
    """

    # Select the neighbor-tracking callable and display strings based on the
    # direction of the listing.
    if do_outputs:
      tracker = self._debug_dump.node_recipients
      type_str = "Recipients of"
      short_type_str = "recipients"
    else:
      tracker = self._debug_dump.node_inputs
      type_str = "Inputs to"
      short_type_str = "inputs"

    lines = []
    font_attr_segs = {}

    # Check if this is a tensor name, instead of a node name.
    node_name, _ = debug_graphs.parse_node_or_tensor_name(node_name)

    # Check if node exists.
    if not self._debug_dump.node_exists(node_name):
      return cli_shared.error(
          "There is no node named \"%s\" in the partition graphs" % node_name)

    if recursive:
      max_depth = depth
    else:
      # Non-recursive listing only shows direct neighbors.
      max_depth = 1

    if control:
      include_ctrls_str = ", control %s included" % short_type_str
    else:
      include_ctrls_str = ""

    # Header line; the bold segment covers the node name between the quotes
    # (len(line) - 1 excludes the closing quote).
    line = "%s node \"%s\"" % (type_str, node_name)
    font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, "bold")
                        ]
    lines.append(line + " (Depth limit = %d%s):" % (max_depth, include_ctrls_str
                                                   ))

    # Each listed neighbor gets a clickable shortcut that recursively lists
    # its own neighbors in the same direction.
    command_template = "lo -c -r %s" if do_outputs else "li -c -r %s"
    self._dfs_from_node(
        lines,
        font_attr_segs,
        node_name,
        tracker,
        max_depth,
        1, [],
        control,
        op_type,
        command_template=command_template)

    # Include legend.
    lines.append("")
    lines.append("Legend:")
    lines.append("  (d): recursion depth = d.")

    if control:
      lines.append("  (Ctrl): Control input.")
    if op_type:
      lines.append("  [Op]: Input node has op type Op.")

    # TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes.

    return debugger_cli_common.RichTextLines(
        lines, font_attr_segs=font_attr_segs)
1363
  def _dfs_from_node(self,
                     lines,
                     attr_segs,
                     node_name,
                     tracker,
                     max_depth,
                     depth,
                     unfinished,
                     include_control=False,
                     show_op_type=False,
                     command_template=None):
    """Perform depth-first search (DFS) traversal of a node's input tree.

    It recursively tracks the inputs (or output recipients) of the node called
    node_name, and append these inputs (or output recipients) to a list of text
    lines (lines) with proper indentation that reflects the recursion depth,
    together with some formatting attributes (to attr_segs). The formatting
    attributes can include command shortcuts, for example.

    Args:
      lines: Text lines to append to, as a list of str.
      attr_segs: (dict) Attribute segments dictionary to append to.
      node_name: Name of the node, as a str. This arg is updated during the
        recursion.
      tracker: A callable that takes one str as the node name input and
        returns a list of str as the inputs/outputs.
        This makes it this function general enough to be used with both
        node-input and node-output tracking.
      max_depth: Maximum recursion depth, as an int.
      depth: Current recursion depth. This arg is updated during the
        recursion.
      unfinished: A stack of unfinished recursion depths, as a list of int.
      include_control: Whether control dependencies are to be included as
        inputs (and marked as such).
      show_op_type: Whether op type of the input nodes are to be displayed
        alongside the nodes' names.
      command_template: (str) Template for command shortcut of the node names.
    """

    # Make a shallow copy of the list because it may be extended later.
    all_inputs = self._exclude_blacklisted_ops(
        copy.copy(tracker(node_name, is_control=False)))
    # Parallel list marking, for each entry of all_inputs, whether it is a
    # control dependency.
    is_ctrl = [False] * len(all_inputs)
    if include_control:
      # Sort control inputs or recipients in alphabetical order of the node
      # names.
      ctrl_inputs = self._exclude_blacklisted_ops(
          sorted(tracker(node_name, is_control=True)))
      all_inputs.extend(ctrl_inputs)
      is_ctrl.extend([True] * len(ctrl_inputs))

    if not all_inputs:
      # Only the root of the traversal reports an empty neighbor list.
      if depth == 1:
        lines.append("  [None]")

      return

    # Mark this depth as having unfinished siblings, so deeper lines draw a
    # vertical connector at this depth's column.
    unfinished.append(depth)

    # Create depth-dependent hanging indent for the line.
    hang = ""
    for k in xrange(depth):
      if k < depth - 1:
        if k + 1 in unfinished:
          hang += HANG_UNFINISHED
        else:
          hang += HANG_FINISHED
      else:
        hang += HANG_SUFFIX

    if all_inputs and depth > max_depth:
      # Depth limit reached: show an ellipsis instead of descending further.
      lines.append(hang + ELLIPSIS)
      unfinished.pop()
      return

    hang += DEPTH_TEMPLATE % depth

    for i, inp in enumerate(all_inputs):
      op_type = self._debug_dump.node_op_type(debug_graphs.get_node_name(inp))
      # NOTE(review): all_inputs was already filtered through
      # _exclude_blacklisted_ops above, so this re-check looks redundant —
      # confirm before removing.
      if op_type in self._GRAPH_STRUCT_OP_TYPE_BLACKLIST:
        continue

      if is_ctrl[i]:
        ctrl_str = CTRL_LABEL
      else:
        ctrl_str = ""

      op_type_str = ""
      if show_op_type:
        op_type_str = OP_TYPE_TEMPLATE % op_type

      # The last sibling closes out this depth's connector.
      if i == len(all_inputs) - 1:
        unfinished.pop()

      line = hang + ctrl_str + op_type_str + inp
      lines.append(line)
      # Attach a clickable command shortcut over the node name at the end of
      # the line.
      if command_template:
        attr_segs[len(lines) - 1] = [(
            len(line) - len(inp), len(line),
            debugger_cli_common.MenuItem(None, command_template % inp))]

      # Recursive call.
      # The input's/output's name can be a tensor name, in the case of node
      # with >1 output slots.
      inp_node_name, _ = debug_graphs.parse_node_or_tensor_name(inp)
      self._dfs_from_node(
          lines,
          attr_segs,
          inp_node_name,
          tracker,
          max_depth,
          depth + 1,
          unfinished,
          include_control=include_control,
          show_op_type=show_op_type,
          command_template=command_template)
1480
1481  def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):
1482    """List neighbors (inputs or recipients) of a node.
1483
1484    Args:
1485      neighbor_type: ("input" | "recipient")
1486      non_ctrls: Non-control neighbor node names, as a list of str.
1487      ctrls: Control neighbor node names, as a list of str.
1488
1489    Returns:
1490      A RichTextLines object.
1491    """
1492
1493    # TODO(cais): Return RichTextLines instead, to allow annotation of node
1494    # names.
1495    lines = []
1496    font_attr_segs = {}
1497
1498    lines.append("")
1499    lines.append("  %d %s(s) + %d control %s(s):" %
1500                 (len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))
1501    lines.append("    %d %s(s):" % (len(non_ctrls), neighbor_type))
1502    for non_ctrl in non_ctrls:
1503      line = "      [%s] %s" % (self._debug_dump.node_op_type(non_ctrl),
1504                                non_ctrl)
1505      lines.append(line)
1506      font_attr_segs[len(lines) - 1] = [(
1507          len(line) - len(non_ctrl), len(line),
1508          debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % non_ctrl))]
1509
1510    if ctrls:
1511      lines.append("")
1512      lines.append("    %d control %s(s):" % (len(ctrls), neighbor_type))
1513      for ctrl in ctrls:
1514        line = "      [%s] %s" % (self._debug_dump.node_op_type(ctrl), ctrl)
1515        lines.append(line)
1516        font_attr_segs[len(lines) - 1] = [(
1517            len(line) - len(ctrl), len(line),
1518            debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % ctrl))]
1519
1520    return debugger_cli_common.RichTextLines(
1521        lines, font_attr_segs=font_attr_segs)
1522
  def _list_node_attributes(self, node_name):
    """List the attributes of a node.

    Args:
      node_name: Name of the node of which the attributes are to be listed.

    Returns:
      A RichTextLines object.
    """

    lines = []
    lines.append("")
    lines.append("Node attributes:")

    attrs = self._debug_dump.node_attributes(node_name)
    for attr_key in attrs:
      lines.append("  %s:" % attr_key)
      # Collapse multi-line attribute reprs onto a single display line.
      attr_val_str = repr(attrs[attr_key]).strip().replace("\n", " ")
      lines.append("    %s" % attr_val_str)
      lines.append("")

    return debugger_cli_common.RichTextLines(lines)
1545
1546  def _list_node_dumps(self, node_name):
1547    """List dumped tensor data from a node.
1548
1549    Args:
1550      node_name: Name of the node of which the attributes are to be listed.
1551
1552    Returns:
1553      A RichTextLines object.
1554    """
1555
1556    lines = []
1557    font_attr_segs = {}
1558
1559    watch_keys = self._debug_dump.debug_watch_keys(node_name)
1560
1561    dump_count = 0
1562    for watch_key in watch_keys:
1563      debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
1564      for datum in debug_tensor_data:
1565        line = "  Slot %d @ %s @ %.3f ms" % (
1566            datum.output_slot, datum.debug_op,
1567            (datum.timestamp - self._debug_dump.t0) / 1000.0)
1568        lines.append(line)
1569        command = "pt %s:%d -n %d" % (node_name, datum.output_slot, dump_count)
1570        font_attr_segs[len(lines) - 1] = [(
1571            2, len(line), debugger_cli_common.MenuItem(None, command))]
1572        dump_count += 1
1573
1574    output = debugger_cli_common.RichTextLines(
1575        lines, font_attr_segs=font_attr_segs)
1576    output_with_header = debugger_cli_common.RichTextLines(
1577        ["%d dumped tensor(s):" % dump_count, ""])
1578    output_with_header.extend(output)
1579    return output_with_header
1580
1581
def create_analyzer_ui(debug_dump,
                       tensor_filters=None,
                       ui_type="curses",
                       on_ui_exit=None,
                       config=None):
  """Create an instance of CursesUI based on a DebugDumpDir object.

  Args:
    debug_dump: (debug_data.DebugDumpDir) The debug dump to use.
    tensor_filters: (dict) A dict mapping tensor filter name (str) to tensor
      filter (Callable).
    ui_type: (str) requested UI type, e.g., "curses", "readline".
    on_ui_exit: (`Callable`) the callback to be called when the UI exits.
    config: A `cli_config.CLIConfig` object.

  Returns:
    (base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer
      commands and tab-completions registered.
  """
  if config is None:
    config = cli_config.CLIConfig()

  analyzer = DebugAnalyzer(debug_dump, config=config)
  if tensor_filters:
    for tensor_filter_name in tensor_filters:
      analyzer.add_tensor_filter(
          tensor_filter_name, tensor_filters[tensor_filter_name])

  cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit, config=config)

  # (command_name, handler, short alias) for every analyzer command.
  command_registrations = (
      ("list_tensors", analyzer.list_tensors, "lt"),
      ("node_info", analyzer.node_info, "ni"),
      ("list_inputs", analyzer.list_inputs, "li"),
      ("list_outputs", analyzer.list_outputs, "lo"),
      ("print_tensor", analyzer.print_tensor, "pt"),
      ("print_source", analyzer.print_source, "ps"),
      ("list_source", analyzer.list_source, "ls"),
      ("eval", analyzer.evaluate_expression, "ev"),
  )
  for command_name, handler, alias in command_registrations:
    cli.register_command_handler(
        command_name,
        handler,
        analyzer.get_help(command_name),
        prefix_aliases=[alias])

  dumped_tensor_names = [
      "%s:%d" % (datum.node_name, datum.output_slot)
      for datum in debug_dump.dumped_tensor_data]

  # Tab completions for command "print_tensors".
  cli.register_tab_comp_context(["print_tensor", "pt"], dumped_tensor_names)

  return cli
1660