syntax = "proto3";

package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "RewriterConfigProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
// add go_package externally with copybara

import "tensorflow/core/framework/attr_value.proto";
import "tensorflow/core/protobuf/verifier_config.proto";

message AutoParallelOptions {
  bool enable = 1;
  int32 num_replicas = 2;
}

message ScopedAllocatorOptions {
  // If present, only perform optimization for these ops.
  repeated string enable_op = 1;
}
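
// A minimal textproto sketch of the two messages above, as they might appear
// inside a RewriterConfig (the op name is illustrative, not prescribed here):
//
//   auto_parallel {
//     enable: true
//     num_replicas: 2
//   }
//   scoped_allocator_opts {
//     enable_op: "CollectiveReduce"
//   }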

message RewriterConfig {
  // Graph rewriting is experimental and subject to change, not covered by any
  // API stability guarantees.

  // Configuration options for the meta-optimizer. Unless otherwise noted, these
  // configuration options do not apply to explicitly triggered optimization
  // passes in the optimizers field.

  enum Toggle {
    DEFAULT = 0;
    ON = 1;
    OFF = 2;
    // Enable some aggressive optimizations that rely on assumptions that TF
    // graphs may violate. For example, assume the shape of a placeholder
    // matches its actual feed.
    AGGRESSIVE = 3;
  }

  // Enum controlling the number of times to run optimizers. The default is to
  // run them twice.
  enum NumIterationsType {
    DEFAULT_NUM_ITERS = 0;
    ONE = 1;
    TWO = 2;
  }
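
  // For example, a RewriterConfig that limits the meta-optimizer to a single
  // pass over an aggressively optimized graph could read, in textproto
  // (a sketch):
  //
  //   meta_optimizer_iterations: ONE
  //   arithmetic_optimization: AGGRESSIVE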

  // Optimize tensor layouts (default is ON).
  // e.g. This will try to use the NCHW layout on GPUs, which is faster.
  Toggle layout_optimizer = 1;
  // Fold constants (default is ON).
  // Statically infer the value of tensors when possible, and materialize the
  // result using constants.
  Toggle constant_folding = 3;
  // Shape optimizations (default is ON).
  // Simplify computations made on shapes.
  Toggle shape_optimization = 13;
  // Remapping (default is ON).
  // Remap subgraphs onto more efficient implementations.
  Toggle remapping = 14;
  // Arithmetic optimizations (default is ON).
  // e.g. Simplify arithmetic ops; merge ops with same value (like constants).
  Toggle arithmetic_optimization = 7;
  // Control dependency optimizations (default is ON).
  // Remove redundant control dependencies, which may enable other
  // optimizations.
  Toggle dependency_optimization = 8;
  // Loop optimizations (default is ON).
  Toggle loop_optimization = 9;
  // Function optimizations (default is ON).
  Toggle function_optimization = 10;
  // Strips debug-related nodes from the graph (off by default).
  Toggle debug_stripper = 11;
  // If true, don't remove unnecessary ops from the graph.
  bool disable_model_pruning = 2;
  // Try to allocate some independent Op outputs contiguously in order to
  // merge or eliminate downstream Ops (off by default).
  Toggle scoped_allocator_optimization = 15;
  // Force small ops onto the CPU (default is OFF).
  Toggle pin_to_host_optimization = 18;
  // Enable the swap of kernel implementations based on the device placement
  // (default is ON).
  Toggle implementation_selector = 22;
  // Disable the entire meta optimizer (off by default).
  bool disable_meta_optimizer = 19;
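
  // e.g. Turning off all graph rewriting, in textproto (a sketch):
  //
  //   disable_meta_optimizer: true
  //
  // or disabling individual passes while leaving the rest at their defaults:
  //
  //   remapping: OFF
  //   constant_folding: OFF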

  // Controls how many times we run the optimizers in the meta optimizer
  // (default is twice, per NumIterationsType above).
  NumIterationsType meta_optimizer_iterations = 12;

  // The minimum number of nodes in a graph to optimize. For smaller graphs,
  // optimization is skipped.
  // 0 means the system picks an appropriate number.
  // < 0 means do not skip optimization.
  int32 min_graph_nodes = 17;

  enum MemOptType {
    // The default setting (SCHEDULING_HEURISTICS and SWAPPING_HEURISTICS only).
    DEFAULT_MEM_OPT = 0;
    // Disabled in the meta-optimizer.
    NO_MEM_OPT = 1;
    // Driven by manual op-level annotations.
    MANUAL = 2;

    // Driven by heuristics. The behavior of these heuristics is subject to
    // change. This currently includes experimental recomputation and swapping
    // heuristics. Manual annotations are respected, but additional nodes are
    // selected automatically.

    // The swapping heuristic will move a tensor from the GPU to the CPU and
    // move it back when needed to reduce peak memory usage.
    SWAPPING_HEURISTICS = 4;
    // Recomputation heuristics will recompute ops (such as Relu activation)
    // during backprop instead of storing them, reducing peak memory usage.
    RECOMPUTATION_HEURISTICS = 5;
    // Scheduling will split big ops such as AddN and try to enforce a schedule
    // of the new computations that decreases peak memory usage.
    SCHEDULING_HEURISTICS = 6;
    // Use any combination of swapping and recomputation heuristics.
    HEURISTICS = 3;
  }
  // Configures memory optimization passes through the meta-optimizer. Has no
  // effect on manually requested memory optimization passes in the optimizers
  // field.
  MemOptType memory_optimization = 4;
  // A node name scope for node names which are valid outputs of recomputations.
  // Inputs to nodes that match this scope may be recomputed (subject either to
  // manual annotation of those input nodes or to manual annotation and
  // heuristics depending on memory_optimization), but the nodes themselves will
  // not be recomputed. This matches any sub-scopes as well, meaning the scope
  // can appear not just as a top-level scope. For example, with the default
  // value "gradients/", it will match the node names "gradients/foo" and
  // "foo/gradients/bar", but not "foo_gradients/".
  string memory_optimizer_target_node_name_scope = 6;
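
  // e.g. Enabling recomputation for inputs to nodes under the default
  // "gradients/" scope, in textproto (a sketch):
  //
  //   memory_optimization: RECOMPUTATION_HEURISTICS
  //   memory_optimizer_target_node_name_scope: "gradients/"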
  // Maximum number of milliseconds to spend optimizing a single graph before
  // timing out. If equal to 0 the system picks a default (currently 5 minutes).
  // If less than 0 the optimizer will never time out.
  int64 meta_optimizer_timeout_ms = 20;

  // Configures AutoParallel optimization passes either through the
  // meta-optimizer or when manually specified through the optimizers field.
  AutoParallelOptions auto_parallel = 5;

  // If true, any optimization pass failing will cause the MetaOptimizer to
  // stop with an error. By default, or when set to false, failing passes are
  // skipped silently.
  bool fail_on_optimizer_errors = 21;

  ScopedAllocatorOptions scoped_allocator_opts = 16;

  // If non-empty, will use this as an alternative way to specify a list of
  // optimizations to turn on and the order of the optimizations (replacing the
  // meta-optimizer).
  //
  // Of the RewriterConfig options, only the AutoParallel configuration options
  // (the auto_parallel field) apply to manually requested optimization passes
  // ("autoparallel"). Memory optimization passes ("memory") invoked here are
  // not configurable (in contrast to memory optimization passes through the
  // meta-optimizer) and act only on manual op annotations.
  //
  // Custom optimizers (see custom_optimizers) that are not part of this
  // schedule will be run afterwards, in the order in which they were specified.
  repeated string optimizers = 100;
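
  // e.g. In textproto (a sketch; "autoparallel" and "memory" are the pass
  // names mentioned above, "constfold" is an illustrative constant-folding
  // pass name):
  //
  //   optimizers: "constfold"
  //   optimizers: "autoparallel"
  //   optimizers: "memory"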

  // Message to describe a custom graph optimizer and its parameters.
  message CustomGraphOptimizer {
    string name = 1;
    map<string, AttrValue> parameter_map = 2;
  }

  // List of CustomGraphOptimizers to apply.
  repeated CustomGraphOptimizer custom_optimizers = 200;
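
  // e.g. In textproto (a sketch; the optimizer name and parameter are
  // hypothetical and must match a registered custom optimizer):
  //
  //   custom_optimizers {
  //     name: "MyCustomOptimizer"
  //     parameter_map {
  //       key: "threshold"
  //       value { i: 10 }
  //     }
  //   }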

  // VerifierConfig specifying the verifiers to be run after every optimizer.
  VerifierConfig inter_optimizer_verifier_config = 300;

  // VerifierConfig specifying the verifiers to be run at the end, after all
  // optimizers have run.
  VerifierConfig post_optimization_verifier_config = 301;
}
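
// Example: a full RewriterConfig as it is typically consumed, i.e. as the
// graph_options.rewrite_options field of a ConfigProto, shown in textproto
// (a sketch; the values are illustrative):
//
//   graph_options {
//     rewrite_options {
//       constant_folding: OFF
//       memory_optimization: HEURISTICS
//       meta_optimizer_iterations: TWO
//       min_graph_nodes: -1
//     }
//   }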