syntax = "proto3";

package tensorflow.data;

option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/dataset_options_go_proto";

// Represents the type of auto-sharding we enable.
enum AutoShardPolicy {
  // AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding.
  AUTO = 0;
  // FILE: Shards by input files (i.e. each worker will get a set of files to
  // process). When this option is selected, make sure that there are at least
  // as many files as workers. If there are fewer input files than workers, a
  // runtime error will be raised.
  FILE = 1;
  // DATA: Shards by elements produced by the dataset. Each worker will process
  // the whole dataset and discard the portion that is not for itself. Note that
  // for this mode to correctly partition the dataset elements, the dataset
  // needs to produce elements in a deterministic order.
  DATA = 2;
  // HINT: Looks for the presence of `shard(SHARD_HINT, ...)`, which is treated
  // as a placeholder to be replaced with `shard(num_workers, worker_index)`.
  HINT = 3;
  // OFF: No sharding will be performed.
  OFF = -1;
}
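
// Worked example (illustrative, not part of the schema): with 2 workers and 4
// input files f0..f3, FILE-based sharding might give worker 0 the files
// {f0, f2} and worker 1 the files {f1, f3}, whereas DATA-based sharding has
// every worker read all files and keep only its own slice of the produced
// elements, conceptually `shard(num_workers, worker_index)` as noted for HINT.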

// next: 4
message AutotuneOptions {
  // Whether to automatically tune performance knobs.
  oneof optional_enabled {
    bool enabled = 1;
  }
  // When autotuning is enabled (through autotune), determines the CPU budget to
  // use. Values greater than the number of schedulable CPU cores are allowed
  // but may result in CPU contention.
  oneof optional_cpu_budget {
    int32 cpu_budget = 2;
  }
  // When autotuning is enabled (through autotune), determines the RAM budget to
  // use. Values greater than the available RAM in bytes may result in OOM. If
  // 0, defaults to half of the available RAM in bytes.
  oneof optional_ram_budget {
    int64 ram_budget = 3;
  }
}
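
// Sketch of an AutotuneOptions message in proto text format; the values are
// illustrative assumptions, not recommendations:
//
//   enabled: true      # turn autotuning on
//   cpu_budget: 8      # allow up to 8 cores
//   ram_budget: 0      # 0 means half of the available RAM (see above)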

// next: 3
message DistributeOptions {
  // The auto-sharding policy to apply to this input pipeline.
  AutoShardPolicy auto_shard_policy = 1;
  // The number of devices attached to this input pipeline.
  oneof optional_num_devices {
    int32 num_devices = 2;
  }
}
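
// Sketch of a DistributeOptions message in proto text format; the values are
// illustrative assumptions:
//
//   auto_shard_policy: DATA  # shard by elements rather than files
//   num_devices: 4           # 4 devices attached to this input pipeline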

// next: 18
message OptimizationOptions {
  // Whether to apply default graph optimizations. If False, only graph
  // optimizations that have been explicitly enabled will be applied.
  oneof optional_apply_default_optimizations {
    bool apply_default_optimizations = 1;
  }
  reserved 2;
  reserved 3;
  reserved 4;
  reserved 5;
  // Whether to fuse filter transformations.
  oneof optional_filter_fusion {
    bool filter_fusion = 6;
  }
  // NOTE: field id 7 deleted in June 2021.
  reserved 7;
  // NOTE: field id 8 deleted in June 2021.
  reserved 8;
  // Whether to fuse map and batch transformations.
  oneof optional_map_and_batch_fusion {
    bool map_and_batch_fusion = 9;
  }
  // Whether to fuse map and filter transformations.
  oneof optional_map_and_filter_fusion {
    bool map_and_filter_fusion = 10;
  }
  // Whether to fuse map transformations.
  oneof optional_map_fusion {
    bool map_fusion = 11;
  }
  // Whether to parallelize stateless map transformations.
  oneof optional_map_parallelization {
    bool map_parallelization = 12;
  }

  // NOTE: field id 13 deleted in June 2021.
  reserved 13;

  // Whether to eliminate no-op transformations.
  oneof optional_noop_elimination {
    bool noop_elimination = 14;
  }
  // Whether to parallelize copying of batch elements. This optimization is
  // highly experimental and can cause performance degradation (e.g. when the
  // parallelization overhead exceeds the benefits of performing the data copies
  // in parallel). You should only enable this optimization if a) your input
  // pipeline is bottlenecked on batching and b) you have validated that this
  // optimization improves performance.
  oneof optional_parallel_batch {
    bool parallel_batch = 15;
  }
  // NOTE: field id 16 deleted in June 2021.
  reserved 16;
  // Whether to fuse shuffle and repeat transformations.
  oneof optional_shuffle_and_repeat_fusion {
    bool shuffle_and_repeat_fusion = 17;
  }
}
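
// Sketch of an OptimizationOptions message in proto text format; the values
// are illustrative assumptions. Because every knob is wrapped in a oneof, an
// explicitly set `false` is distinguishable from an unset field:
//
//   apply_default_optimizations: true
//   map_parallelization: true   # explicitly enabled
//   noop_elimination: false     # explicitly disabled, not merely unset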

// next: 3
message ThreadingOptions {
  // If set, it overrides the maximum degree of intra-op parallelism.
  oneof optional_max_intra_op_parallelism {
    int32 max_intra_op_parallelism = 1;
  }
  // If set, the dataset will use a private threadpool of the given size.
  oneof optional_private_threadpool_size {
    int32 private_threadpool_size = 2;
  }
}
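
// Sketch of a ThreadingOptions message in proto text format; the values are
// illustrative assumptions:
//
//   max_intra_op_parallelism: 1   # limit intra-op work to a single thread
//   private_threadpool_size: 8    # use a private threadpool of 8 threads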

// Represents how to handle external state during serialization.
enum ExternalStatePolicy {
  // External state is ignored and a warning is logged.
  POLICY_WARN = 0;
  // External state is ignored without a warning.
  POLICY_IGNORE = 1;
  // External state results in an error.
  POLICY_FAIL = 2;
}

// Message stored with Dataset objects to control how datasets are processed and
// optimized.
//
// next: 8
message Options {
  // Whether the outputs need to be produced in deterministic order.
  oneof optional_deterministic {
    bool deterministic = 1;
  }
  // The autotuning options associated with the dataset.
  AutotuneOptions autotune_options = 7;
  // The distribution strategy options associated with the dataset.
  DistributeOptions distribute_options = 2;
  // The optimization options associated with the dataset.
  OptimizationOptions optimization_options = 3;
  // Whether to introduce 'slack' in the last `prefetch` of the input pipeline,
  // if it exists. This may reduce CPU contention with accelerator host-side
  // activity at the start of a step. The slack frequency is determined by the
  // number of devices attached to this input pipeline.
  oneof optional_slack {
    bool slack = 4;
  }
  // The threading options associated with the dataset.
  ThreadingOptions threading_options = 5;
  // This option can be used to override the default policy for how to handle
  // external state when serializing a dataset or checkpointing its iterator.
  // There are three settings available - IGNORE: External state is ignored
  // without a warning; WARN: External state is ignored and a warning is logged;
  // FAIL: External state results in an error.
  oneof optional_external_state_policy {
    ExternalStatePolicy external_state_policy = 6;
  }
}
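
// Sketch of a complete Options message in proto text format, combining the
// sub-messages above; all values are illustrative assumptions:
//
//   deterministic: true
//   autotune_options { enabled: true cpu_budget: 8 }
//   distribute_options { auto_shard_policy: DATA num_devices: 4 }
//   optimization_options { map_and_batch_fusion: true }
//   slack: false
//   threading_options { private_threadpool_size: 8 }
//   external_state_policy: POLICY_WARN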