/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_MLIR_TFRT_TRANSLATE_TFRT_COMPILE_OPTIONS_H_
#define TENSORFLOW_COMPILER_MLIR_TFRT_TRANSLATE_TFRT_COMPILE_OPTIONS_H_

#include <cstdint>  // for int64_t / uint64_t used below
#include <string>
#include <vector>

namespace tensorflow {

// Options controlling how a TensorFlow graph is compiled to the TFRT dialect.
// This is a passive options bag: all members are public with in-class default
// values, and there are no invariants beyond the individual field comments.
struct TfrtCompileOptions {
  // TODO(tfrt-devs): Ideally, compiler should make the decision where
  // to place the variable.
  std::string variable_device = "/job:localhost/replica:0/task:0/device:CPU:0";
  std::string default_device = "/job:localhost/replica:0/task:0/device:CPU:0";

  // Enable compiler optimization in TFRT dialect.
  bool enable_optimizer = true;

  // If true, native ops will be used if they are implemented in TFRT. If
  // false, all ops are using fallback.
  bool enable_native_ops = true;

  // If true, run grappler passes before compiling.
  bool enable_grappler = false;

  // Force data format for all layout sensitive operations, eg. setting it to
  // "NHWC" will change all data formats in the graph to "NHWC" by inserting
  // or removing related tf.Transpose op. Currently the supported formats are
  // "NHWC" and "NCHW".
  //
  // TODO(tfrt-devs): Ideally compiler should figure out whether the
  // data format should be changed, instead of controlled by users.
  std::string force_data_format;

  // TODO(tfrt-devs): Ideally, compiler should work for both CPU and TPU
  // models and no need to manually specify this option.
  // If true, the compiler runs TPU specific passes and the runtime does TPU
  // specific initializations (e.g. create the TpuVariablesTable).
  bool target_tpu = false;

  // If true, the compiler will try to hoist invariant ops (e.g., const ops and
  // their non-side-effecting consumers) to loading phase, which avoids the
  // runtime cost during later running.
  // TODO(tfrt-devs): Set the default value to true after testing as it is
  // supposed to be turned on by default.
  bool hoist_invariant_ops = false;

  // A set of flags to control auto-fusion: automatic clustering of Tensorflow
  // operations and compiling outlined regions using MLIR based compilation
  // stack.
  //
  // WARNING: These flags are experimental and are intended for manual testing
  // of different auto-fusion strategies. They will be removed in the future.

  // A list of Tensorflow operations that are supported by auto-fusion
  // clustering and compilation (e.g. tf.Tanh).
  std::vector<std::string> auto_fusion_oplist;

  // Minimum size of the cluster to be compiled at runtime.
  int auto_fusion_min_cluster_size = 2;

  // The cost threshold to decide whether a sequence of operations is cheap, and
  // then whether it can be executed inline. If the cost is smaller than the
  // threshold, it will be considered as cheap operations. Since the cost must
  // be positive integers, setting the threshold to 1 makes all operations
  // expensive.
  uint64_t cost_threshold = 1;

  // The threshold to decide whether an inline execution sequence is too large
  // even if the operations form a sequential data dependency as it may occupy
  // the CPU core for too long. In that case, they are broken into multiple
  // sequences. The default is -1 which means no limit.
  int64_t upper_cost_threshold = -1;

  // If true, streams with inter data dependencies will be preferred to be
  // merged for inline execution.
  bool merge_inter_dependent_streams = false;
};

}  // namespace tensorflow

#endif  // TENSORFLOW_COMPILER_MLIR_TFRT_TRANSLATE_TFRT_COMPILE_OPTIONS_H_