/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_DYNAMIC_PADDER_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_DYNAMIC_PADDER_H_

#include "tensorflow/compiler/xla/service/dynamic_dimension_inference.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"

namespace xla {

// With bounded shapes, only part of the shape contains effective data and the
// rest contains padded data, whose value can be anything depending on the
// source of the data. When a bounded shape is directly consumed by an
// instruction that collapses dimensions (reduce, for example), the padding
// data would affect the result of the instruction.
//
// DynamicPadder uses DynamicDimensionInference to detect bounded shapes in an
// HLO module; it then inserts certain instructions to reset the padding to an
// identity value so that it doesn't affect the result of subsequent
// instructions. For example, it'd reset the padding to 0 before a bounded
// shape is consumed by a reduce-sum.
//
// DynamicPadder removes dynamic shapes from the entry computation, and inserts
// custom calls (with dynamic shapes), which are lowered by specialized
// emitters: PadToStatic and SliceToDynamic.
39 40 // Each instruction can have one of the three modes in supporting dynamic 41 // lowering. 42 enum OpDynamismSupport { 43 // There is no support for dynamic lowering -- dynamic padder will make sure 44 // the input to that op has static bound by rewriting the op (e.g, extra space 45 // in reduce_sum will be padded with 0). 46 kNoSupport = 0, 47 // The op can take either dynamic input or static input. 48 kOptional, 49 // The op only has a dynamic lowering, dynamic padder will make sure the input 50 // to this op is in dynamic form. 51 kRequired, 52 }; 53 54 class DynamicPadder : public HloModulePass { 55 public: 56 // Returns true if given instruction supports native dynamic lowering. If so, 57 // dynamic padder will not attempt to pad it. 58 using OpSupportsDynamismHandler = 59 std::function<OpDynamismSupport(HloInstruction*)>; 60 61 // If `slice_dynamic_output` is true, insert 'slice_to_dynamic' ops to all 62 // outputs that are inferred to be dynamic. 63 explicit DynamicPadder( 64 bool slice_dynamic_output = true, 65 DynamicDimensionInference::CustomCallInferenceHandler 66 custom_call_handler = nullptr, 67 OpSupportsDynamismHandler op_supports_dynamism_handler = nullptr) slice_dynamic_output_(slice_dynamic_output)68 : slice_dynamic_output_(slice_dynamic_output), 69 custom_call_handler_(custom_call_handler), 70 op_supports_dynamism_handler_(op_supports_dynamism_handler) {} 71 name()72 absl::string_view name() const override { return "dynamic_padder"; } 73 74 StatusOr<bool> Run(HloModule* module) override; 75 76 private: 77 // Insert 'slice_to_dynamic' ops to all outputs that are inferred to be 78 // dynamic. 79 bool slice_dynamic_output_; 80 81 // A handler for dynamic dimension inference of custom calls. 82 DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler_; 83 84 // A handler to indicate if a given hlo instruction support native dynamism 85 // lowering. 
86 OpSupportsDynamismHandler op_supports_dynamism_handler_; 87 }; 88 89 } // namespace xla 90 91 #endif // TENSORFLOW_COMPILER_XLA_SERVICE_DYNAMIC_PADDER_H_ 92