• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_GRAPH_H_
16 #define TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_GRAPH_H_
17 
18 #include <vector>
19 
20 #include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
21 #include "tensorflow/core/framework/graph.pb.h"
22 #include "tensorflow/core/grappler/clusters/cluster.h"
23 #include "tensorflow/core/grappler/costs/graph_properties.h"
24 #include "tensorflow/core/lib/core/status.h"
25 #include "tensorflow/core/platform/types.h"
26 
27 #if GOOGLE_CUDA
28 #if GOOGLE_TENSORRT
29 
30 namespace tensorflow {
31 namespace tensorrt {
32 namespace convert {
33 
// Helper class for the segmenter to determine whether a given TF node is
// supported by TRT (i.e. whether it can be placed inside a TRT subgraph).
class TrtCandidateSelector {
 public:
  // 'graph_properties' must outlive this object: it is held by reference
  // (see graph_properties_ below) and queried during candidate validation.
  TrtCandidateSelector(const grappler::GraphProperties& graph_properties,
                       TrtPrecisionMode precision_mode);

  // Returns OK iff 'node' is a TF-TRT conversion candidate, which will be added
  // to TRT subgraph and later converted into TRT engine. A non-OK status
  // carries the reason the node was rejected.
  Status IsTensorRTCandidate(const Node* node);

 private:
  // The TF-TRT node converter used to verify whether individual node is
  // supported. It will operate in validation-only mode.
  TrtNodeValidator validator_;

  // GraphProperties of the graph whose nodes are to be validated by
  // IsTensorRTCandidate(). Non-owning reference; the caller of the
  // constructor keeps it alive.
  const grappler::GraphProperties& graph_properties_;

  // Quantization ops are only converted when using quantized precisions.
  const TrtPrecisionMode precision_mode_;
};
57 
// Parameter bundle for the TF-to-TRT conversion; consumed by
// ConvertAfterShapes() below. All pointer members are non-owning and must
// outlive the conversion call.
struct ConversionParams {
  // The graph to convert.
  const GraphDef* input_graph_def = nullptr;
  // Names of the output nodes that must be preserved by the conversion.
  const std::vector<string>* output_names = nullptr;
  // Maximum batch size the built engines may be used with at inference time.
  size_t max_batch_size = 1;
  // Upper bound of memory allowance for engine building (default 1 GiB).
  size_t max_workspace_size_bytes = 1 << 30;
  // Destination for the converted graph.
  GraphDef* output_graph_def = nullptr;
  // Precision mode to use for the TRT engines.
  TrtPrecisionMode precision_mode = TrtPrecisionMode::FP32;
  // NOTE(review): presumably the minimum number of nodes required for a
  // segment to be converted into a TRT engine — confirm against the segmenter.
  int minimum_segment_size = 3;
  // Shape/type information for the graph's nodes, used to validate
  // conversion candidates (see TrtCandidateSelector).
  const grappler::GraphProperties* graph_properties = nullptr;
  // Grappler cluster describing the available devices; may be null.
  const grappler::Cluster* cluster = nullptr;
  // Whether to create engine on conversion or execution time
  bool is_dyn_op = false;
  // maximum number of cached engines
  int max_cached_engines = 1;
  // Whether to run INT8 calibration (only meaningful for quantized
  // precision modes).
  bool use_calibration = true;
  // list of cached engines (batch sizes to pre-build engines for)
  std::vector<int> cached_engine_batches;
  // Whether to use function fallback for TRTEngineOp
  bool use_function_backup = true;
};
78 
// Converts 'graph_def' into 'new_graph_def', with TRT-compatible subgraphs
// handled by TRT engines. Returns OK on success, otherwise an error status.
//
// - output_names: graph outputs that the conversion must preserve.
// - max_batch_size: maximum batch size which can be used for inference for
//   optimization targets inference run with max batch size.
// - max_workspace_size_bytes: The upper bound of memory allowance for engine
//   building.
// The remaining defaulted parameters mirror the same-named fields of
// ConversionParams; see the comments there.
Status ConvertGraphDefToTensorRT(
    const GraphDef& graph_def, const std::vector<string>& output_names,
    size_t max_batch_size, size_t max_workspace_size_bytes,
    GraphDef* new_graph_def,
    TrtPrecisionMode precision_mode = TrtPrecisionMode::FP32,
    int minimum_segment_size = 3, bool is_dyn_op = false,
    int max_cached_engines = 1, std::vector<int> cached_engine_batches = {},
    bool use_calibration = true);
91 
// Method to call from optimization pass. Performs the conversion configured
// by 'params'; the name suggests it expects shape inference to have already
// populated params.graph_properties — verify against the calling pass.
Status ConvertAfterShapes(const ConversionParams& params);
94 
// Helper method for the conversion, expose for testing. Returns a pair of
// (presumably a TF device id, and the allocator for that device) to be used
// when building 'engine' — confirm the int's exact meaning in the .cc.
std::pair<int, Allocator*> GetDeviceAndAllocator(const ConversionParams& params,
                                                 const EngineInfo& engine);
98 
99 }  // namespace convert
100 }  // namespace tensorrt
101 }  // namespace tensorflow
102 
103 #endif  // GOOGLE_TENSORRT
104 #endif  // GOOGLE_CUDA
105 
106 #endif  // TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_GRAPH_H_
107