/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_TOOLS_OPTIMIZE_QUANTIZATION_WRAPPER_H_
#define TENSORFLOW_LITE_TOOLS_OPTIMIZE_QUANTIZATION_WRAPPER_H_

#include <string>

namespace tflite {
namespace optimize {

// Makes a copy of the model at input_path and writes it to output_path, adding
// the tensors needed for calibration to the model.
// Returns true on success.
// Example: a/b/c.tflite becomes a/b/c.calibrated.tflite and has
// intermediate tensors added according to operator properties.
bool CreateModelForCalibration(const std::string& input_path,
                               const std::string& output_path);

// Quantizes the model at path in place. Call this only after calling
// CreateModelForCalibration and running calibration over data.
// Returns true on success.
bool CreateQuantizedModel(const std::string& path);
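
// A minimal usage sketch, not part of the API surface above: the paths are
// the hypothetical ones from the example comment, and the calibration step
// between the two calls is assumed to be performed by the caller with
// separate calibration tooling over representative data.
//
//   const std::string input = "a/b/c.tflite";
//   const std::string calibrated = "a/b/c.calibrated.tflite";
//   if (tflite::optimize::CreateModelForCalibration(input, calibrated)) {
//     // ... run calibration over representative data on `calibrated` ...
//     tflite::optimize::CreateQuantizedModel(calibrated);
//   }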

}  // namespace optimize
}  // namespace tflite

#endif  // TENSORFLOW_LITE_TOOLS_OPTIMIZE_QUANTIZATION_WRAPPER_H_