/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "absl/memory/memory.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "mlir/Dialect/Quant/FakeQuantSupport.h"  // from @llvm-project
#include "mlir/Dialect/Quant/QuantOps.h"  // from @llvm-project
#include "mlir/IR/Location.h"  // from @llvm-project
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_utils.h"
#include "tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h"

//===----------------------------------------------------------------------===//
// This pass adds default quantization parameters to the activations that
// don't have quantization information. The default parameters are usually
// not derived from real measurements, so this pass is intended for testing
// purposes only.
//===----------------------------------------------------------------------===//

namespace mlir {
namespace TFL {
// Includes an auto-generated function, which can retrieve the quantization
// specification for a TFL operation. The signature of the function is
// std::unique_ptr<OpQuantSpec> TFL::GetOpQuantSpec(Operation *)
#include "tensorflow/compiler/mlir/lite/utils/generated_op_quant_spec_getters.inc"

namespace {
class DefaultQuantParamsPass
    : public PassWrapper<DefaultQuantParamsPass, FunctionPass> {
 public:
  explicit DefaultQuantParamsPass(double default_min, double default_max,
                                  bool is_signed)
      : default_min_(default_min),
        default_max_(default_max),
        is_signed_(is_signed) {}

  void runOnFunction() override;

 private:
  // Returns whether `value` is used as a bias input of another op. Here we
  // assume a bias is consumed directly by its user, which always holds after
  // constant folding.
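  // For illustration (the operand layout below is an assumption based on the
  // TFL conv ops): a tfl.conv_2d takes (input, filter, bias), so its quant
  // spec would report biases_params = {2 -> ({0, 1}, scale_fn)}, and any
  // value feeding operand #2 is treated as a bias here.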
  bool UsedAsBias(Value value) {
    for (auto &use : value.getUses()) {
      auto biases = TFL::GetOpQuantSpec(use.getOwner())->biases_params;
      if (biases.find(use.getOperandNumber()) != biases.end()) return true;
    }
    return false;
  }

  // Uses `quant_params` to quantize `value`, inserting a pair of
  // tfl.quantize and tfl.dequantize ops for this `value`.
  void QuantizeValue(OpBuilder builder, Value value,
                     quant::QuantParams quant_params);

  // If the value hasn't been quantized, the function adds it to `values`.
  void AddToWorkListIfUnquantized(Value value, std::vector<Value> *values);

  // Converts the default min/max to the default quantization parameters.
  quant::QuantParams GetDefaultQuantParams(Builder builder);

  // Gets the quantization parameters for the bias of an operation by using
  // the quantization parameters of the non-bias operands.
  quant::QuantParams GetQuantParamsForBias(Operation *op, int bias,
                                           const std::vector<int> &non_biases,
                                           quant::AccumulatorScaleFunc func);

  double default_min_;
  double default_max_;
  bool is_signed_;
  quant::QuantParams default_quant_params_;
};
}  // namespace

void DefaultQuantParamsPass::runOnFunction() {
  FuncOp func = getFunction();
  OpBuilder builder(func);

  std::vector<Value> activation_values;
  std::vector<Value> bias_values;

  // First, collect all the values (block arguments and op results) that need
  // to be quantized.
  for (auto arg : func.getBody().begin()->getArguments()) {
    if (UsedAsBias(arg)) {
      AddToWorkListIfUnquantized(arg, &bias_values);
    } else {
      AddToWorkListIfUnquantized(arg, &activation_values);
    }
  }

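  // Walk all the ops, skipping terminators, ops whose results are not
  // quantizable, and the quantize/dequantize casts themselves, and bucket
  // each result as either a bias or an activation.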
  func.walk([&](Operation *op) {
    if (op->hasTrait<OpTrait::IsTerminator>() ||
        op->hasTrait<OpTrait::quant::NoQuantizableResult>() ||
        llvm::isa<quant::QuantizeCastOp, quant::DequantizeCastOp>(op))
      return;

    for (auto res : op->getResults()) {
      if (UsedAsBias(res)) {
        AddToWorkListIfUnquantized(res, &bias_values);
      } else {
        AddToWorkListIfUnquantized(res, &activation_values);
      }
    }
  });

  // Apply the default quantization parameters to these activation values.
  quant::QuantParams default_params = GetDefaultQuantParams(builder);
  for (Value value : activation_values) {
    QuantizeValue(builder, value, default_params);
  }

  // Since all the non-bias operands have quantization parameters now, we
  // should be able to propagate them to the bias operand.
  for (Value bias : bias_values) {
    Operation *op = *bias.user_begin();
    auto spec = TFL::GetOpQuantSpec(op);
    for (auto &it : spec->biases_params) {
      quant::QuantParams bias_params = GetQuantParamsForBias(
          op, it.first, it.second.first, it.second.second);
      if (!bias_params) continue;
      QuantizeValue(builder, bias, bias_params);
    }
  }
}

void DefaultQuantParamsPass::AddToWorkListIfUnquantized(
    Value value, std::vector<Value> *values) {
  // If the result doesn't have a float element type, it is an integer tensor
  // and doesn't require quantization.
  auto tensor_type = value.getType().dyn_cast<TensorType>();
  if (!tensor_type) {
    // Non-tensor values (e.g. NoneType) don't need quantization.
    return;
  }
  if (!tensor_type.getElementType().isF32()) return;

  // If the result's only user is a quantize op, it has already been
  // quantized.
  if (value.hasOneUse() &&
      llvm::isa<TFL::QuantizeOp>(*value.getUsers().begin()))
    return;

  // Add this result to the list so the default parameters get applied.
  values->push_back(value);
}

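// For illustration, on hypothetical IR this rewrites
//   %act = "tfl.conv_2d"(...) : (...) -> tensor<1x112x112x32xf32>
//   %use = "tfl.add"(%act, ...) : ...
// into
//   %act = "tfl.conv_2d"(...) : (...) -> tensor<1x112x112x32xf32>
//   %q = "tfl.quantize"(%act) {qtype = ...} : (...) -> tensor<...x!quant.uniform<...>>
//   %dq = "tfl.dequantize"(%q) : (...) -> tensor<1x112x112x32xf32>
//   %use = "tfl.add"(%dq, ...) : ...
// with all the former uses of %act rewired to %dq.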
void DefaultQuantParamsPass::QuantizeValue(OpBuilder builder, Value value,
                                           quant::QuantParams quant_params) {
  Type expressed_type = value.getType();
  Type new_type = quant_params.castFromExpressedType(expressed_type);
  // If the value isn't of the expressed (float) type, skip it.
  if (!new_type) return;

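  // Insert the new ops right after the defining op, or at the top of the
  // block when `value` is a block argument and has no defining op.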
  Block &block = value.getParentRegion()->front();
  Operation *op = value.getDefiningOp();
  if (op) {
    builder.setInsertionPoint(&block, ++Block::iterator(op));
  } else {
    builder.setInsertionPointToStart(&block);
  }
  TypeAttr type_attr = TypeAttr::get(new_type);
  auto quantize = builder.create<TFL::QuantizeOp>(value.getLoc(), new_type,
                                                  value, type_attr);
  auto dequantize = builder.create<TFL::DequantizeOp>(
      value.getLoc(), expressed_type, quantize.output());
  value.replaceAllUsesWith(dequantize);

  // The `replaceAllUsesWith` above also rewired `quantize` to consume
  // `dequantize`, so restore its operand to the original `value`.
  quantize.getOperation()->replaceUsesOfWith(dequantize, value);
}

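// Note: for a typical conv-style op, `func` computes the accumulator scale
// from the given operand types, i.e. bias_scale = input_scale * filter_scale
// with 32-bit storage, so the quantized bias adds directly into the
// accumulator. (This describes the usual TFLite convention; the exact
// behavior is defined by the AccumulatorScaleFunc that is passed in.)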
quant::QuantParams DefaultQuantParamsPass::GetQuantParamsForBias(
    Operation *op, int bias, const std::vector<int> &non_biases,
    quant::AccumulatorScaleFunc func) {
  std::vector<quant::QuantizedType> non_bias_types;
  non_bias_types.reserve(non_biases.size());
  for (int non_bias : non_biases) {
    Operation *non_bias_define = op->getOperand(non_bias).getDefiningOp();
    if (auto dequant = llvm::dyn_cast<TFL::DequantizeOp>(non_bias_define)) {
      auto non_bias_type = dequant.input().getType().cast<TensorType>();
      auto non_bias_ele_type =
          non_bias_type.getElementType().cast<quant::QuantizedType>();
      non_bias_types.push_back(non_bias_ele_type);
    } else {
      // This non-bias operand hasn't been quantized yet; stop collecting.
      break;
    }
  }
  // Not all the non-bias operands have quantization parameters, so skip this
  // bias.
  if (non_bias_types.size() != non_biases.size()) return {};

  return func(non_bias_types, false);
}

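// As a rough illustration (the numbers are approximate, not authoritative):
// with the default registration values default_min = -1.0, default_max = 1.0
// and is_signed = false, this yields an 8-bit type along the lines of
// !quant.uniform<u8:f32, 0.00784:128>, i.e. scale ~= 2/255 with the range
// nudged so that zero is exactly representable.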
quant::QuantParams DefaultQuantParamsPass::GetDefaultQuantParams(
    Builder builder) {
  if (!default_quant_params_) {
    default_quant_params_ = quant::fakeQuantAttrsToType(
        builder.getUnknownLoc(),
        /*numBits=*/8, default_min_, default_max_, /*narrowRange=*/false,
        builder.getF32Type(), is_signed_);
  }
  return default_quant_params_;
}

// Creates an instance of the default quant parameters pass.
std::unique_ptr<OperationPass<FuncOp>> CreateDefaultQuantParamsPass(
    double default_min, double default_max, bool is_signed) {
  return absl::make_unique<DefaultQuantParamsPass>(default_min, default_max,
                                                   is_signed);
}

// Registers this pass with default values; for testing only.
static PassRegistration<DefaultQuantParamsPass> pass(
    "tfl-default-quant",
    "Apply quantization with default quantization parameters", [] {
      return CreateDefaultQuantParamsPass(/*default_min=*/-1.0,
                                          /*default_max=*/1.0,
                                          /*is_signed=*/false);
    });
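// A hypothetical invocation in a lit/FileCheck test (tool name and flags are
// assumptions based on the registered flag above):
//   tf-opt -tfl-default-quant input.mlir
// which quantizes every float activation with the [-1.0, 1.0] default range.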

}  // namespace TFL
}  // namespace mlir