/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This is the quantization definition file for TensorFlow.

#ifdef TF_Quantization
#else
#define TF_Quantization

include "mlir/IR/OpBase.td"
include "mlir/Dialect/Quant/QuantOpsBase.td"

//===----------------------------------------------------------------------===//
// QuantizedType definitions.
//===----------------------------------------------------------------------===//

// The base class of a quantized type.
class QuantizedType<string n, list<int> params, bit signed>
  : Type<And<[CPred<"$_self.isa<mlir::quant::QuantizedType>()">,
              CPred<"$_self.cast<mlir::quant::QuantizedType>()" #
                    ".getStorageTypeIntegralWidth() == " # !head(params)>]>,
    "Q" # !if (signed, "I", "UI") # !head(params) # " type"> {
  string name = n;
  string asTraitArgsStr =
    !interleave(params, ", ") # !if(signed, ", true", ", false");
}

// Uniform quantized types. Two integers "smantissa" and "sexp" are used to
// express the mantissa and exponent of the floating-point scale, so the scale
// of the quantized type is "smantissa * 10 ^ sexp".
class UInt8UniformQuantizedType<int zero_pt, int smantissa, int sexp>
    : QuantizedType<"Uniform",
                        [8, zero_pt, smantissa, sexp, 0, 255], 0>;
class Int8UniformQuantizedType<int zero_pt, int smantissa, int sexp>
    : QuantizedType<"Uniform",
                        [8, zero_pt, smantissa, sexp, -128, 127], 1>;

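// Example (illustrative, not part of the original file): following the scale
// encoding above, Int8UniformQuantizedType<0, 15, -1> describes a signed
// 8-bit uniform quantized type with zero point 0 and scale
// 15 * 10 ^ -1 = 1.5, stored in the range [-128, 127].
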
// General uniform quantized types. These definitions can be used to specify
// operands' tensor types.
def QUI8 : QuantizedType<"Uniform", [8], 0>;
def QI8 : QuantizedType<"Uniform", [8], 1>;
def QUI16 : QuantizedType<"Uniform", [16], 0>;
def QI16 : QuantizedType<"Uniform", [16], 1>;
def QUI32 : QuantizedType<"Uniform", [32], 0>;
def QI32 : QuantizedType<"Uniform", [32], 1>;

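// Example (illustrative, not part of the original file): these definitions
// are type constraints, so an operand declaration such as
// TensorOf<[QI8, QUI8]> would accept tensors whose element type is an 8-bit
// quantized type.
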
//===----------------------------------------------------------------------===//
// TFL native op traits (for quantization).
//
// Ops listed at the link below should have these traits specified:
// https://www.tensorflow.org/lite/performance/quantization_spec
//===----------------------------------------------------------------------===//

// TODO(b/157870442): replace all FixedResultScale traits.
def FixedOutputRangeInterface : OpInterface<
  "FixedOutputRangeInterface"> {
  let description = [{
    Interface for defining the fixed output range.
  }];

  let methods = [
    InterfaceMethod<
      [{Returns the fixed output range.}],
      "UniformQuantizedType", "GetFixedOutputRange",
      (ins "bool":$sign, "int":$bit_width)
    >,
  ];
}

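// Example (illustrative, not part of the original file): an op can opt into
// this interface by listing
// DeclareOpInterfaceMethods<FixedOutputRangeInterface> among its traits and
// defining GetFixedOutputRange in C++ to return the UniformQuantizedType of
// its fixed output range.
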
// TODO(b/157870442): extend this interface to replace the other two
// traits/interfaces, AccumulatorUniformScale and AffineOpCoefficient, which
// are used to generate the op quantization specs.
def AffineQuantizedOpInterface : OpInterface<
  "AffineQuantizedOpInterface"> {
  let description = [{
    Interface for affine quantized ops (conv2d, fully_connected, etc.)
  }];

  let methods = [
    InterfaceMethod<
      [{Returns the affine operand index.}],
      "int", "GetAffineOperandIndex",
      (ins), [{}], [{return 1;}]>,
    InterfaceMethod<
      [{Returns whether narrow range is required for the affine operand.}],
      "bool", "RequiredNarrowRangeAffineOperand",
      (ins), [{}], [{return true;}]>,
    InterfaceMethod<
      [{Returns quantization dim for the affine operand.}],
      "int", "GetQuantizationDimIndex",
      (ins)>,
    InterfaceMethod<
      [{Returns the dimension index of the output channels.}],
      "int", "GetChannelDimIndex", (ins)
    >,
  ];
}

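// Example (illustrative, not part of the original file): for a conv2d-like op
// the affine operand is usually the filter (operand index 1, the default
// above), quantized with narrow range; GetQuantizationDimIndex and
// GetChannelDimIndex would then return the output-channel dimension of that
// filter.
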
def SameOperandsAndResultsScale : OpInterface<"SameScalesOpInterface"> {
  let description = [{
    Interface for ops that potentially have the same operands and results
    scales.
  }];

  let methods = [
    InterfaceMethod<
      [{Returns whether same operands and results scales are required.}],
      "bool", "RequiredSameOperandsAndResultsScale",
      (ins "bool":$sign, "int":$bit_width), [{}], [{return true;}]
    >,
  ];

  let verify = [{
    return quant::VerifySameScales($_op);
  }];
}

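// Example (illustrative, not part of the original file): ops such as reshape
// or max_pool_2d, whose operands and results the quantization spec requires
// to share the same scale, would declare SameOperandsAndResultsScale in their
// trait list.
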
// Specify this trait if the op has a fixed output value range.
class FixedResultScale<QuantizedType qt> : NativeOpTrait<!strconcat(
  "quant::FixedResult", qt.name, "Scale<", qt.asTraitArgsStr, ">::Impl")>;

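// Example (illustrative, not part of the original file): an op whose int8
// output is fixed to zero point -128 and scale 1/256 (= 390625 * 10 ^ -8)
// could declare FixedResultScale<Int8UniformQuantizedType<-128, 390625, -8>>.
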
// Specify this trait if the bias-th input of the op is a bias input whose
// scale should be derived from the scales of operands op1 and op2.
class AccumulatorUniformScale<int bias, int op1, int op2> : NativeOpTrait<
  !strconcat("quant::AccumulatorUniformScale<",
             !interleave([bias, op1, op2], ", "),
             ">::Impl")>;

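// Example (illustrative, not part of the original file):
// AccumulatorUniformScale<2, 0, 1> states that operand 2 is the bias and that
// its scale is derived from the scales of operands 0 and 1 (e.g. the input
// and filter of a conv-like op).
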
// Specify the operand index of the coefficient operand for an affine op
// and also the quantization dimension if per-axis quantization is supported.
// If the quantization dimension is -1, per-axis quantization isn't supported.
class AffineOpCoefficient<int dim, int index> : NativeOpTrait<
  !strconcat("quant::AffineOpCoefficient<",
             !interleave([dim, index], ", "),
             ">::Impl")>;

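// Example (illustrative, not part of the original file):
// AffineOpCoefficient<0, 1> marks operand 1 as the coefficient, per-axis
// quantized along dimension 0, while AffineOpCoefficient<-1, 1> marks
// operand 1 as per-tensor quantized only.
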
// Specify this trait if the op doesn't have a quantizable output; quantization
// shouldn't be applied to this op.
def NoQuantizableResult : NativeOpTrait<"quant::NoQuantizableResult">;

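// Example (illustrative, not part of the original file): ops whose results
// are not meaningfully quantized (for instance, comparison ops producing
// boolean tensors) would carry NoQuantizableResult so the quantizer leaves
// their outputs unquantized.
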
#endif // TF_Quantization