/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/tools/versioning/op_version.h"

#include <algorithm>
#include <string>
#include <utility>
#include <vector>

#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"

namespace tflite {
namespace {

bool NeedBroadcastForBinaryInputs(const OpSignature& op_sig) {
  if (op_sig.inputs.size() < 2) {
    return false;
  }
  return (op_sig.inputs.at(0).dims != op_sig.inputs.at(1).dims);
}

int GetInputMaxDims(const OpSignature& op_sig) {
  int max_dims = 0;
  for (auto& input : op_sig.inputs) {
    if (input.dims.size() > max_dims) {
      max_dims = input.dims.size();
    }
  }
  return max_dims;
}
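
// For example (illustrative shapes, not taken from a real model): with binary
// inputs of dims {1, 2, 3, 4, 5} and {5}, NeedBroadcastForBinaryInputs()
// returns true and GetInputMaxDims() returns 5; several binary ops below use
// this pair of checks to require a newer version when broadcasting over more
// than 4 dimensions.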

}  // namespace

int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
  switch (op_sig.op) {
    case BuiltinOperator_CONV_2D:
      // If the op has signed int16 inputs and outputs, it is version 4.
      if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.inputs.at(1).type == kTfLiteInt16 &&
          op_sig.outputs.at(1).type == kTfLiteInt16) {
        return 4;
      }

      // If the op has signed int8 inputs and outputs, it is version 3.
      if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
          op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteInt8) {
        return 3;
      }
      // If the op is a signed int8 hybrid operation, we need to return
      // version 2, or 5 if per-channel quantized.
      if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
          op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        if (op_sig.ext_options.conv_2d.is_per_channel_quantized) {
          return 5;
        }
        return 2;
      }
      return 1;

    case BuiltinOperator_DEPTHWISE_CONV_2D: {
      // If the op accepts int16, we return version 5.
      if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.inputs.at(1).type == kTfLiteInt16 &&
          op_sig.outputs.at(1).type == kTfLiteInt16) {
        return 5;
      }

      // If the op is a signed int8 hybrid operation, we need to return
      // version 4, or 6 if per-channel quantized.
      if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
          op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        if (op_sig.ext_options.depthwise_conv_2d.is_per_channel_quantized) {
          return 6;
        }
        return 4;
      }
      // If the op has signed int8 inputs and outputs, it is version 3.
      if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
          op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteInt8) {
        return 3;
      }
      auto depthwise_conv_params =
          reinterpret_cast<TfLiteDepthwiseConvParams*>(op_sig.builtin_data);
      TFLITE_DCHECK(depthwise_conv_params != nullptr);
      if (depthwise_conv_params->dilation_width_factor != 1 ||
          depthwise_conv_params->dilation_height_factor != 1) {
        return 2;
      }
      return 1;
    }

    case BuiltinOperator_FAKE_QUANT: {
      auto fake_quant_params =
          reinterpret_cast<TfLiteFakeQuantParams*>(op_sig.builtin_data);
      TFLITE_DCHECK(fake_quant_params != nullptr);
      if (fake_quant_params->narrow_range) {
        return 2;
      }
      return 1;
    }

    case BuiltinOperator_FULLY_CONNECTED: {
      // +-----------------+--------------------+--------------------------+
      // |                 |    Weight::Default | Weight::Shuffled4x16Int8 |
      // +-----------------+--------------------+--------------------------+
      // | Float           |                  1 |                        2 |
      // | Quantized Uint8 |                  1 |                        2 |
      // | Hybrid          |                  3 |                        3 |
      // | Quantized Int8  |                  4 |                        4 |
      // +-----------------+--------------------+--------------------------+

      // FullyConnected with sparse weight is supported at version 8.
      if (op_sig.ext_options.fully_connected.sparse_weight) {
        return 8;
      }

      // Int16 fully fixed point kernel is at version 7.
      if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.inputs.at(1).type == kTfLiteInt16 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
        return 7;
      }

      // The 2-input (no bias) use case is supported starting from version 6.
      if (op_sig.inputs.size() == 2) {
        return 6;
      }
      auto fully_connected_params =
          reinterpret_cast<TfLiteFullyConnectedParams*>(op_sig.builtin_data);
      TFLITE_DCHECK(fully_connected_params != nullptr);
      // `keep_num_dims` is supported at version 5.
      if (fully_connected_params->keep_num_dims) {
        return 5;
      }
      // Int8 fully fixed point kernel is at version 4.
      if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
          op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteInt8) {
        return 4;
      }
      // If the op is a signed int8 hybrid operation, we need to return
      // version 3.
      if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
          op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        if (fully_connected_params->asymmetric_quantize_inputs) {
          // This is to use the updated quantization scheme.
          return 9;
        }
        return 3;
      }
      // For float and uint8 fixed point kernels, if the weight is
      // Shuffled4x16Int8, it is version 2.
      if (fully_connected_params->weights_format ==
          kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8) {
        return 2;
      }
      // Otherwise (weight is default), the version is 1.
      return 1;
    }

    case BuiltinOperator_GATHER: {
      auto gather_params =
          reinterpret_cast<TfLiteGatherParams*>(op_sig.builtin_data);
      if (gather_params && gather_params->batch_dims != 0) {
        return 5;
      }

      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 4;
      }
      // If the op takes bool input, it is version 3.
      if (op_sig.inputs.at(0).type == kTfLiteBool) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;
    }

    case BuiltinOperator_SVDF: {
      // Fully integer SVDF has int8 as input and is of version 3.
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 3;
      }
      // If the op is a signed int8 hybrid operation, we need to return
      // version 2.
      if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
          op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        auto svdf_params =
            reinterpret_cast<TfLiteSVDFParams*>(op_sig.builtin_data);
        // This is to use the updated quantization scheme
        if (svdf_params && svdf_params->asymmetric_quantize_inputs) {
          return 4;
        }
        return 2;
      }
      return 1;
    }

    case BuiltinOperator_MUL:
      // Version 5 supports int64 inputs
      if (op_sig.inputs.at(0).type == kTfLiteInt64) {
        return 5;
      }
      // Version 4 supports int16 inputs
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 4;
      }
      // Version 3 supports a rescale value greater than or equal to 1.
      if (op_sig.ext_options.mul.input1_scale != 0 &&
          op_sig.ext_options.mul.input2_scale != 0 &&
          op_sig.ext_options.mul.output_scale != 0 &&
          (op_sig.ext_options.mul.input1_scale *
           op_sig.ext_options.mul.input2_scale /
           op_sig.ext_options.mul.output_scale) >= 1.0) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_MAX_POOL_2D:
    case BuiltinOperator_AVERAGE_POOL_2D:
      if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
        return 3;
      }

      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_TRANSPOSE:
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 5;
      }
      if (op_sig.inputs.at(0).dims.size() > 4) {
        return 4;
      }
      // If the op takes bool input, it is version 3.
      if (op_sig.inputs.at(0).type == kTfLiteBool) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_TRANSPOSE_CONV: {
      if (op_sig.inputs.size() == 4 &&
          op_sig.inputs.at(3).type != kTfLiteNoType) {
        return 3;
      }
      // If the op takes int8 input, it is version 2.
      if (op_sig.inputs.at(1).type == kTfLiteInt8) {
        return 2;
      }
      return 1;
    }

    case BuiltinOperator_LSTM: {
      // If the input tensor is float and a weight is int8, this is a version
      // 3 hybrid operation.
      auto lstm_params =
          reinterpret_cast<TfLiteLSTMParams*>(op_sig.builtin_data);
      TFLITE_DCHECK(lstm_params != nullptr);
      if (lstm_params->kernel_type == kTfLiteLSTMFullKernel &&
          op_sig.inputs.at(0).type == kTfLiteFloat32 &&
          op_sig.inputs.at(2).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        if (lstm_params->asymmetric_quantize_inputs) {
          return 4;
        }
        return 3;
      }
      // KERNEL_BASIC was added in version 2.
      if (lstm_params->kernel_type == kTfLiteLSTMBasicKernel) {
        return 2;
      }
      return 1;
    }

    case BuiltinOperator_SPLIT:
      // If the op takes int16 input, it is version 4.
      if (op_sig.inputs.at(1).type == kTfLiteInt16) {
        return 4;
      }
      // If the op takes int8 input, it is version 2; for int32, it is
      // version 3. The input tensor is at index 1, not 0; index 0 is the axis.
      if (op_sig.inputs.at(1).type == kTfLiteInt32) {
        return 3;
      }
      if (op_sig.inputs.at(1).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_SPARSE_TO_DENSE:
      // Version 3 supports Int8 and Uint8 types.
      if (op_sig.inputs.at(2).type == kTfLiteInt8 ||
          op_sig.inputs.at(2).type == kTfLiteUInt8) {
        return 3;
      }
      // Version 2 supports Int64 value type.
      if (op_sig.inputs.at(2).type == kTfLiteInt64) {
        return 2;
      }
      return 1;

    case BuiltinOperator_SLICE:
      if (op_sig.inputs.at(0).dims.size() > 4) {
        return 5;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 4;
      }
      // Version 3 supports string input types.
      if (op_sig.inputs.at(0).type == kTfLiteString) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_UNPACK:
      // If the op takes int8/uint8 input, it is version 2.
      if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
          op_sig.inputs.at(0).type == kTfLiteUInt8) {
        return 2;
      }
      // If the op takes bool input, it is version 3.
      if (op_sig.inputs.at(0).type == kTfLiteBool) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
        return 4;
      }
      return 1;

    case BuiltinOperator_DEQUANTIZE:
      // Version 3 supports signed int16 and float16 input types.
      if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
          op_sig.inputs.at(0).type == kTfLiteFloat16) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        if (op_sig.ext_options.dequantize.is_per_channel_quantized) {
          return 5;
        }
        return 2;
      }
      return 1;

    case BuiltinOperator_FLOOR_DIV:
      if (op_sig.inputs.at(0).type == kTfLiteFloat32) {
        return 2;
      }
      return 1;

    case BuiltinOperator_L2_NORMALIZATION:
      if (op_sig.outputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_ABS:
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return op_sig.ext_options.abs.input_quantized ? 3 : 4;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
          op_sig.inputs.at(0).type == kTfLiteUInt8) {
        return 2;
      }
      return 1;
    case BuiltinOperator_RELU:
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
          op_sig.inputs.at(0).type == kTfLiteUInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_STRIDED_SLICE: {
      auto strided_slice_params =
          reinterpret_cast<TfLiteStridedSliceParams*>(op_sig.builtin_data);
      TFLITE_DCHECK(strided_slice_params != nullptr);
      if (strided_slice_params->ellipsis_mask != 0 ||
          strided_slice_params->new_axis_mask != 0) {
        return 6;
      }
      if (op_sig.inputs.at(0).type == kTfLiteString) {
        return 5;
      }
      if (op_sig.ext_options.strided_slice.num_dims > 4) {
        return 4;
      }
      // If the op takes bool input, it is version 3.
      if (op_sig.inputs.at(0).type == kTfLiteBool) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;
    }
    case BuiltinOperator_REVERSE_V2:
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteBool) {
        return 2;
      }
      return 1;
    case BuiltinOperator_RESIZE_BILINEAR: {
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 4;
      }
      auto resize_bilinear_params =
          reinterpret_cast<TfLiteResizeBilinearParams*>(op_sig.builtin_data);
      TFLITE_DCHECK(resize_bilinear_params != nullptr);
      if (resize_bilinear_params->half_pixel_centers) {
        return 3;
      } else if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;
    }
    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 4;
      }
      auto resize_nearest_neighbor_params =
          reinterpret_cast<TfLiteResizeNearestNeighborParams*>(
              op_sig.builtin_data);
      TFLITE_DCHECK(resize_nearest_neighbor_params != nullptr);
      if (resize_nearest_neighbor_params->half_pixel_centers ||
          resize_nearest_neighbor_params->align_corners) {
        return 3;
      } else if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;
    }

    case BuiltinOperator_MAXIMUM:
    case BuiltinOperator_MINIMUM:
      if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
        return 4;
      }
      if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_PACK:
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }

      if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
        return 3;
      }
      return 1;

    case BuiltinOperator_TILE:
      if (op_sig.inputs.at(0).type == kTfLiteString) {
        return 2;
      }
      return 1;

    case BuiltinOperator_SQUEEZE:
      if (op_sig.inputs.at(0).type == kTfLiteString) {
        return 2;
      }
      return 1;

    case BuiltinOperator_SPACE_TO_BATCH_ND:
    case BuiltinOperator_BATCH_TO_SPACE_ND:
      if (op_sig.inputs.at(0).dims.size() != 4) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_ADD: {
      if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt64) {
        return 4;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
        auto add_params =
            reinterpret_cast<TfLiteAddParams*>(op_sig.builtin_data);
        if (add_params && !add_params->pot_scale_int16) {
          return 3;
        }
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;
    }

    case BuiltinOperator_SUB: {
      if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
        auto sub_params =
            reinterpret_cast<TfLiteSubParams*>(op_sig.builtin_data);
        if (sub_params && !sub_params->pot_scale_int16) {
          return 5;
        }
      }
      if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt64) {
        return 4;
      }
      if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;
    }

    case BuiltinOperator_GATHER_ND:
      if (!op_sig.inputs.empty() &&
          (op_sig.inputs.at(0).type == kTfLiteInt16)) {
        return 3;
      }
      if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteString) {
        return 2;
      }
      return 1;

    case BuiltinOperator_DIV:
      if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
        return 2;
      }
      return 1;
    case BuiltinOperator_TANH:
    case BuiltinOperator_LOGISTIC:
      if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
        return 3;
      }

      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_FILL:
      if (op_sig.inputs.size() >= 2) {
        if (op_sig.inputs.at(1).type == kTfLiteInt8 ||
            op_sig.inputs.at(1).type == kTfLiteInt16) {
          return 3;
        } else if ((op_sig.inputs.at(1).type == kTfLiteBool ||
                    op_sig.inputs.at(1).type == kTfLiteString)) {
          return 2;
        }
      }
      return 1;

    case BuiltinOperator_EQUAL:
    case BuiltinOperator_NOT_EQUAL:
      if (!op_sig.inputs.empty()) {
        if (op_sig.inputs.at(0).type == kTfLiteString) {
          return 3;
        }
        if (op_sig.inputs.at(0).type == kTfLiteInt8) {
          return 2;
        }
      }
      return 1;

    case BuiltinOperator_LEAKY_RELU:
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 2;
      }
      return 1;

    case BuiltinOperator_BATCH_MATMUL: {
      // In case of int16 inputs, the version is 3.
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
          op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        auto batch_mat_mul_params =
            reinterpret_cast<TfLiteBatchMatMulParams*>(op_sig.builtin_data);
        if (batch_mat_mul_params &&
            batch_mat_mul_params->asymmetric_quantize_inputs) {
          // This is to use the updated quantization scheme.
          return 4;
        }
      }
      return 1;
    }

    case BuiltinOperator_PAD:
    case BuiltinOperator_PADV2:
      if (op_sig.inputs.at(0).dims.size() > 4) {
        return 4;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_CONCATENATION:
    case BuiltinOperator_SOFTMAX:
    case BuiltinOperator_MEAN:
    case BuiltinOperator_REDUCE_MAX:
    case BuiltinOperator_REDUCE_MIN:
    case BuiltinOperator_RELU6:
      // In case of int16 inputs, the version is 3.
      if (op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 3;
      }
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_RNN: {
      if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        auto rnn_params =
            reinterpret_cast<TfLiteRNNParams*>(op_sig.builtin_data);
        if (rnn_params && rnn_params->asymmetric_quantize_inputs) {
          return 3;
        } else {
          return 2;
        }
      }
      return 1;
    }

    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
      if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        auto sequence_rnn_params =
            reinterpret_cast<TfLiteSequenceRNNParams*>(op_sig.builtin_data);
        if (sequence_rnn_params &&
            sequence_rnn_params->asymmetric_quantize_inputs) {
          return 3;
        } else {
          return 2;
        }
      }
      return 1;
    }

    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
      if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        auto bidirectional_sequence_rnn_params =
            reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>(
                op_sig.builtin_data);
        if (bidirectional_sequence_rnn_params &&
            bidirectional_sequence_rnn_params->asymmetric_quantize_inputs) {
          return 3;
        } else {
          return 2;
        }
      }
      return 1;
    }

    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
      if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        auto bidirectional_sequence_lstm_params =
            reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>(
                op_sig.builtin_data);
        if (bidirectional_sequence_lstm_params &&
            bidirectional_sequence_lstm_params->asymmetric_quantize_inputs) {
          return 3;
        } else {
          return 2;
        }
      }
      return 1;
    }

    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
      // If the input tensor is float and a weight is int8, this is a version
      // 2 hybrid operation.
      if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
          op_sig.inputs.at(2).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteFloat32) {
        auto unidirectional_sequence_lstm_params =
            reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
                op_sig.builtin_data);
        if (unidirectional_sequence_lstm_params &&
            unidirectional_sequence_lstm_params->asymmetric_quantize_inputs) {
          return 3;
        }
        return 2;
      }
      return 1;
    }

    case BuiltinOperator_SPACE_TO_DEPTH:
    case BuiltinOperator_SPLIT_V:
    case BuiltinOperator_SUM:
    case BuiltinOperator_LOG_SOFTMAX:
    case BuiltinOperator_TOPK_V2:
    case BuiltinOperator_ARG_MAX:
    case BuiltinOperator_ARG_MIN:
    case BuiltinOperator_GREATER:
    case BuiltinOperator_GREATER_EQUAL:
    case BuiltinOperator_LESS:
    case BuiltinOperator_LESS_EQUAL:
    case BuiltinOperator_SELECT:
    case BuiltinOperator_RSQRT:
    case BuiltinOperator_SQUARED_DIFFERENCE:
    case BuiltinOperator_DEPTH_TO_SPACE:
    case BuiltinOperator_MIRROR_PAD:
      if (op_sig.inputs.at(0).type == kTfLiteInt8) {
        return 2;
      }
      return 1;

    case BuiltinOperator_REDUCE_PROD:
      if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
          op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 2;
      }
      return 1;

    // Version 1 of the broadcast_to op is not supported, since version 1 was
    // rolled back and the builtin op code number was changed because of the
    // builtin op code shortage problem.
    // Quantized broadcast_to is version 3.
    case BuiltinOperator_BROADCAST_TO:
      if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
          op_sig.inputs.at(0).type == kTfLiteInt16) {
        return 3;
      }
      return 2;
    default:
      return 1;
  }
  // Prevent lint error about this function being too long.
  // NOLINTNEXTLINE
}
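
// Example (sketch): resolving the version for a hypothetical int8 ADD node.
// The OpSignature construction below is illustrative; the exact tensor-spec
// initialization may differ from the definitions in op_signature.h.
//
//   OpSignature op_sig;
//   op_sig.op = BuiltinOperator_ADD;
//   op_sig.inputs.push_back({kTfLiteInt8, /*dims=*/{1, 8, 8, 3}});
//   op_sig.inputs.push_back({kTfLiteInt8, /*dims=*/{1, 8, 8, 3}});
//   op_sig.outputs.push_back({kTfLiteInt8, /*dims=*/{1, 8, 8, 3}});
//   op_sig.builtin_data = nullptr;
//   int version = GetBuiltinOperatorVersion(op_sig);  // 2: quantized int8 ADD.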

void UpdateOpVersion(uint8_t* model_buffer_pointer) {
  auto model = GetMutableModel(model_buffer_pointer);
  auto subgraphs = model->subgraphs();

  for (int i = 0; i < subgraphs->Length(); ++i) {
    const SubGraph* subgraph = subgraphs->Get(i);
    for (int j = 0; j < subgraph->operators()->Length(); ++j) {
      const Operator* op = subgraph->operators()->Get(j);
      OperatorCode* op_code =
          model->mutable_operator_codes()->GetMutableObject(op->opcode_index());

      auto builtin_code = GetBuiltinCode(op_code);
      if (builtin_code != BuiltinOperator_CUSTOM) {
        OpSignature op_sig = GetOpSignature(op_code, op, subgraph, model);
        // Update builtin operator version.
        int32_t op_ver = GetBuiltinOperatorVersion(op_sig);
        if (op_sig.builtin_data) {
          free(op_sig.builtin_data);
        }
        // Skip updating op version if the current node uses lower version.
        // TODO(b/184366869): Populate multiple versions of operator once MLIR
        // quantizer is ready.
        if (op_ver <= op_code->version()) {
          continue;
        }
        if (!op_code->mutate_version(op_ver)) {
          LOG(ERROR) << "Can't set operator "
                     << EnumNameBuiltinOperator(builtin_code) << " to version "
                     << op_ver;
        }
      }
    }
  }
}
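
// Typical use (sketch): the file I/O below is illustrative; only
// UpdateOpVersion() itself is defined in this file.
//
//   std::string model_content;
//   // ... read a .tflite flatbuffer into model_content ...
//   UpdateOpVersion(reinterpret_cast<uint8_t*>(&model_content[0]));
//   // Each operator_codes[i].version in model_content is now at least the
//   // version required by the corresponding operator's signature.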

}  // namespace tflite