# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tflite_convert.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from absl.testing import parameterized
import numpy as np
from tensorflow import keras

from tensorflow.core.framework import graph_pb2
from tensorflow.lite.python import test_util as tflite_test_util
from tensorflow.lite.python import tflite_convert
from tensorflow.lite.python.convert import register_custom_opdefs
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.importer import import_graph_def
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.training_util import write_graph


class TestModels(test_util.TensorFlowTestCase):

  def _getFilepath(self, filename):
    return os.path.join(self.get_temp_dir(), filename)

  def _run(self,
           flags_str,
           should_succeed,
           expected_ops_in_converted_model=None,
           expected_output_shapes=None):
    output_file = os.path.join(self.get_temp_dir(), 'model.tflite')
    tflite_bin = resource_loader.get_path_to_datafile('tflite_convert')
    cmdline = '{0} --output_file={1} {2}'.format(tflite_bin, output_file,
                                                 flags_str)
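    # Illustrative example of the resulting command line (paths are
    # placeholders, not real values):
    #   .../tflite_convert --output_file=/tmp/.../model.tflite \
    #     --saved_model_dir=/tmp/.../model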

    exitcode = os.system(cmdline)
    if exitcode == 0:
      with gfile.Open(output_file, 'rb') as model_file:
        content = model_file.read()
      self.assertEqual(content is not None, should_succeed)
      if expected_ops_in_converted_model:
        op_set = tflite_test_util.get_ops_list(content)
        for opname in expected_ops_in_converted_model:
          self.assertIn(opname, op_set)
      if expected_output_shapes:
        output_shapes = tflite_test_util.get_output_shapes(content)
        self.assertEqual(output_shapes, expected_output_shapes)
      os.remove(output_file)
    else:
      self.assertFalse(should_succeed)

  def _getKerasModelFile(self):
    x = np.array([[1.], [2.]])
    y = np.array([[2.], [4.]])

    model = keras.models.Sequential([
        keras.layers.Dropout(0.2, input_shape=(1,)),
        keras.layers.Dense(1),
    ])
    model.compile(optimizer='sgd', loss='mean_squared_error')
    model.fit(x, y, epochs=1)

    keras_file = self._getFilepath('model.h5')
    keras.models.save_model(model, keras_file)
    return keras_file

  def _getKerasFunctionalModelFile(self):
    """Returns a functional Keras model with output shapes [[1, 1], [1, 2]]."""
    input_tensor = keras.layers.Input(shape=(1,))
    output1 = keras.layers.Dense(1, name='b')(input_tensor)
    output2 = keras.layers.Dense(2, name='a')(input_tensor)
    model = keras.models.Model(inputs=input_tensor, outputs=[output1, output2])

    keras_file = self._getFilepath('functional_model.h5')
    keras.models.save_model(model, keras_file)
    return keras_file


class TfLiteConvertV1Test(TestModels):

  def _run(self,
           flags_str,
           should_succeed,
           expected_ops_in_converted_model=None):
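    # When TF2 behavior is enabled, append --enable_v1_converter so the CLI
    # still exercises the V1 conversion path that this test class covers.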
    if tf2.enabled():
      flags_str += ' --enable_v1_converter'
    super(TfLiteConvertV1Test, self)._run(flags_str, should_succeed,
                                          expected_ops_in_converted_model)

  def testFrozenGraphDef(self):
    with ops.Graph().as_default():
      in_tensor = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32)
      _ = in_tensor + in_tensor
      sess = session.Session()

    # Write graph to file.
    graph_def_file = self._getFilepath('model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    flags_str = ('--graph_def_file={0} --input_arrays={1} '
                 '--output_arrays={2}'.format(graph_def_file, 'Placeholder',
                                              'add'))
    self._run(flags_str, should_succeed=True)
    os.remove(graph_def_file)

  # Runs `tflite_convert` explicitly with the legacy converter.
  # Until the new converter is enabled by default, this flag has no real
  # effect.
  def testFrozenGraphDefWithLegacyConverter(self):
    with ops.Graph().as_default():
      in_tensor = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32)
      _ = in_tensor + in_tensor
      sess = session.Session()

    # Write graph to file.
    graph_def_file = self._getFilepath('model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    flags_str = (
        '--graph_def_file={0} --input_arrays={1} '
        '--output_arrays={2} --experimental_new_converter=false'.format(
            graph_def_file, 'Placeholder', 'add'))
    self._run(flags_str, should_succeed=True)
    os.remove(graph_def_file)

  def testFrozenGraphDefNonPlaceholder(self):
    with ops.Graph().as_default():
      in_tensor = random_ops.random_normal(shape=[1, 16, 16, 3], name='random')
      _ = in_tensor + in_tensor
      sess = session.Session()

    # Write graph to file.
    graph_def_file = self._getFilepath('model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    flags_str = ('--graph_def_file={0} --input_arrays={1} '
                 '--output_arrays={2}'.format(graph_def_file, 'random', 'add'))
    self._run(flags_str, should_succeed=True)
    os.remove(graph_def_file)

  def testQATFrozenGraphDefInt8(self):
    with ops.Graph().as_default():
      in_tensor_1 = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
      in_tensor_2 = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
      _ = array_ops.fake_quant_with_min_max_args(
          in_tensor_1 + in_tensor_2, min=0., max=1., name='output',
          num_bits=16)  # INT8 inference type works for 16 bits fake quant.
      sess = session.Session()

    # Write graph to file.
    graph_def_file = self._getFilepath('model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    flags_str = ('--inference_type=INT8 --std_dev_values=128,128 '
                 '--mean_values=128,128 '
                 '--graph_def_file={0} --input_arrays={1},{2} '
                 '--output_arrays={3}'.format(graph_def_file, 'inputA',
                                              'inputB', 'output'))
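    # mean_values/std_dev_values give the quantization parameters of the input
    # arrays (one value per input); per the converter's flag documentation this
    # corresponds to real_value = (quantized_value - mean_value) / std_dev_value.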
    self._run(flags_str, should_succeed=True)
    os.remove(graph_def_file)

  def testQATFrozenGraphDefUInt8(self):
    with ops.Graph().as_default():
      in_tensor_1 = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
      in_tensor_2 = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
      _ = array_ops.fake_quant_with_min_max_args(
          in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
      sess = session.Session()

    # Write graph to file.
    graph_def_file = self._getFilepath('model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Define converter flags
    flags_str = ('--std_dev_values=128,128 --mean_values=128,128 '
                 '--graph_def_file={0} --input_arrays={1} '
                 '--output_arrays={2}'.format(graph_def_file, 'inputA,inputB',
                                              'output'))

    # Set inference_type UINT8 and (default) inference_input_type UINT8
    flags_str_1 = flags_str + ' --inference_type=UINT8'
    self._run(flags_str_1, should_succeed=True)

    # Set inference_type UINT8 and inference_input_type FLOAT
    flags_str_2 = flags_str_1 + ' --inference_input_type=FLOAT'
    self._run(flags_str_2, should_succeed=True)

    os.remove(graph_def_file)

  def testSavedModel(self):
    saved_model_dir = self._getFilepath('model')
    with ops.Graph().as_default():
      with session.Session() as sess:
        in_tensor = array_ops.placeholder(
            shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
        out_tensor = in_tensor + in_tensor
        inputs = {'x': in_tensor}
        outputs = {'z': out_tensor}
        saved_model.simple_save(sess, saved_model_dir, inputs, outputs)

    flags_str = '--saved_model_dir={}'.format(saved_model_dir)
    self._run(flags_str, should_succeed=True)

  def _createSavedModelWithCustomOp(self, opname='CustomAdd'):
    custom_opdefs_str = (
        'name: \'' + opname + '\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
        'input_arg: {name: \'Input2\' type: DT_FLOAT} output_arg: {name: '
        '\'Output\' type: DT_FLOAT}')
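    # Illustrative: with the default opname, the string above describes an
    # OpDef text proto equivalent to
    #   name: 'CustomAdd'
    #   input_arg { name: 'Input1' type: DT_FLOAT }
    #   input_arg { name: 'Input2' type: DT_FLOAT }
    #   output_arg { name: 'Output' type: DT_FLOAT }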

    # Create a graph that has one add op.
    new_graph = graph_pb2.GraphDef()
    with ops.Graph().as_default():
      with session.Session() as sess:
        in_tensor = array_ops.placeholder(
            shape=[1, 16, 16, 3], dtype=dtypes.float32, name='input')
        out_tensor = in_tensor + in_tensor
        inputs = {'x': in_tensor}
        outputs = {'z': out_tensor}

        new_graph.CopyFrom(sess.graph_def)

    # Rename Add op name to opname.
    for node in new_graph.node:
      if node.op.startswith('Add'):
        node.op = opname
        del node.attr['T']

    # Register custom op defs to import modified graph def.
    register_custom_opdefs([custom_opdefs_str])

    # Store saved model.
    saved_model_dir = self._getFilepath('model')
    with ops.Graph().as_default():
      with session.Session() as sess:
        import_graph_def(new_graph, name='')
        saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return (saved_model_dir, custom_opdefs_str)

  def testEnsureCustomOpdefsFlag(self):
    saved_model_dir, _ = self._createSavedModelWithCustomOp()

    # Conversion should fail without --custom_opdefs.
    flags_str = ('--saved_model_dir={0} --allow_custom_ops '
                 '--experimental_new_converter'.format(saved_model_dir))
    self._run(flags_str, should_succeed=False)

  def testSavedModelWithCustomOpdefsFlag(self):
    saved_model_dir, custom_opdefs_str = self._createSavedModelWithCustomOp()

    # Valid conversion.
    flags_str = (
        '--saved_model_dir={0} --custom_opdefs="{1}" --allow_custom_ops '
        '--experimental_new_converter'.format(saved_model_dir,
                                              custom_opdefs_str))
    self._run(
        flags_str,
        should_succeed=True,
        expected_ops_in_converted_model=['CustomAdd'])

  def testSavedModelWithFlex(self):
    saved_model_dir, custom_opdefs_str = self._createSavedModelWithCustomOp(
        opname='CustomAdd2')

    # Valid conversion. OpDef already registered.
    flags_str = ('--saved_model_dir={0} --allow_custom_ops '
                 '--custom_opdefs="{1}" '
                 '--experimental_new_converter '
                 '--experimental_select_user_tf_ops=CustomAdd2 '
                 '--target_ops=TFLITE_BUILTINS,SELECT_TF_OPS'.format(
                     saved_model_dir, custom_opdefs_str))
    self._run(
        flags_str,
        should_succeed=True,
        expected_ops_in_converted_model=['FlexCustomAdd2'])

  def testSavedModelWithInvalidCustomOpdefsFlag(self):
    saved_model_dir, _ = self._createSavedModelWithCustomOp()

    invalid_custom_opdefs_str = (
        'name: \'CustomAdd\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
        'output_arg: {name: \'Output\' type: DT_FLOAT}')

    # Conversion should fail with the invalid OpDef (missing second input_arg).
    flags_str = (
        '--saved_model_dir={0} --custom_opdefs="{1}" --allow_custom_ops '
        '--experimental_new_converter'.format(saved_model_dir,
                                              invalid_custom_opdefs_str))
    self._run(flags_str, should_succeed=False)

  def testKerasFile(self):
    keras_file = self._getKerasModelFile()

    flags_str = '--keras_model_file={}'.format(keras_file)
    self._run(flags_str, should_succeed=True)
    os.remove(keras_file)

  def testKerasFileMLIR(self):
    keras_file = self._getKerasModelFile()

    flags_str = (
        '--keras_model_file={} --experimental_new_converter'.format(keras_file))
    self._run(flags_str, should_succeed=True)
    os.remove(keras_file)

  def testConversionSummary(self):
    keras_file = self._getKerasModelFile()
    log_dir = self.get_temp_dir()

    flags_str = ('--keras_model_file={} --experimental_new_converter  '
                 '--conversion_summary_dir={}'.format(keras_file, log_dir))
    self._run(flags_str, should_succeed=True)
    os.remove(keras_file)

    num_items_conversion_summary = len(os.listdir(log_dir))
    self.assertTrue(num_items_conversion_summary)

  def testConversionSummaryWithOldConverter(self):
    keras_file = self._getKerasModelFile()
    log_dir = self.get_temp_dir()

    flags_str = ('--keras_model_file={} --experimental_new_converter=false '
                 '--conversion_summary_dir={}'.format(keras_file, log_dir))
    self._run(flags_str, should_succeed=True)
    os.remove(keras_file)

    num_items_conversion_summary = len(os.listdir(log_dir))
    self.assertEqual(num_items_conversion_summary, 0)

  def _initObjectDetectionArgs(self):
    # Initializes the arguments required for the object detection model.
    # Looks for the model file, which is stored in different locations
    # internally and externally.
    filename = resource_loader.get_path_to_datafile('testdata/tflite_graph.pb')
    if not os.path.exists(filename):
      filename = os.path.join(
          resource_loader.get_root_dir_with_all_resources(),
          '../tflite_mobilenet_ssd_quant_protobuf/tflite_graph.pb')
      if not os.path.exists(filename):
        raise IOError("File '{0}' does not exist.".format(filename))

    self._graph_def_file = filename
    self._input_arrays = 'normalized_input_image_tensor'
    self._output_arrays = (
        'TFLite_Detection_PostProcess,TFLite_Detection_PostProcess:1,'
        'TFLite_Detection_PostProcess:2,TFLite_Detection_PostProcess:3')
    self._input_shapes = '1,300,300,3'

  def testObjectDetection(self):
    """Tests object detection model through TOCO."""
    self._initObjectDetectionArgs()
    flags_str = ('--graph_def_file={0} --input_arrays={1} '
                 '--output_arrays={2} --input_shapes={3} '
                 '--allow_custom_ops'.format(self._graph_def_file,
                                             self._input_arrays,
                                             self._output_arrays,
                                             self._input_shapes))
    self._run(flags_str, should_succeed=True)

  def testObjectDetectionMLIR(self):
    """Tests object detection model through MLIR converter."""
    self._initObjectDetectionArgs()
    custom_opdefs_str = (
        'name: \'TFLite_Detection_PostProcess\' '
        'input_arg: { name: \'raw_outputs/box_encodings\' type: DT_FLOAT } '
        'input_arg: { name: \'raw_outputs/class_predictions\' type: DT_FLOAT } '
        'input_arg: { name: \'anchors\' type: DT_FLOAT } '
        'output_arg: { name: \'TFLite_Detection_PostProcess\' type: DT_FLOAT } '
        'output_arg: { name: \'TFLite_Detection_PostProcess:1\' '
        'type: DT_FLOAT } '
        'output_arg: { name: \'TFLite_Detection_PostProcess:2\' '
        'type: DT_FLOAT } '
        'output_arg: { name: \'TFLite_Detection_PostProcess:3\' '
        'type: DT_FLOAT } '
        'attr : { name: \'h_scale\' type: \'float\'} '
        'attr : { name: \'max_classes_per_detection\' type: \'int\'} '
        'attr : { name: \'max_detections\' type: \'int\'} '
        'attr : { name: \'nms_iou_threshold\' type: \'float\'} '
        'attr : { name: \'nms_score_threshold\' type: \'float\'} '
        'attr : { name: \'num_classes\' type: \'int\'} '
        'attr : { name: \'w_scale\' type: \'float\'} '
        'attr : { name: \'x_scale\' type: \'float\'} '
        'attr : { name: \'y_scale\' type: \'float\'}')

    flags_str = ('--graph_def_file={0} --input_arrays={1} '
                 '--output_arrays={2} --input_shapes={3} '
                 '--custom_opdefs="{4}"'.format(self._graph_def_file,
                                                self._input_arrays,
                                                self._output_arrays,
                                                self._input_shapes,
                                                custom_opdefs_str))

    # Ensure --allow_custom_ops.
    flags_str_final = ('{} --allow_custom_ops').format(flags_str)

    self._run(
        flags_str_final,
        should_succeed=True,
        expected_ops_in_converted_model=['TFLite_Detection_PostProcess'])

  def testObjectDetectionMLIRWithFlex(self):
441    """Tests object detection model through MLIR converter."""
    self._initObjectDetectionArgs()

    flags_str = ('--graph_def_file={0} --input_arrays={1} '
                 '--output_arrays={2} --input_shapes={3}'.format(
                     self._graph_def_file, self._input_arrays,
                     self._output_arrays, self._input_shapes))

    # Valid conversion.
    flags_str_final = (
        '{} --allow_custom_ops '
        '--experimental_new_converter '
        '--experimental_select_user_tf_ops=TFLite_Detection_PostProcess '
        '--target_ops=TFLITE_BUILTINS,SELECT_TF_OPS').format(flags_str)
    self._run(
        flags_str_final,
        should_succeed=True,
        expected_ops_in_converted_model=['FlexTFLite_Detection_PostProcess'])


class TfLiteConvertV2Test(TestModels):

  @test_util.run_v2_only
  def testSavedModel(self):
    input_data = constant_op.constant(1., shape=[1])
    root = tracking.AutoTrackable()
    root.f = def_function.function(lambda x: 2. * x)
    to_save = root.f.get_concrete_function(input_data)

    saved_model_dir = self._getFilepath('model')
    save(root, saved_model_dir, to_save)

    flags_str = '--saved_model_dir={}'.format(saved_model_dir)
    self._run(flags_str, should_succeed=True)

  @test_util.run_v2_only
  def testKerasFile(self):
    keras_file = self._getKerasModelFile()

    flags_str = '--keras_model_file={}'.format(keras_file)
    self._run(flags_str, should_succeed=True)
    os.remove(keras_file)

  @test_util.run_v2_only
  def testKerasFileMLIR(self):
    keras_file = self._getKerasModelFile()

    flags_str = (
        '--keras_model_file={} --experimental_new_converter'.format(keras_file))
    self._run(flags_str, should_succeed=True)
    os.remove(keras_file)

  @test_util.run_v2_only
  def testFunctionalKerasModel(self):
    keras_file = self._getKerasFunctionalModelFile()

    flags_str = '--keras_model_file={}'.format(keras_file)
    self._run(flags_str, should_succeed=True,
              expected_output_shapes=[[1, 1], [1, 2]])
    os.remove(keras_file)

  @test_util.run_v2_only
  def testFunctionalKerasModelMLIR(self):
    keras_file = self._getKerasFunctionalModelFile()

    flags_str = (
        '--keras_model_file={} --experimental_new_converter'.format(keras_file))
    self._run(flags_str, should_succeed=True,
              expected_output_shapes=[[1, 1], [1, 2]])
    os.remove(keras_file)

  def testMissingRequired(self):
    self._run('--invalid_args', should_succeed=False)

  def testMutuallyExclusive(self):
    self._run(
        '--keras_model_file=model.h5 --saved_model_dir=/tmp/',
        should_succeed=False)


class ArgParserTest(test_util.TensorFlowTestCase, parameterized.TestCase):

  @parameterized.named_parameters(('v1', False), ('v2', True))
  def test_without_experimental_new_converter(self, use_v2_converter):
    args = [
        '--saved_model_dir=/tmp/saved_model/',
        '--output_file=/tmp/output.tflite',
    ]

    # Note: when --experimental_new_quantizer parses to None, the converter
    # falls back to its default value, which is True.

    parser = tflite_convert._get_parser(use_v2_converter=use_v2_converter)
    parsed_args = parser.parse_args(args)
    self.assertTrue(parsed_args.experimental_new_converter)
    self.assertIsNone(parsed_args.experimental_new_quantizer)

  @parameterized.named_parameters(('v1', False), ('v2', True))
  def test_experimental_new_converter_none(self, use_v2_converter):
    args = [
        '--saved_model_dir=/tmp/saved_model/',
        '--output_file=/tmp/output.tflite',
        '--experimental_new_converter',
    ]

    parser = tflite_convert._get_parser(use_v2_converter=use_v2_converter)
    parsed_args = parser.parse_args(args)
    self.assertTrue(parsed_args.experimental_new_converter)

  @parameterized.named_parameters(
      ('v1_true', False, True),
      ('v1_false', False, False),
      ('v2_true', True, True),
      ('v2_false', True, False),
  )
  def test_experimental_new_converter(self, use_v2_converter, new_converter):
    args = [
        '--saved_model_dir=/tmp/saved_model/',
        '--output_file=/tmp/output.tflite',
        '--experimental_new_converter={}'.format(new_converter),
    ]
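    # Unlike the bare flag form tested above, the flag gets an explicit value
    # here; str() renders the Python bools as 'True'/'False' on the command line.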

    parser = tflite_convert._get_parser(use_v2_converter=use_v2_converter)
    parsed_args = parser.parse_args(args)
    self.assertEqual(parsed_args.experimental_new_converter, new_converter)

  @parameterized.named_parameters(('v1', False), ('v2', True))
  def test_experimental_new_quantizer_none(self, use_v2_converter):
    args = [
        '--saved_model_dir=/tmp/saved_model/',
        '--output_file=/tmp/output.tflite',
        '--experimental_new_quantizer',
    ]

    parser = tflite_convert._get_parser(use_v2_converter=use_v2_converter)
    parsed_args = parser.parse_args(args)
    self.assertTrue(parsed_args.experimental_new_quantizer)

  @parameterized.named_parameters(
      ('v1_true', False, True),
      ('v1_false', False, False),
      ('v2_true', True, True),
      ('v2_false', True, False),
  )
  def test_experimental_new_quantizer(self, use_v2_converter, new_quantizer):
    args = [
        '--saved_model_dir=/tmp/saved_model/',
        '--output_file=/tmp/output.tflite',
        '--experimental_new_quantizer={}'.format(new_quantizer),
    ]

    parser = tflite_convert._get_parser(use_v2_converter=use_v2_converter)
    parsed_args = parser.parse_args(args)
    self.assertEqual(parsed_args.experimental_new_quantizer, new_quantizer)


if __name__ == '__main__':
  test.main()