# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite Python Interface: Sanity check."""
import ctypes
import io
import sys
from unittest import mock

import numpy as np
import tensorflow as tf

# Force loaded shared object symbols to be globally visible. This is needed so
# that the interpreter_wrapper, in one .so file, can see the test_registerer,
# in a different .so file. Note that this may already be set by default.
# pylint: disable=g-import-not-at-top
if hasattr(sys, 'setdlopenflags') and hasattr(sys, 'getdlopenflags'):
  sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)

from tensorflow.lite.python import interpreter as interpreter_wrapper
from tensorflow.lite.python.testdata import _pywrap_test_registerer as test_registerer
from tensorflow.python.framework import test_util
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
try:
  from tensorflow.lite.python import metrics_portable
  metrics = metrics_portable
except ImportError:
  from tensorflow.lite.python import metrics_nonportable
  metrics = metrics_nonportable
# pylint: enable=g-import-not-at-top


class InterpreterCustomOpsTest(test_util.TensorFlowTestCase):

  def testRegistererByName(self):
    interpreter = interpreter_wrapper.InterpreterWithCustomOps(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'),
        custom_op_registerers=['TF_TestRegisterer'])
    self.assertTrue(interpreter._safe_to_run())
    self.assertEqual(test_registerer.get_num_test_registerer_calls(), 1)

  def testRegistererByFunc(self):
    interpreter = interpreter_wrapper.InterpreterWithCustomOps(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'),
        custom_op_registerers=[test_registerer.TF_TestRegisterer])
    self.assertTrue(interpreter._safe_to_run())
    self.assertEqual(test_registerer.get_num_test_registerer_calls(), 1)

  def testRegistererFailure(self):
    bogus_name = 'CompletelyBogusRegistererName'
    with self.assertRaisesRegex(
        ValueError, 'Looking up symbol \'' + bogus_name + '\' failed'):
      interpreter_wrapper.InterpreterWithCustomOps(
          model_path=resource_loader.get_path_to_datafile(
              'testdata/permute_float.tflite'),
          custom_op_registerers=[bogus_name])

  def testNoCustomOps(self):
    interpreter = interpreter_wrapper.InterpreterWithCustomOps(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'))
    self.assertTrue(interpreter._safe_to_run())


class InterpreterTest(test_util.TensorFlowTestCase):

  def assertQuantizationParamsEqual(self, scales, zero_points,
                                    quantized_dimension, params):
    self.assertAllEqual(scales, params['scales'])
    self.assertAllEqual(zero_points, params['zero_points'])
    self.assertEqual(quantized_dimension, params['quantized_dimension'])

  def testThreads_NegativeValue(self):
    with self.assertRaisesRegex(ValueError, 'num_threads should >= 1'):
      interpreter_wrapper.Interpreter(
          model_path=resource_loader.get_path_to_datafile(
              'testdata/permute_float.tflite'),
          num_threads=-1)

  def testThreads_WrongType(self):
    with self.assertRaisesRegex(ValueError,
                                'type of num_threads should be int'):
      interpreter_wrapper.Interpreter(
          model_path=resource_loader.get_path_to_datafile(
              'testdata/permute_float.tflite'),
          num_threads=4.2)

  def testNotSupportedOpResolverTypes(self):
    with self.assertRaisesRegex(
        ValueError, 'Unrecognized passed in op resolver type: test'):
      interpreter_wrapper.Interpreter(
          model_path=resource_loader.get_path_to_datafile(
              'testdata/permute_float.tflite'),
          experimental_op_resolver_type='test')

  def testFloatWithDifferentOpResolverTypes(self):
    op_resolver_types = [
        interpreter_wrapper.OpResolverType.BUILTIN,
        interpreter_wrapper.OpResolverType.BUILTIN_REF,
        interpreter_wrapper.OpResolverType.BUILTIN_WITHOUT_DEFAULT_DELEGATES
    ]

    for op_resolver_type in op_resolver_types:
      interpreter = interpreter_wrapper.Interpreter(
          model_path=resource_loader.get_path_to_datafile(
              'testdata/permute_float.tflite'),
          experimental_op_resolver_type=op_resolver_type)
      interpreter.allocate_tensors()

      input_details = interpreter.get_input_details()
      self.assertEqual(1, len(input_details))
      self.assertEqual('input', input_details[0]['name'])
      self.assertEqual(np.float32, input_details[0]['dtype'])
      self.assertTrue(([1, 4] == input_details[0]['shape']).all())
      self.assertEqual((0.0, 0), input_details[0]['quantization'])
      self.assertQuantizationParamsEqual(
          [], [], 0, input_details[0]['quantization_parameters'])

      output_details = interpreter.get_output_details()
      self.assertEqual(1, len(output_details))
      self.assertEqual('output', output_details[0]['name'])
      self.assertEqual(np.float32, output_details[0]['dtype'])
      self.assertTrue(([1, 4] == output_details[0]['shape']).all())
      self.assertEqual((0.0, 0), output_details[0]['quantization'])
      self.assertQuantizationParamsEqual(
          [], [], 0, output_details[0]['quantization_parameters'])

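      # Note: permute_float.tflite reverses the order of its four input
      # elements, so [1, 2, 3, 4] is expected to come back as [4, 3, 2, 1].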
      test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
      expected_output = np.array([[4.0, 3.0, 2.0, 1.0]], dtype=np.float32)
      interpreter.set_tensor(input_details[0]['index'], test_input)
      interpreter.invoke()

      output_data = interpreter.get_tensor(output_details[0]['index'])
      self.assertTrue((expected_output == output_data).all())

  def testFloatWithTwoThreads(self):
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'),
        num_threads=2)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
    expected_output = np.array([[4.0, 3.0, 2.0, 1.0]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], test_input)
    interpreter.invoke()

    output_details = interpreter.get_output_details()
    output_data = interpreter.get_tensor(output_details[0]['index'])
    self.assertTrue((expected_output == output_data).all())

  def testUint8(self):
    model_path = resource_loader.get_path_to_datafile(
        'testdata/permute_uint8.tflite')
    with io.open(model_path, 'rb') as model_file:
      data = model_file.read()

    interpreter = interpreter_wrapper.Interpreter(model_content=data)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('input', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 4] == input_details[0]['shape']).all())
    self.assertEqual((1.0, 0), input_details[0]['quantization'])
    self.assertQuantizationParamsEqual(
        [1.0], [0], 0, input_details[0]['quantization_parameters'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('output', output_details[0]['name'])
    self.assertEqual(np.uint8, output_details[0]['dtype'])
    self.assertTrue(([1, 4] == output_details[0]['shape']).all())
    self.assertEqual((1.0, 0), output_details[0]['quantization'])
    self.assertQuantizationParamsEqual(
        [1.0], [0], 0, output_details[0]['quantization_parameters'])

    test_input = np.array([[1, 2, 3, 4]], dtype=np.uint8)
    expected_output = np.array([[4, 3, 2, 1]], dtype=np.uint8)
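    # The resize below keeps the original [1, 4] shape; it simply exercises
    # resize_tensor_input() plus re-allocation before running inference.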
    interpreter.resize_tensor_input(input_details[0]['index'], test_input.shape)
    interpreter.allocate_tensors()
    interpreter.set_tensor(input_details[0]['index'], test_input)
    interpreter.invoke()

    output_data = interpreter.get_tensor(output_details[0]['index'])
    self.assertTrue((expected_output == output_data).all())

  def testString(self):
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/gather_string.tflite'))
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('input', input_details[0]['name'])
    self.assertEqual(np.string_, input_details[0]['dtype'])
    self.assertTrue(([10] == input_details[0]['shape']).all())
    self.assertEqual((0.0, 0), input_details[0]['quantization'])
    self.assertQuantizationParamsEqual(
        [], [], 0, input_details[0]['quantization_parameters'])
    self.assertEqual('indices', input_details[1]['name'])
    self.assertEqual(np.int64, input_details[1]['dtype'])
    self.assertTrue(([3] == input_details[1]['shape']).all())
    self.assertEqual((0.0, 0), input_details[1]['quantization'])
    self.assertQuantizationParamsEqual(
        [], [], 0, input_details[1]['quantization_parameters'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('output', output_details[0]['name'])
    self.assertEqual(np.string_, output_details[0]['dtype'])
    self.assertTrue(([3] == output_details[0]['shape']).all())
    self.assertEqual((0.0, 0), output_details[0]['quantization'])
    self.assertQuantizationParamsEqual(
        [], [], 0, output_details[0]['quantization_parameters'])

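    # The gather_string model looks up entries of the 10-element string tensor
    # at the given indices, so indices [1, 2, 3] should yield b'b', b'c', b'd'.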
    test_input = np.array([1, 2, 3], dtype=np.int64)
    interpreter.set_tensor(input_details[1]['index'], test_input)

    test_input = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'])
    expected_output = np.array([b'b', b'c', b'd'])
    interpreter.set_tensor(input_details[0]['index'], test_input)
    interpreter.invoke()

    output_data = interpreter.get_tensor(output_details[0]['index'])
    self.assertTrue((expected_output == output_data).all())

  def testStringZeroDim(self):
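    # A 20-byte payload ('abcd' plus 16 zero bytes) is written to a scalar
    # (0-d) string tensor; the test only checks that it round-trips with the
    # same length.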
    data = b'abcd' + bytes(16)
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/gather_string_0d.tflite'))
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    interpreter.set_tensor(input_details[0]['index'], np.array(data))
    test_input_tensor = interpreter.get_tensor(input_details[0]['index'])
    self.assertEqual(len(data), len(test_input_tensor.item(0)))

  def testPerChannelParams(self):
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile('testdata/pc_conv.bin'))
    interpreter.allocate_tensors()

    # Tensor index 1 is the weight.
    weight_details = interpreter.get_tensor_details()[1]
    qparams = weight_details['quantization_parameters']
    # Ensure that we retrieve per channel quantization params correctly.
    self.assertEqual(len(qparams['scales']), 128)

  def testDenseTensorAccess(self):
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile('testdata/pc_conv.bin'))
    interpreter.allocate_tensors()
    weight_details = interpreter.get_tensor_details()[1]
    s_params = weight_details['sparsity_parameters']
    self.assertEqual(s_params, {})

  def testSparseTensorAccess(self):
    interpreter = interpreter_wrapper.InterpreterWithCustomOps(
        model_path=resource_loader.get_path_to_datafile(
            '../testdata/sparse_tensor.bin'),
        custom_op_registerers=['TF_TestRegisterer'])
    interpreter.allocate_tensors()

    # Tensor at index 0 is sparse.
    compressed_buffer = interpreter.get_tensor(0)
    # Ensure that the buffer is of correct size and value.
    self.assertEqual(len(compressed_buffer), 12)
    sparse_value = [1, 0, 0, 4, 2, 3, 0, 0, 5, 0, 0, 6]
    self.assertAllEqual(compressed_buffer, sparse_value)

    tensor_details = interpreter.get_tensor_details()[0]
    s_params = tensor_details['sparsity_parameters']

    # Ensure the returned sparsity parameters are correct.
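    # In dim_metadata below, a 'format' of 0 appears to denote a dense
    # dimension (described by 'dense_size') and 1 a sparse, CSR-style
    # dimension (described by 'array_segments'/'array_indices').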
    self.assertAllEqual(s_params['traversal_order'], [0, 1, 2, 3])
    self.assertAllEqual(s_params['block_map'], [0, 1])
    dense_dim_metadata = {'format': 0, 'dense_size': 2}
    self.assertAllEqual(s_params['dim_metadata'][0], dense_dim_metadata)
    self.assertAllEqual(s_params['dim_metadata'][2], dense_dim_metadata)
    self.assertAllEqual(s_params['dim_metadata'][3], dense_dim_metadata)
    self.assertEqual(s_params['dim_metadata'][1]['format'], 1)
    self.assertAllEqual(s_params['dim_metadata'][1]['array_segments'],
                        [0, 2, 3])
    self.assertAllEqual(s_params['dim_metadata'][1]['array_indices'], [0, 1, 1])

  @mock.patch.object(metrics.TFLiteMetrics,
                     'increase_counter_interpreter_creation')
  def testCreationCounter(self, increase_call):
    interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'))
    increase_call.assert_called_once()


class InterpreterTestErrorPropagation(test_util.TensorFlowTestCase):

  def testInvalidModelContent(self):
    with self.assertRaisesRegex(ValueError,
                                'Model provided has model identifier \''):
      interpreter_wrapper.Interpreter(model_content=b'garbage')

  def testInvalidModelFile(self):
    with self.assertRaisesRegex(ValueError,
                                'Could not open \'totally_invalid_file_name\''):
      interpreter_wrapper.Interpreter(model_path='totally_invalid_file_name')

  def testInvokeBeforeReady(self):
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'))
    with self.assertRaisesRegex(RuntimeError,
                                'Invoke called on model that is not ready'):
      interpreter.invoke()

  def testInvalidModelFileContent(self):
    with self.assertRaisesRegex(
        ValueError, '`model_path` or `model_content` must be specified.'):
      interpreter_wrapper.Interpreter(model_path=None, model_content=None)

  def testInvalidIndex(self):
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'))
    interpreter.allocate_tensors()
    # Invalid tensor index passed.
    with self.assertRaisesRegex(ValueError, 'Tensor with no shape found.'):
      interpreter._get_tensor_details(4)
    with self.assertRaisesRegex(ValueError, 'Invalid node index'):
      interpreter._get_op_details(4)

  def testEmptyInputTensor(self):

    class TestModel(tf.keras.models.Model):

      @tf.function(
          input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32)])
      def TestSum(self, x):
        return tf.raw_ops.Sum(input=x, axis=[0])

    test_model = TestModel()
    converter = tf.lite.TFLiteConverter.from_concrete_functions([
        test_model.TestSum.get_concrete_function(
            tf.TensorSpec([None], tf.float32))
    ], test_model)
    model = converter.convert()
    interpreter = tf.lite.Interpreter(model_content=model)
    # Make sure that passing an empty tensor doesn't cause any errors.
    interpreter.get_signature_runner()(x=tf.zeros([0], tf.float32))


class InterpreterTensorAccessorTest(test_util.TensorFlowTestCase):

  def setUp(self):
    super(InterpreterTensorAccessorTest, self).setUp()
    self.interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'))
    self.interpreter.allocate_tensors()
    self.input0 = self.interpreter.get_input_details()[0]['index']
    self.initial_data = np.array([[-1., -2., -3., -4.]], np.float32)

  def testTensorAccessor(self):
    """Check that tensor returns a reference."""
    array_ref = self.interpreter.tensor(self.input0)
    np.copyto(array_ref(), self.initial_data)
    self.assertAllEqual(array_ref(), self.initial_data)
    self.assertAllEqual(
        self.interpreter.get_tensor(self.input0), self.initial_data)

  def testGetTensorAccessor(self):
    """Check that get_tensor returns a copy."""
    self.interpreter.set_tensor(self.input0, self.initial_data)
    array_initial_copy = self.interpreter.get_tensor(self.input0)
    new_value = np.add(1., array_initial_copy)
    self.interpreter.set_tensor(self.input0, new_value)
    self.assertAllEqual(array_initial_copy, self.initial_data)
    self.assertAllEqual(self.interpreter.get_tensor(self.input0), new_value)

  def testBase(self):
    self.assertTrue(self.interpreter._safe_to_run())
    _ = self.interpreter.tensor(self.input0)
    self.assertTrue(self.interpreter._safe_to_run())
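    # Calling the closure returned by tensor() yields a numpy view backed by
    # the interpreter's internal buffer; while such a view is alive the
    # interpreter reports that it is not safe to run or reallocate.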
    in0 = self.interpreter.tensor(self.input0)()
    self.assertFalse(self.interpreter._safe_to_run())
    in0b = self.interpreter.tensor(self.input0)()
    self.assertFalse(self.interpreter._safe_to_run())
    # Now get rid of the buffers so that we can evaluate.
    del in0
    del in0b
    self.assertTrue(self.interpreter._safe_to_run())

  def testBaseProtectsFunctions(self):
    in0 = self.interpreter.tensor(self.input0)()
    # Make sure we get an exception if we try to run an unsafe operation
    with self.assertRaisesRegex(RuntimeError, 'There is at least 1 reference'):
      _ = self.interpreter.allocate_tensors()
    # Make sure we get an exception if we try to run an unsafe operation
    with self.assertRaisesRegex(RuntimeError, 'There is at least 1 reference'):
      _ = self.interpreter.invoke()  # pylint: disable=assignment-from-no-return
    # Now test that we can run
    del in0  # this is our only buffer reference, so now it is safe to change
    in0safe = self.interpreter.tensor(self.input0)
    _ = self.interpreter.allocate_tensors()
    del in0safe  # make sure in0Safe is held but lint doesn't complain


class InterpreterDelegateTest(test_util.TensorFlowTestCase):

  def setUp(self):
    super(InterpreterDelegateTest, self).setUp()
    self._delegate_file = resource_loader.get_path_to_datafile(
        'testdata/test_delegate.so')
    self._model_file = resource_loader.get_path_to_datafile(
        'testdata/permute_float.tflite')

    # Load the library to reset the counters.
    library = ctypes.pydll.LoadLibrary(self._delegate_file)
    library.initialize_counters()

  def _TestInterpreter(self, model_path, options=None):
    """Test wrapper function that creates an interpreter with the delegate."""
    delegate = interpreter_wrapper.load_delegate(self._delegate_file, options)
    return interpreter_wrapper.Interpreter(
        model_path=model_path, experimental_delegates=[delegate])

  def testDelegate(self):
    """Tests the delegate creation and destruction."""
    interpreter = self._TestInterpreter(model_path=self._model_file)
    lib = interpreter._delegates[0]._library
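    # The test delegate .so exposes counter functions so the test can observe
    # how many delegates were created, destroyed, and invoked.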

    self.assertEqual(lib.get_num_delegates_created(), 1)
    self.assertEqual(lib.get_num_delegates_destroyed(), 0)
    self.assertEqual(lib.get_num_delegates_invoked(), 1)

    del interpreter

    self.assertEqual(lib.get_num_delegates_created(), 1)
    self.assertEqual(lib.get_num_delegates_destroyed(), 1)
    self.assertEqual(lib.get_num_delegates_invoked(), 1)

  def testMultipleInterpreters(self):
    delegate = interpreter_wrapper.load_delegate(self._delegate_file)
    lib = delegate._library

    self.assertEqual(lib.get_num_delegates_created(), 1)
    self.assertEqual(lib.get_num_delegates_destroyed(), 0)
    self.assertEqual(lib.get_num_delegates_invoked(), 0)

    interpreter_a = interpreter_wrapper.Interpreter(
        model_path=self._model_file, experimental_delegates=[delegate])

    self.assertEqual(lib.get_num_delegates_created(), 1)
    self.assertEqual(lib.get_num_delegates_destroyed(), 0)
    self.assertEqual(lib.get_num_delegates_invoked(), 1)

    interpreter_b = interpreter_wrapper.Interpreter(
        model_path=self._model_file, experimental_delegates=[delegate])

    self.assertEqual(lib.get_num_delegates_created(), 1)
    self.assertEqual(lib.get_num_delegates_destroyed(), 0)
    self.assertEqual(lib.get_num_delegates_invoked(), 2)

    del delegate
    del interpreter_a

    self.assertEqual(lib.get_num_delegates_created(), 1)
    self.assertEqual(lib.get_num_delegates_destroyed(), 0)
    self.assertEqual(lib.get_num_delegates_invoked(), 2)

    del interpreter_b

    self.assertEqual(lib.get_num_delegates_created(), 1)
    self.assertEqual(lib.get_num_delegates_destroyed(), 1)
    self.assertEqual(lib.get_num_delegates_invoked(), 2)

  def testDestructionOrder(self):
    """Make sure internal _interpreter object is destroyed before delegate."""
    self.skipTest('TODO(b/142136355): fix flakiness and re-enable')
    # Track the order in which destructions were done.
    destructions = []

    def register_destruction(x):
      destructions.append(x if isinstance(x, str) else x.decode('utf-8'))
      return 0

    # Make a wrapper for the callback so we can send this to ctypes
    delegate = interpreter_wrapper.load_delegate(self._delegate_file)
    # Make an interpreter with the delegate
    interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'),
        experimental_delegates=[delegate])

    class InterpreterDestroyCallback:

      def __del__(self):
        register_destruction('interpreter')

    interpreter._interpreter.stuff = InterpreterDestroyCallback()
    # Destroy both delegate and interpreter
    library = delegate._library
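    # CFUNCTYPE builds a C-callable wrapper around register_destruction so the
    # delegate library can invoke it as objects are torn down.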
    prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p)
    library.set_destroy_callback(prototype(register_destruction))
    del delegate
    del interpreter
    library.set_destroy_callback(None)
    # Check that the interpreter was destroyed before the delegate.
    self.assertEqual(destructions, ['interpreter', 'test_delegate'])

  def testOptions(self):
    delegate_a = interpreter_wrapper.load_delegate(self._delegate_file)
    lib = delegate_a._library

    self.assertEqual(lib.get_num_delegates_created(), 1)
    self.assertEqual(lib.get_num_delegates_destroyed(), 0)
    self.assertEqual(lib.get_num_delegates_invoked(), 0)
    self.assertEqual(lib.get_options_counter(), 0)

    delegate_b = interpreter_wrapper.load_delegate(
        self._delegate_file, options={
            'unused': False,
            'options_counter': 2
        })
    lib = delegate_b._library

    self.assertEqual(lib.get_num_delegates_created(), 2)
    self.assertEqual(lib.get_num_delegates_destroyed(), 0)
    self.assertEqual(lib.get_num_delegates_invoked(), 0)
    self.assertEqual(lib.get_options_counter(), 2)

    del delegate_a
    del delegate_b

    self.assertEqual(lib.get_num_delegates_created(), 2)
    self.assertEqual(lib.get_num_delegates_destroyed(), 2)
    self.assertEqual(lib.get_num_delegates_invoked(), 0)
    self.assertEqual(lib.get_options_counter(), 2)

  def testFail(self):
    with self.assertRaisesRegex(
        # Due to exception chaining in PY3, we can't be more specific here and
        # check that the phrase 'Fail argument sent' is present.
        ValueError, 'Failed to load delegate from'):
      interpreter_wrapper.load_delegate(
          self._delegate_file, options={'fail': 'fail'})


if __name__ == '__main__':
  test.main()