/external/tensorflow/tensorflow/lite/java/src/test/java/org/tensorflow/lite/

InterpreterTest.java
    33   /** Unit tests for {@link org.tensorflow.lite.Interpreter}. */
    75   try (Interpreter interpreter = new Interpreter(MODEL_BUFFER)) {    in testInterpreter() argument
    76   assertThat(interpreter).isNotNull();    in testInterpreter()
    77   assertThat(interpreter.getInputTensorCount()).isEqualTo(1);    in testInterpreter()
    78   assertThat(interpreter.getInputTensor(0).dataType()).isEqualTo(DataType.FLOAT32);    in testInterpreter()
    79   assertThat(interpreter.getOutputTensorCount()).isEqualTo(1);    in testInterpreter()
    80   assertThat(interpreter.getOutputTensor(0).dataType()).isEqualTo(DataType.FLOAT32);    in testInterpreter()
    87   try (Interpreter interpreter =    in testInterpreterWithOptions() argument
    88   new Interpreter(    in testInterpreterWithOptions()
    90   new Interpreter.Options()    in testInterpreterWithOptions()
    [all …]

InterpreterApiTest.java
    74   try (InterpreterApi interpreter = InterpreterApi.create(MODEL_BUFFER, TEST_OPTIONS)) {    in testInterpreter() argument
    75   assertThat(interpreter).isNotNull();    in testInterpreter()
    76   assertThat(interpreter.getInputTensorCount()).isEqualTo(1);    in testInterpreter()
    77   assertThat(interpreter.getInputTensor(0).dataType()).isEqualTo(DataType.FLOAT32);    in testInterpreter()
    78   assertThat(interpreter.getOutputTensorCount()).isEqualTo(1);    in testInterpreter()
    79   assertThat(interpreter.getOutputTensor(0).dataType()).isEqualTo(DataType.FLOAT32);    in testInterpreter()
    86   try (InterpreterApi interpreter =    in testInterpreterWithOptions() argument
    88   assertThat(interpreter).isNotNull();    in testInterpreterWithOptions()
    89   assertThat(interpreter.getInputTensorCount()).isEqualTo(1);    in testInterpreterWithOptions()
    90   assertThat(interpreter.getInputTensor(0).dataType()).isEqualTo(DataType.FLOAT32);    in testInterpreterWithOptions()
    [all …]

InterpreterFlexTest.java
    30   * Unit tests for {@link org.tensorflow.lite.Interpreter} that validate execution with models that
    43   Interpreter.Options options = new Interpreter.Options().addDelegate(delegate);    in testFlexModel()
    44   try (Interpreter interpreter = new Interpreter(FLEX_MODEL_BUFFER, options)) {    in testFlexModel() argument
    45   testCommon(interpreter);    in testFlexModel()
    54   try (Interpreter interpreter = new Interpreter(FLEX_MODEL_BUFFER)) {    in testFlexModelDelegateAutomaticallyApplied() argument
    55   testCommon(interpreter);    in testFlexModelDelegateAutomaticallyApplied()
    62   Interpreter.Options options = new Interpreter.Options();    in testFlexModelDelegateAutomaticallyAppliedBeforeOtherDelegates()
    64   Interpreter interpreter =    in testFlexModelDelegateAutomaticallyAppliedBeforeOtherDelegates()
    65   new Interpreter(FLEX_MODEL_BUFFER, options.addDelegate(delegate))) {    in testFlexModelDelegateAutomaticallyAppliedBeforeOtherDelegates()
    66   testCommon(interpreter);    in testFlexModelDelegateAutomaticallyAppliedBeforeOtherDelegates()
    [all …]

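The three Java tests above all follow the same pattern: construct an `Interpreter` (or `InterpreterApi`) from a model, optionally passing `Interpreter.Options`, inspect the input/output tensors, and rely on try-with-resources to release native memory. A minimal sketch of that pattern, assuming a hypothetical `model.tflite` file on disk rather than the tests' in-memory `MODEL_BUFFER`:

```java
import java.io.File;
import org.tensorflow.lite.DataType;
import org.tensorflow.lite.Interpreter;

public final class InterpreterSmokeCheck {
  public static void main(String[] args) {
    // Hypothetical path; the tests above load an in-memory MODEL_BUFFER instead.
    File modelFile = new File("model.tflite");
    Interpreter.Options options = new Interpreter.Options().setNumThreads(2);
    // try-with-resources closes the interpreter, as in the tests above.
    try (Interpreter interpreter = new Interpreter(modelFile, options)) {
      System.out.println("inputs:  " + interpreter.getInputTensorCount());
      System.out.println("outputs: " + interpreter.getOutputTensorCount());
      DataType inputType = interpreter.getInputTensor(0).dataType();
      System.out.println("input 0 type: " + inputType);
    }
  }
}
```

Closing the interpreter (explicitly or via try-with-resources) matters because the Java object owns native allocations that garbage collection will not reclaim promptly.
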
/external/tensorflow/tensorflow/lite/

model_test.cc
    36   #include "tensorflow/lite/interpreter.h"
    135  std::unique_ptr<Interpreter> interpreter;    in TEST() local
    136  ASSERT_EQ(InterpreterBuilder(*model, TrivialResolver())(&interpreter),    in TEST()
    138  ASSERT_NE(interpreter, nullptr);    in TEST()
    155  std::unique_ptr<Interpreter> interpreter;    in TEST() local
    156  ASSERT_NE(InterpreterBuilder(*m, TrivialResolver())(&interpreter), kTfLiteOk);    in TEST()
    163  std::unique_ptr<Interpreter> interpreter;    in TEST() local
    164  ASSERT_EQ(InterpreterBuilder(*m, TrivialResolver())(&interpreter), kTfLiteOk);    in TEST()
    165  EXPECT_EQ(interpreter->subgraphs_size(), 2);    in TEST()
    173  std::unique_ptr<Interpreter> interpreter;    in TEST() local
    [all …]

interpreter_test.cc
    16   #include "tensorflow/lite/interpreter.h"
    60   // Make an interpreter that has no tensors and no nodes
    64   Interpreter interpreter;    in TEST() local
    74   interpreter.SetInputs({});    in TEST()
    75   interpreter.SetOutputs({});    in TEST()
    76   ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);    in TEST()
    77   ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);    in TEST()
    79   // Creating a new interpreter should not redundantly log runtime init.    in TEST()
    81   Interpreter interpreter2;
    87   Interpreter interpreter;    in TEST() local
    [all …]

optional_debug_tools_test.cc
    21   #include "tensorflow/lite/interpreter.h"
    29   void InitInputTensorData(Interpreter* interpreter) {    in InitInputTensorData() argument
    30   ASSERT_EQ(interpreter->inputs().size(), 1);    in InitInputTensorData()
    31   TfLiteTensor* t = interpreter->input_tensor(0);    in InitInputTensorData()
    44   std::unique_ptr<Interpreter> interpreter;    in TEST() local
    48   &interpreter),    in TEST()
    51   // Ensure printing the interpreter state doesn't crash:    in TEST()
    55   PrintInterpreterState(interpreter.get());    in TEST()
    57   ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);    in TEST()
    58   PrintInterpreterState(interpreter.get());    in TEST()
    [all …]

/external/tensorflow/tensorflow/lite/python/

interpreter_test.py
    33   from tensorflow.lite.python import interpreter as interpreter_wrapper
    51   interpreter = interpreter_wrapper.InterpreterWithCustomOps(
    55   self.assertTrue(interpreter._safe_to_run())
    59   interpreter = interpreter_wrapper.InterpreterWithCustomOps(
    63   self.assertTrue(interpreter._safe_to_run())
    76   interpreter = interpreter_wrapper.InterpreterWithCustomOps(
    79   self.assertTrue(interpreter._safe_to_run())
    92   interpreter_wrapper.Interpreter(
    100  interpreter_wrapper.Interpreter(
    108  interpreter_wrapper.Interpreter(
    [all …]

lite_flex_test.py
    27   from tensorflow.lite.python.interpreter import Interpreter
    65   interpreter = Interpreter(model_content=tflite_model)
    66   interpreter.allocate_tensors()
    67   input_details = interpreter.get_input_details()
    69   interpreter.set_tensor(input_details[0]['index'], test_input)
    70   interpreter.invoke()
    72   output_details = interpreter.get_output_details()
    74   output_data = interpreter.get_tensor(output_details[0]['index'])
    112  interpreter = Interpreter(model_content=tflite_model)
    113  interpreter.allocate_tensors()
    [all …]

interpreter.py
    15   """Python TF-Lite interpreter."""
    26   os.path.join('tflite_runtime', 'interpreter')):
    77   # by using explicit closes(). See implementation of Interpreter __del__.
    148  interpreter = tf.lite.Interpreter(
    152  interpreter = tf.lite.Interpreter(model_path='model.tflite')
    184  This class should be instantiated through TFLite Interpreter only using
    185  get_signature_runner method on Interpreter.
    187  signature = interpreter.get_signature_runner("my_signature")
    194  No other function on this object or on the interpreter provided should be
    198  def __init__(self, interpreter=None, signature_key=None):    argument
    [all …]

lite_test.py
    33   from tensorflow.lite.python.interpreter import Interpreter
    167  interpreter = Interpreter(model_content=tflite_model)
    168  interpreter.allocate_tensors()
    170  input_details = interpreter.get_input_details()
    177  output_details = interpreter.get_output_details()
    241  interpreter = Interpreter(model_content=tflite_model)
    242  interpreter.allocate_tensors()
    244  input_details = interpreter.get_input_details()
    251  output_details = interpreter.get_output_details()
    275  interpreter = Interpreter(model_content=tflite_model)
    [all …]

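The `interpreter.py` docstrings above describe the signature-runner flow (`interpreter.get_signature_runner("my_signature")`). The Java API offers an analogous entry point; the sketch below is an assumption-heavy illustration: it presumes a TFLite Java runtime recent enough to expose `getSignatureKeys()`/`runSignature()`, and a model whose signature uses the hypothetical tensor names `features` and `scores`.

```java
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import org.tensorflow.lite.Interpreter;

final class SignatureExample {
  // modelBuffer and the tensor names/shapes are hypothetical; they must match
  // the SignatureDef actually stored in the model.
  static void runNamedSignature(ByteBuffer modelBuffer, float[][] features, float[][] scores) {
    try (Interpreter interpreter = new Interpreter(modelBuffer)) {
      String signatureKey = interpreter.getSignatureKeys()[0];
      Map<String, Object> inputs = new HashMap<>();
      inputs.put("features", features);  // key = signature input name
      Map<String, Object> outputs = new HashMap<>();
      outputs.put("scores", scores);     // key = signature output name
      interpreter.runSignature(inputs, outputs, signatureKey);
    }
  }
}
```
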
/external/tflite-support/tensorflow_lite_support/custom_ops/kernel/

ngrams_test.py
    26   from tensorflow.lite.python import interpreter as interpreter_wrapper  # pylint: disable=g-direct-t…
    118  interpreter = interpreter_wrapper.InterpreterWithCustomOps(
    120  interpreter.resize_tensor_input(0, input_tensor.shape)
    121  interpreter.allocate_tensors()
    122  interpreter.set_tensor(interpreter.get_input_details()[0]['index'],
    124  interpreter.invoke()
    125  tflite_output = interpreter.get_tensor(
    126  interpreter.get_output_details()[0]['index'])
    138  interpreter = interpreter_wrapper.InterpreterWithCustomOps(
    140  interpreter.resize_tensor_input(0, input_tensor.shape)
    [all …]

whitespace_tokenizer_test.py
    28   from tensorflow.lite.python import interpreter as interpreter_wrapper
    73   interpreter = interpreter_wrapper.InterpreterWithCustomOps(
    78   interpreter.resize_tensor_input(0, np_test_case.shape)
    79   interpreter.allocate_tensors()
    80   interpreter.set_tensor(interpreter.get_input_details()[0]['index'],
    82   interpreter.invoke()
    83   tflite_output = interpreter.get_tensor(
    84   interpreter.get_output_details()[0]['index'])
    99   interpreter = interpreter_wrapper.InterpreterWithCustomOps(
    102  interpreter.resize_tensor_input(0, np_test_case.shape)
    [all …]

/external/tensorflow/tensorflow/lite/swift/Tests/

InterpreterTests.swift
    21   var interpreter: Interpreter!    variable
    26   interpreter = try! Interpreter(modelPath: AddModel.path)    in setUp()
    30   interpreter = nil    in tearDown()
    36   XCTAssertNoThrow(try Interpreter(modelPath: AddModel.path))    in testInit_ValidModelPath()
    40   XCTAssertThrowsError(try Interpreter(modelPath: "/invalid/path")) { error in    in testInit_InvalidModelPath_ThrowsFailedToLoadModel()
    46   var options = Interpreter.Options()    in testInitWithOptions()
    48   let interpreter = try Interpreter(modelPath: AddQuantizedModel.path, options: options)    in testInitWithOptions() variable
    49   XCTAssertNotNil(interpreter.options)    in testInitWithOptions()
    50   XCTAssertNil(interpreter.delegates)    in testInitWithOptions()
    54   XCTAssertEqual(interpreter.inputTensorCount, AddModel.inputTensorCount)    in testInputTensorCount()
    [all …]

/external/tensorflow/tensorflow/lite/tools/serialization/

writer_lib_test.cc
    32   #include "tensorflow/lite/interpreter.h"
    53   void WriteToFile(Interpreter* interpreter, const std::string& filename,    in WriteToFile() argument
    56   SubgraphWriter writer(&interpreter->primary_subgraph());    in WriteToFile()
    59   ModelWriter writer(interpreter);    in WriteToFile()
    66   Interpreter interpreter;    in TEST_P() local
    67   interpreter.AddTensors(3);    in TEST_P()
    69   interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},    in TEST_P()
    71   interpreter.SetTensorParametersReadOnly(    in TEST_P()
    74   interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},    in TEST_P()
    76   interpreter.SetInputs({0, 1});    in TEST_P()
    [all …]

/external/tensorflow/tensorflow/lite/java/src/testhelper/java/org/tensorflow/lite/

TestHelper.java
    25   * @param interpreter an instance of {@code Interpreter}. If it is not initialized, an {@code
    28   public static Long getLastNativeInferenceDurationNanoseconds(Interpreter interpreter) {    in getLastNativeInferenceDurationNanoseconds() argument
    29   if (interpreter != null && interpreter.wrapper != null) {    in getLastNativeInferenceDurationNanoseconds()
    30   return interpreter.wrapper.getLastNativeInferenceDurationNanoseconds();    in getLastNativeInferenceDurationNanoseconds()
    32   throw new IllegalArgumentException("Interpreter has not initialized; Failed to get latency.");    in getLastNativeInferenceDurationNanoseconds()
    39   * @param interpreter an instance of {@code Interpreter}. If it is not initialized, an {@code
    44   public static int[] getInputDims(Interpreter interpreter, int index) {    in getInputDims() argument
    45   if (interpreter != null && interpreter.wrapper != null) {    in getInputDims()
    46   return interpreter.wrapper.getInputTensor(index).shape();    in getInputDims()
    49   "Interpreter has not initialized;" + " Failed to get input dimensions.");    in getInputDims()
    [all …]

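`TestHelper` reads the latency counter through the package-private `wrapper` field; outside this package the same value is exposed by the public `Interpreter.getLastNativeInferenceDurationNanoseconds()` method. A small sketch, with hypothetical model buffer and I/O array shapes:

```java
import java.nio.MappedByteBuffer;
import org.tensorflow.lite.Interpreter;

final class InferenceTiming {
  // modelBuffer, input and output are assumed to match a model with one
  // float input and one float output (hypothetical shapes).
  static Long timeOneInference(MappedByteBuffer modelBuffer, float[][] input, float[][] output) {
    try (Interpreter interpreter = new Interpreter(modelBuffer)) {
      interpreter.run(input, output);
      // The same counter TestHelper reads through the package-private wrapper.
      return interpreter.getLastNativeInferenceDurationNanoseconds();
    }
  }
}
```
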
/external/tensorflow/tensorflow/lite/java/src/test/java/org/tensorflow/lite/nnapi/

NnApiDelegateTest.java
    26   import org.tensorflow.lite.Interpreter;
    39   private static final Interpreter.Options INTERPRETER_OPTIONS =
    40   new Interpreter.Options().setRuntime(TfLiteRuntime.PREFER_SYSTEM_OVER_APPLICATION);
    49   Interpreter.Options options = new Interpreter.Options(INTERPRETER_OPTIONS);    in testBasic()
    51   Interpreter interpreter = new Interpreter(MODEL_BUFFER, options.addDelegate(delegate))) {    in testBasic()
    65   .contains("Should not access delegate before interpreter has been constructed");    in testAccessBeforeInterpreterInitialized()
    68   Interpreter.Options options =    in testAccessBeforeInterpreterInitialized()
    69   new Interpreter.Options(INTERPRETER_OPTIONS).addDelegate(delegate);    in testAccessBeforeInterpreterInitialized()
    76   .contains("Should not access delegate before interpreter has been constructed");    in testAccessBeforeInterpreterInitialized()
    83   Interpreter.Options options = new Interpreter.Options(INTERPRETER_OPTIONS);    in testWithoutNnApiDelegateOptions()
    [all …]

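A sketch of the delegate wiring exercised by `testBasic()` above: the `NnApiDelegate` is added to the options before the `Interpreter` is built, and because try-with-resources closes in reverse declaration order, the interpreter is closed before the delegate it depends on. The model buffer and array shapes are hypothetical:

```java
import java.nio.ByteBuffer;
import org.tensorflow.lite.Interpreter;
import org.tensorflow.lite.nnapi.NnApiDelegate;

final class NnApiExample {
  // modelBuffer, input and output are hypothetical; shapes must match the model.
  static void runWithNnApi(ByteBuffer modelBuffer, float[][] input, float[][] output) {
    try (NnApiDelegate delegate = new NnApiDelegate();
        Interpreter interpreter =
            new Interpreter(modelBuffer, new Interpreter.Options().addDelegate(delegate))) {
      interpreter.run(input, output);
    }  // Interpreter closes first, then the delegate, as the test above requires.
  }
}
```
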
/external/testng/src/main/java/org/testng/internal/

Bsh.java
    4    import bsh.Interpreter;
    14   private static Interpreter s_interpreter;
    20   Interpreter interpreter = getInterpreter();    in includeMethodFromExpression() local
    26   setContext(interpreter, tm.getMethod(), groups, tm);    in includeMethodFromExpression()
    27   Object evalResult = interpreter.eval(expression);    in includeMethodFromExpression()
    31   Utils.log("bsh.Interpreter", 2, "Cannot evaluate expression:"    in includeMethodFromExpression()
    35   resetContext(interpreter);    in includeMethodFromExpression()
    42   private static Interpreter getInterpreter() {    in getInterpreter()
    44   s_interpreter= new Interpreter();    in getInterpreter()
    50   private void setContext(Interpreter interpreter, Method method, Map<String, String> groups, ITestN…    in setContext() argument
    [all …]

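TestNG's `Bsh` class binds the test method and its groups into a BeanShell `Interpreter`, evaluates the user's expression, then resets the bindings. A condensed sketch of that flow using only the stock `bsh.Interpreter` API (`set`, `eval`, `unset`); the variable names here are illustrative, not necessarily TestNG's exact ones:

```java
import bsh.EvalError;
import bsh.Interpreter;
import java.lang.reflect.Method;
import java.util.Map;

final class BshExpressionFilter {
  // Bind context variables, evaluate a boolean BeanShell expression, clear bindings.
  static boolean evaluate(Interpreter interpreter, String expression,
      Method method, Map<String, String> groups) throws EvalError {
    interpreter.set("method", method);
    interpreter.set("groups", groups);
    Object result = interpreter.eval(expression);
    interpreter.unset("method");
    interpreter.unset("groups");
    return result instanceof Boolean && (Boolean) result;
  }
}
```
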
/external/tensorflow/tensorflow/lite/g3doc/guide/

inference.md
    6    *interpreter*. The TensorFlow Lite interpreter is designed to be lean and fast.
    7    The interpreter uses a static graph ordering and a custom (less-dynamic) memory
    10   This page describes how to access to the TensorFlow Lite interpreter and perform
    34   involves a few steps such as building the interpreter, and allocating
    112  2. Build an `Interpreter` based on an existing model.
    128  In Java, you'll use the `Interpreter` class to load a model and drive model
    131  You can initialize an `Interpreter` using a `.tflite` file:
    134  public Interpreter(@NotNull File modelFile);
    140  public Interpreter(@NotNull MappedByteBuffer mappedByteBuffer);
    145  `Interpreter`, it must remain unchanged for the whole lifetime of the
    [all …]

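The guide's Java section shown above reduces to two steps: construct an `Interpreter` from a `.tflite` `File` (or a `MappedByteBuffer`, which must stay unchanged for the interpreter's lifetime), then call `run()` with input and output buffers. A minimal sketch, with a hypothetical model path and tensor shapes:

```java
import java.io.File;
import org.tensorflow.lite.Interpreter;

final class RunInference {
  public static void main(String[] args) {
    // Hypothetical: a float model with input shape [1, 4] and output shape [1, 2].
    float[][] input = new float[1][4];
    float[][] output = new float[1][2];
    try (Interpreter interpreter = new Interpreter(new File("model.tflite"))) {
      interpreter.run(input, output);
      System.out.println("output[0][0] = " + output[0][0]);
    }
  }
}
```
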
/external/tensorflow/tensorflow/lite/java/src/main/native/

nativeinterpreterwrapper_jni.cc
    29   #include "tensorflow/lite/core/shims/cc/interpreter.h"
    52   using tflite_shims::Interpreter;
    57   Interpreter* convertLongToInterpreter(JNIEnv* env, jlong handle) {    in convertLongToInterpreter()
    58   return CastLongToPointer<Interpreter>(env, handle);    in convertLongToInterpreter()
    117  Interpreter* interpreter = convertLongToInterpreter(env, handle);    in Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputNames() local
    118  if (interpreter == nullptr) return nullptr;    in Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputNames()
    128  size_t size = interpreter->inputs().size();    in Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputNames()
    133  env->NewStringUTF(interpreter->GetInputName(i)));    in Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputNames()
    143  Interpreter* interpreter = convertLongToInterpreter(env, handle);    in Java_org_tensorflow_lite_NativeInterpreterWrapper_allocateTensors() local
    144  if (interpreter == nullptr) return;    in Java_org_tensorflow_lite_NativeInterpreterWrapper_allocateTensors()
    [all …]

/external/libchrome-gestures/src/

timestamp_filter_interpreter_unittest.cc
    22   class TimestampFilterInterpreterTestInterpreter : public Interpreter {
    25   : Interpreter(nullptr, nullptr, false) {}    in TimestampFilterInterpreterTestInterpreter()
    41   TimestampFilterInterpreter interpreter(nullptr, base_interpreter, nullptr);    in TEST() local
    42   TestInterpreterWrapper wrapper(&interpreter);    in TEST()
    68   TimestampFilterInterpreter interpreter(nullptr, base_interpreter, nullptr);    in TEST() local
    69   TestInterpreterWrapper wrapper(&interpreter);    in TEST()
    93   TimestampFilterInterpreter interpreter(nullptr, base_interpreter, nullptr);    in TEST() local
    94   TestInterpreterWrapper wrapper(&interpreter);    in TEST()
    128  TimestampFilterInterpreter interpreter(nullptr, base_interpreter, nullptr);    in TEST() local
    129  TestInterpreterWrapper wrapper(&interpreter);    in TEST()
    [all …]

metrics_filter_interpreter_unittest.cc
    30   class MetricsFilterInterpreterTestInterpreter : public Interpreter {
    33   : Interpreter(nullptr, nullptr, false),    in MetricsFilterInterpreterTestInterpreter()
    53   MetricsFilterInterpreter interpreter(nullptr, base_interpreter, nullptr,    in TEST() local
    56   TestInterpreterWrapper wrapper(&interpreter, &hwprops);    in TEST()
    107  EXPECT_EQ(interpreter.devclass_, GESTURES_DEVCLASS_TOUCHPAD);    in TEST()
    108  EXPECT_EQ(interpreter.mouse_movement_session_index_, 0);    in TEST()
    109  EXPECT_EQ(interpreter.mouse_movement_current_session_length, 0);    in TEST()
    110  EXPECT_EQ(interpreter.mouse_movement_current_session_start, 0.0);    in TEST()
    111  EXPECT_EQ(interpreter.mouse_movement_current_session_last, 0.0);    in TEST()
    112  EXPECT_EQ(interpreter.mouse_movement_current_session_distance, 0.0);    in TEST()
    [all …]

/external/tflite-support/tensorflow_lite_support/cc/task/core/

tflite_engine.h
    42   #include "tensorflow/lite/interpreter.h"
    58   using Interpreter = struct TfLiteInterpreter;
    63   using Interpreter = tflite::Interpreter;    variable
    65   using InterpreterDeleter = std::default_delete<Interpreter>;
    77   static int32_t InputCount(const Interpreter* interpreter) {    in InputCount() argument
    79   return TfLiteInterpreterGetInputTensorCount(interpreter);    in InputCount()
    81   return interpreter->inputs().size();    in InputCount()
    84   static int32_t OutputCount(const Interpreter* interpreter) {    in OutputCount() argument
    86   return TfLiteInterpreterGetOutputTensorCount(interpreter);    in OutputCount()
    88   return interpreter->outputs().size();    in OutputCount()
    [all …]

/external/tensorflow/tensorflow/lite/java/src/test/java/org/tensorflow/lite/gpu/

GpuDelegateTest.java
    35   import org.tensorflow.lite.Interpreter;
    60   Interpreter.Options options = new Interpreter.Options();    in testInterpreterWithGpu_FloatModel()
    62   Interpreter interpreter = new Interpreter(MODEL_BUFFER, options.addDelegate(delegate))) {    in testInterpreterWithGpu_FloatModel()
    71   interpreter.runForMultipleInputsOutputs(inputs, outputs);    in testInterpreterWithGpu_FloatModel()
    85   Interpreter.Options options = new Interpreter.Options();    in testInterpreterWithGpu_QuantModelRunWithDelegate()
    88   Interpreter interpreter =    in testInterpreterWithGpu_QuantModelRunWithDelegate()
    89   new Interpreter(MOBILENET_QUANTIZED_MODEL_BUFFER, options.addDelegate(delegate))) {    in testInterpreterWithGpu_QuantModelRunWithDelegate()
    91   interpreter.run(img, output);    in testInterpreterWithGpu_QuantModelRunWithDelegate()
    93   assertThat(InterpreterTestHelper.executionPlanLength(interpreter)).isEqualTo(1);    in testInterpreterWithGpu_QuantModelRunWithDelegate()
    94   assertThat(interpreter.getInputTensor(0).shape()).isEqualTo(new int[] {1, 224, 224, 3});    in testInterpreterWithGpu_QuantModelRunWithDelegate()
    [all …]

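The GPU path follows the same delegate pattern as NNAPI; the sketch below mirrors `testInterpreterWithGpu_FloatModel()` above, using `runForMultipleInputsOutputs()`. The model buffer and shapes are hypothetical, and a device with a supported GPU driver is assumed:

```java
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import org.tensorflow.lite.Interpreter;
import org.tensorflow.lite.gpu.GpuDelegate;

final class GpuExample {
  // modelBuffer and the float[][] shapes are hypothetical; they must match the model.
  static void runOnGpu(ByteBuffer modelBuffer, float[][] input0, float[][] output0) {
    try (GpuDelegate delegate = new GpuDelegate();
        Interpreter interpreter =
            new Interpreter(modelBuffer, new Interpreter.Options().addDelegate(delegate))) {
      Object[] inputs = {input0};
      Map<Integer, Object> outputs = new HashMap<>();
      outputs.put(0, output0);
      interpreter.runForMultipleInputsOutputs(inputs, outputs);
    }
  }
}
```
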
/external/libtextclassifier/native/utils/

tflite-model-executor.h
    27   #include "tensorflow/lite/interpreter.h"
    74   // Creates an Interpreter for the model that serves as a scratch-pad for the
    75   // inference. The Interpreter is NOT thread-safe.
    76   std::unique_ptr<tflite::Interpreter> CreateInterpreter() const;
    80   tflite::Interpreter* interpreter) const {    in SetInput() argument
    81   input_data.copy_to(interpreter->typed_input_tensor<T>(input_index),    in SetInput()
    87   tflite::Interpreter* interpreter) const {    in SetInput() argument
    89   interpreter->typed_input_tensor<T>(input_index));    in SetInput()
    94   tflite::Interpreter* interpreter) const {    in SetInput() argument
    96   interpreter->tensor(interpreter->inputs()[input_index]);    in SetInput()
    [all …]

/external/sl4a/Common/src/com/googlecode/android_scripting/interpreter/

InterpreterConfiguration.java
    17   package com.googlecode.android_scripting.interpreter;
    34   import com.googlecode.android_scripting.interpreter.shell.ShellInterpreter;
    52   private final Set<Interpreter> mInterpreterSet;
    65   private final Map<String, Interpreter> mmDiscoveredInterpreters;
    71   mmDiscoveredInterpreters = new HashMap<String, Interpreter>();    in InterpreterListener()
    118  Interpreter discoveredInterpreter = buildInterpreter(packageName);    in addInterpreter()
    124  Log.v("Interpreter discovered: " + packageName + "\nBinary: "    in addInterpreter()
    135  Interpreter interpreter = mmDiscoveredInterpreters.get(packageName);    in remove()
    136  if (interpreter == null) {    in remove()
    137  Log.v("Interpreter for " + packageName + " not installed.");    in remove()
    [all …]