| /test/mlts/models/ |
| D | README.txt |
      22  MobileNet TensorFlow Lite model based on:
      34  8-bit quantized MobileNet TensorFlow Lite model based on:
      46  MobileNet v2 TensorFlow Lite model based on:
      58  8-bit quantized MobileNet v2 TensorFlow Lite model based on:
      70  Float version of MobileNet SSD TensorFlow model based on:
      83  8-bit quantized MobileNet SSD TensorFlow Lite model based on:
      96  TTS TensorFlow Lite model based on:
     102  Note that the TensorFlow Lite model is the acoustic model in the paper. It is used because it is
     103  much heavier than the duration model.
     106  ASR TensorFlow Lite model based on the ASR acoustic model in:
     [all …]
|
| /test/mlts/benchmark/jni/ |
| D | random_graph_test_jni.cpp |
     190  GeneratedModel model;  in runRandomModel() local
     191  createModel(*testModel, &model);  in runRandomModel()
     192  if (!model.isValid()) {  in runRandomModel()
     196  auto modelFinishResult = model.finish();  in runRandomModel()
     212  model.getHandle(), &mDevice, 1, opsSupportedFlags.get());  in runRandomModel()
     247  auto [compilationResult, compilation] = CreateCompilation(model, devices);  in runRandomModel()
     404  android::nn::generated_tests::GeneratedModel model;  in OperationsSupportedByDevice() local
     405  createModel(*testModel, &model);  in OperationsSupportedByDevice()
     406  if (!model.isValid()) {  in OperationsSupportedByDevice()
     410  auto modelFinishResult = model.finish();  in OperationsSupportedByDevice()
     [all …]
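The excerpt follows one flow: build a generated model, validate and finish it, ask the driver which operations it supports, then compile for that device. Below is a minimal sketch of the same flow against the raw NNAPI C API, which the harness's GeneratedModel wrapper hides; compileOnSingleDevice and operationCount are illustrative names, and error handling is condensed.

    #include <android/NeuralNetworks.h>

    #include <cstdint>
    #include <memory>

    // Hypothetical helper mirroring the excerpt: finish the model, query
    // per-operation support, then compile for exactly one device.
    bool compileOnSingleDevice(ANeuralNetworksModel* model,
                               const ANeuralNetworksDevice* device,
                               uint32_t operationCount) {
        // Freeze the model; operands and operations can no longer be added.
        if (ANeuralNetworksModel_finish(model) != ANEURALNETWORKS_NO_ERROR) {
            return false;
        }

        // Ask the driver which of the model's operations it can run
        // (the excerpt's opsSupportedFlags buffer).
        auto supported = std::make_unique<bool[]>(operationCount);
        if (ANeuralNetworksModel_getSupportedOperationsForDevices(
                model, &device, /*numDevices=*/1, supported.get()) !=
            ANEURALNETWORKS_NO_ERROR) {
            return false;
        }
        for (uint32_t i = 0; i < operationCount; ++i) {
            if (!supported[i]) return false;  // reject partially supported models
        }

        // Compile for this device only (no CPU fallback).
        ANeuralNetworksCompilation* compilation = nullptr;
        if (ANeuralNetworksCompilation_createForDevices(
                model, &device, /*numDevices=*/1, &compilation) !=
            ANEURALNETWORKS_NO_ERROR) {
            return false;
        }
        const bool ok = ANeuralNetworksCompilation_finish(compilation) ==
                        ANEURALNETWORKS_NO_ERROR;
        ANeuralNetworksCompilation_free(compilation);
        return ok;
    }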
|
| D | benchmark_jni.cpp |
     113  BenchmarkModel* model = (BenchmarkModel *) _modelHandle;  in Java_com_android_nn_benchmark_core_NNTestBase_destroyModel() local
     114  delete(model);  in Java_com_android_nn_benchmark_core_NNTestBase_destroyModel()
     125  BenchmarkModel* model = (BenchmarkModel *) _modelHandle;  in Java_com_android_nn_benchmark_core_NNTestBase_resizeInputTensors() local
     130  return model->resizeInputTensors(std::move(shape));  in Java_com_android_nn_benchmark_core_NNTestBase_resizeInputTensors()
     322  BenchmarkModel* model = reinterpret_cast<BenchmarkModel*>(_modelHandle);  in Java_com_android_nn_benchmark_core_NNTestBase_runBenchmark() local
     343  bool success = model->benchmark(data.data(), inferencesSeqMaxCount, timeoutSec, flags, &result);  in Java_com_android_nn_benchmark_core_NNTestBase_runBenchmark()
     425  BenchmarkModel* model = reinterpret_cast<BenchmarkModel*>(_modelHandle);  in Java_com_android_nn_benchmark_core_NNTestBase_dumpAllLayers() local
     433  model->dumpAllLayers(dumpPathStr, data.data());  in Java_com_android_nn_benchmark_core_NNTestBase_dumpAllLayers()
     506  BenchmarkModel* model = reinterpret_cast<BenchmarkModel*>(_modelHandle);  in Java_com_android_nn_benchmark_core_NNTestBase_runCompilationBenchmark() local
     515  model->benchmarkCompilation(maxNumIterations, warmupTimeoutSec,  in Java_com_android_nn_benchmark_core_NNTestBase_runCompilationBenchmark()
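Every JNI entry point here follows the same pattern: the Java class NNTestBase holds the native BenchmarkModel only as an opaque jlong handle, and each native method casts it back before use. A minimal sketch of that handle round-trip, with a hypothetical com.example.NativeModel class and a stub BenchmarkModel standing in for the real one:

    #include <jni.h>

    // Stub for the real class in run_tflite.h, which wraps a TFLite interpreter.
    struct BenchmarkModel {};

    extern "C" JNIEXPORT jlong JNICALL
    Java_com_example_NativeModel_create(JNIEnv*, jclass) {
        // Ownership passes to the Java object as a raw address; Java must
        // call destroy() exactly once, or the model leaks.
        return reinterpret_cast<jlong>(new BenchmarkModel());
    }

    extern "C" JNIEXPORT void JNICALL
    Java_com_example_NativeModel_destroy(JNIEnv*, jclass, jlong handle) {
        // Cast back to the concrete type before deleting, mirroring the
        // destroyModel() excerpt above.
        delete reinterpret_cast<BenchmarkModel*>(handle);
    }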
|
| D | run_tflite.cpp |
     106  BenchmarkModel* model = new BenchmarkModel();  in create() local
     107  if (!model->init(modelfile, tfliteBackend, enable_intermediate_tensors_dump, nnapiErrno,  in create()
     110  delete model;  in create()
     113  return model;  in create()
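create() is a two-phase factory: allocate, run init(), and delete the object on failure so that a half-initialized model never reaches callers. A sketch of that shape under assumed, simplified signatures (only create(), init(), and the delete appear in the excerpt):

    #include <string>

    class BenchmarkModel {
      public:
        // Returns a ready-to-use model, or nullptr if loading fails.
        static BenchmarkModel* create(const std::string& modelfile) {
            BenchmarkModel* model = new BenchmarkModel();
            if (!model->init(modelfile)) {
                delete model;    // failed init: free before returning
                return nullptr;  // callers must check for nullptr
            }
            return model;
        }

      private:
        BenchmarkModel() = default;  // construction only through create()
        bool init(const std::string& modelfile) {
            // The real init() loads the .tflite file and configures the
            // backend; stubbed here so the sketch stands alone.
            return !modelfile.empty();
        }
    };

Callers such as multi_process_test.cpp (further down this listing) immediately wrap the returned raw pointer in std::unique_ptr, so the nullptr check and the eventual delete stay in one place.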
|
| /test/mlts/benchmark/src/com/android/nn/benchmark/app/ |
| D | AcceleratorSpecificTestSupport.java |
      47  for (TestModels.TestModelEntry model : TestModels.modelsList()) {  in findTestModelRunningOnAccelerator()
      48  if (Processor.isTestModelSupportedByAccelerator(context, model, acceleratorName)) {  in findTestModelRunningOnAccelerator()
      49  return Optional.of(model);  in findTestModelRunningOnAccelerator()
      58  for (TestModels.TestModelEntry model : TestModels.modelsList()) {  in findAllTestModelsRunningOnAccelerator()
      59  if (Processor.isTestModelSupportedByAccelerator(context, model, acceleratorName)) {  in findAllTestModelsRunningOnAccelerator()
      60  result.add(model);  in findAllTestModelsRunningOnAccelerator()
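Both helpers share one probe loop: walk TestModels.modelsList() and keep the entries that Processor.isTestModelSupportedByAccelerator() accepts. The same shape sketched in C++ (one language is used for all sketches in this listing), with a hypothetical TestModelEntry record and the support check passed in as a predicate:

    #include <functional>
    #include <optional>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for TestModels.TestModelEntry.
    struct TestModelEntry { std::string name; };

    // Returns the first model the accelerator accepts, mirroring
    // findTestModelRunningOnAccelerator(); the predicate stands in for
    // Processor.isTestModelSupportedByAccelerator().
    std::optional<TestModelEntry> findFirstSupported(
            const std::vector<TestModelEntry>& models,
            const std::function<bool(const TestModelEntry&)>& supported) {
        for (const TestModelEntry& model : models) {
            if (supported(model)) return model;
        }
        return std::nullopt;  // no model runs on this accelerator
    }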
|
| D | NNTest.java |
      41  public NNTest(TestModels.TestModelEntry model) {  in NNTest() argument
      42  super(model, /*acceleratorName=*/null);  in NNTest()
|
| D | TFLiteTest.java |
      31  public TFLiteTest(TestModelEntry model) {  in TFLiteTest() argument
      32  super(model, /*acceleratorName=*/null);  in TFLiteTest()
|
| D | NNInferenceStressTest.java |
      44  public NNInferenceStressTest(TestModels.TestModelEntry model, String acceleratorName) {  in NNInferenceStressTest() argument
      45  super(model, acceleratorName);  in NNInferenceStressTest()
|
| D | NNCrystalBallTest.java |
      49  public NNCrystalBallTest(TestModels.TestModelEntry model, String acceleratorName) {  in NNCrystalBallTest() argument
      50  super(model, acceleratorName);  in NNCrystalBallTest()
|
| D | NNModelLoadingStressTest.java |
      47  public NNModelLoadingStressTest(TestModels.TestModelEntry model, String acceleratorName) {  in NNModelLoadingStressTest() argument
      48  super(model, acceleratorName);  in NNModelLoadingStressTest()
|
| D | NNScoringTest.java |
      53  public NNScoringTest(TestModels.TestModelEntry model, String acceleratorName) {  in NNScoringTest() argument
      54  super(model, acceleratorName);  in NNScoringTest()
|
| D | BenchmarkTestBase.java |
      87  public BenchmarkTestBase(TestModelEntry model, String acceleratorName) {  in BenchmarkTestBase() argument
      89  mModel = model;  in BenchmarkTestBase()
     304  … TestModels.modelsList().stream().map(model -> new Object[] {model}).collect(Collectors.toList())  in modelsOnAccelerators()
|
| /test/mlts/benchmark/ |
| D | build_and_run_benchmark.sh |
      60  -m|--filter-model)
     105  model-loading-stress)
     125  multi-process-model-load-stress)
     130  memory-mapped-model-load-stress)
     134  model-load-random-stress)
|
| D | README.txt |
      44  partition the model and assign the best available one(s) by using the
      62  on a single model in multiple processes and threads with different probabilities in client process
      65  * multi-process-model-load-stress: this extends the `parallel-inference-stress`, running model load
      66  on a single model in multiple processes and threads with different probabilities in client process
      69  * memory-mapped-model-load-stress: runs a series of parallel model compilations with memory-mapped
      72  * model-load-random-stress: tests compiling a large set of randomly generated models
|
| /test/mlts/benchmark/native/ |
| D | multi_process_test.cpp |
     115  std::unique_ptr<BenchmarkModel> model(BenchmarkModel::create(  in runModel() local
     122  if (!model) {  in runModel()
     137  std::unique_ptr<BenchmarkModel> model(BenchmarkModel::create(  in runModel() local
     144  if (!model) {  in runModel()
     152  return model->benchmark(data, std::numeric_limits<int>::max(),  in runModel()
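multi_process_test drives the same model from several processes at once, each child owning its own BenchmarkModel through std::unique_ptr as in the excerpt. A condensed sketch of that shape using fork(); runInProcesses is an illustrative name, the harness class is stubbed, and the real test additionally injects client and driver failures:

    #include <sys/wait.h>
    #include <unistd.h>

    #include <memory>
    #include <string>

    // Stub standing in for the real harness class.
    struct BenchmarkModel {
        static BenchmarkModel* create(const std::string&) { return new BenchmarkModel(); }
        bool benchmark() { return true; }  // the real method runs timed inferences
    };

    // Forks nprocs children that each load and benchmark the model; returns
    // the number of children that failed.
    int runInProcesses(const std::string& modelfile, int nprocs) {
        for (int i = 0; i < nprocs; ++i) {
            if (fork() == 0) {  // child: create a private copy of the model
                std::unique_ptr<BenchmarkModel> model(
                        BenchmarkModel::create(modelfile));
                _exit(model && model->benchmark() ? 0 : 1);
            }
        }
        int failures = 0;
        int status = 0;
        while (wait(&status) > 0) {  // reap every child
            if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) ++failures;
        }
        return failures;
    }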
|
| /test/app_compat/csuite/harness/src/main/java/com/android/csuite/core/ |
| D | DeviceJUnit4ClassRunner.java |
      21  import org.junit.runners.model.InitializationError;
|
| /test/mlts/benchmark/src/com/android/nn/benchmark/core/ |
| D | TestModels.java |
     130  static public void registerModel(TestModelEntry model) {  in registerModel() argument
     134  sTestModelEntryList.add(model);  in registerModel()
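registerModel() appends to a static list that the parameterized tests later read back through modelsList(). The same registry idiom sketched in C++, with assumed names; any synchronization the real Java class performs is not visible in the excerpt and is omitted here:

    #include <string>
    #include <vector>

    // Hypothetical C++ analogue of TestModels: a process-wide registry that
    // models are added to once and enumerated by the test runner.
    struct TestModelEntry { std::string name; };

    class TestModels {
      public:
        static void registerModel(TestModelEntry model) {
            entries().push_back(std::move(model));
        }
        static const std::vector<TestModelEntry>& modelsList() { return entries(); }

      private:
        // A function-local static avoids the static-initialization-order
        // problem when registration happens from other translation units.
        static std::vector<TestModelEntry>& entries() {
            static std::vector<TestModelEntry> list;
            return list;
        }
    };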
|
| /test/mlts/benchmark/tools/ |
| D | tensor_utils.py |
     106  for model in topk_aosp['models']:
     107  self.nnapi_to_tflite_name[model['name']] = model['modelFile']
     108  self.tflite_to_nnapi_name[model['modelFile']] = model['name']
     118  model = self.ModelMetaData(self.__get_model_json_path(tflite_model_name))
     120  self.models[nnapi_model_name] = model
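tensor_utils.py keeps two dictionaries so a model name can be translated in either direction, benchmark name to .tflite file name and back. The same structure sketched in C++ (kept in one language with the sketches above), with a hypothetical ModelInfo record standing in for the parsed JSON entries:

    #include <string>
    #include <unordered_map>
    #include <vector>

    struct ModelInfo {
        std::string name;       // benchmark name, e.g. "mobilenet_v1"
        std::string modelFile;  // TFLite file name, e.g. "mobilenet_v1.tflite"
    };

    struct NameIndex {
        std::unordered_map<std::string, std::string> nnapiToTflite;
        std::unordered_map<std::string, std::string> tfliteToNnapi;

        explicit NameIndex(const std::vector<ModelInfo>& models) {
            for (const ModelInfo& m : models) {
                nnapiToTflite[m.name] = m.modelFile;   // forward lookup
                tfliteToNnapi[m.modelFile] = m.name;   // reverse lookup
            }
        }
    };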
|
| /test/mlts/benchmark/crashtest/src/com/android/nn/crashtest/ |
| D | MainActivity.java |
     176  model -> {  in modelsForAccelerator()
     180  model, acceleratorName);  in modelsForAccelerator()
     186  acceleratorName, model.mModelName));  in modelsForAccelerator()
|
| /test/cts-root/hostsidetests/packageinstaller/src/com/android/cts_root/packageinstaller/host/ |
| D | SessionCleanUpHostTest.java |
      34  import org.junit.runners.model.Statement;
|
| /test/vts-testcase/nbu/src/ |
| D | README.md |
      39  The two devices should be of the same model and build (identical fingerprint).
|
| /test/mlts/benchmark/results/ |
| D | Chart.bundle.min.js |
      10  …model:t._model,deltaK:0,mK:0}}),c=h.length;for(e=0;e<c;++e)if(!(n=h[e]).model.skip){if(i=e>0?h[e-1…
|
| /test/mlts/models/assets/image_classification/ |
| D | labels.txt |
     663  model t
|