/external/armnn/src/timelineDecoder/ |
D | JSONTimelineDecoder.cpp |
    33   this->m_Model.jsonEntities.insert({entity.m_Guid, jsonEntity});  in CreateEntity()
    41   this->m_Model.eventClasses.insert({eventClass.m_Guid, eventClass});  in CreateEventClass()
    42   this->m_Model.jsonEntities.insert({eventClass.m_Guid, jsonEntity});  in CreateEventClass()
    50   this->m_Model.events.insert({event.m_Guid, event});  in CreateEvent()
    51   this->m_Model.jsonEntities.insert({jsonEntity.GetGuid(), jsonEntity});  in CreateEvent()
    57   this->m_Model.labels.insert({label.m_Guid, label});  in CreateLabel()
    80   m_Model.relationships.insert({relationship.m_Guid, relationship});  in CreateRelationship()
    92   if (m_Model.jsonEntities.count(relationship.m_HeadGuid) != 0)  in HandleExecutionLink()
    94   JSONEntity& tailJSONEntity = m_Model.jsonEntities.at(tailGuid);  in HandleExecutionLink()
    95   JSONEntity& headJSONEntity = m_Model.jsonEntities.at(headGuid);  in HandleExecutionLink()
    [all …]
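    These call sites build up the decoded timeline model by inserting each received element into a GUID-keyed map, so that later relationship packets can look elements up by GUID. A minimal sketch of that idea, using simplified stand-in types rather than Arm NN's actual JSONEntity/Model definitions:

        #include <cstdint>
        #include <map>
        #include <string>

        // Simplified stand-ins; the real decoder defines richer JSONEntity,
        // Event, Label, etc. types inside armnn::timelinedecoder.
        struct Entity     { uint64_t m_Guid; };
        struct EventClass { uint64_t m_Guid; std::string m_Name; };
        struct Label      { uint64_t m_Guid; std::string m_Name; };

        struct Model
        {
            std::map<uint64_t, Entity>     jsonEntities;
            std::map<uint64_t, EventClass> eventClasses;
            std::map<uint64_t, Label>      labels;
        };

        class Decoder
        {
        public:
            // Each Create* callback records the element under its GUID so that
            // later packets can find it with .count()/.at().
            void CreateEntity(const Entity& entity)
            {
                m_Model.jsonEntities.insert({entity.m_Guid, entity});
            }

            void CreateLabel(const Label& label)
            {
                m_Model.labels.insert({label.m_Guid, label});
            }

            bool HasEntity(uint64_t guid) const
            {
                return m_Model.jsonEntities.count(guid) != 0;
            }

        private:
            Model m_Model;
        };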
|
D | JSONTimelineDecoder.hpp | 66 Model m_Model; member in armnn::timelinedecoder::JSONTimelineDecoder
|
/external/armnn/src/armnnTfLiteParser/ |
D | TfLiteParser.cpp |
    816  const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];  in InputTensorInfo()
    817  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];  in InputTensorInfo()
    840  const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];  in OutputTensorInfoFromInputs()
    841  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];  in OutputTensorInfoFromInputs()
    864  auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);  in OutputTensorInfoFromInputs()
    879  m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];  in OutputTensorInfoFromInputs()
    898  const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];  in OutputTensorInfoFromShapes()
    899  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];  in OutputTensorInfoFromShapes()
    917  m_Model = nullptr;  in ResetParser()
    928  m_Model = LoadModelFromFile(graphFile);  in CreateNetworkFromBinaryFile()
    [all …]
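    These sites all index the parsed model by subgraph and then by operator. A rough sketch of that access pattern, using hypothetical simplified structs in place of the schema-generated tflite object-API types the parser actually holds:

        #include <cstddef>
        #include <memory>
        #include <stdexcept>
        #include <vector>

        // Hypothetical simplified structs; the real parser works with the
        // generated tflite types (tflite::SubGraphT, tflite::OperatorT, ...).
        struct Operator { std::vector<int> inputs; std::vector<int> outputs; };
        struct SubGraph { std::vector<std::unique_ptr<Operator>> operators; };
        struct Model    { std::vector<std::unique_ptr<SubGraph>> subgraphs; };

        class Parser
        {
        public:
            explicit Parser(std::unique_ptr<Model> model) : m_Model(std::move(model)) {}

            // Mirrors m_Model->subgraphs[subgraphIndex]->operators[operatorIndex]
            // above, with explicit bounds checks.
            const Operator& GetOperator(std::size_t subgraphIndex, std::size_t operatorIndex) const
            {
                if (subgraphIndex >= m_Model->subgraphs.size())
                {
                    throw std::out_of_range("invalid subgraph index");
                }
                const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
                if (operatorIndex >= subgraphPtr->operators.size())
                {
                    throw std::out_of_range("invalid operator index");
                }
                return *subgraphPtr->operators[operatorIndex];
            }

        private:
            std::unique_ptr<Model> m_Model;  // reset to nullptr by ResetParser() in the real code
        };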
|
/external/android-nn-driver/ |
D | ModelToINetworkConverter.cpp |
    26   , m_Model(model)  in ModelToINetworkConverter()
    48   ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary<HalModel>(m_Model).c_str());  in Convert()
    53   if (!setRunTimePoolInfosFromHidlMemories(&m_Data.m_MemPools, m_Model.pools))  in Convert()
    55   …if (!setRunTimePoolInfosFromCanonicalMemories(&m_Data.m_MemPools, uncheckedConvert(m_Model.pools)))  in Convert()
    65   for (auto&& pool : m_Model.pools)  in Convert()
    85   …m_Data.m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(getMainModel(m_Model).operands.si…  in Convert()
    89   for (uint32_t i = 0; i < getMainModel(m_Model).inputIndexes.size(); i++)  in Convert()
    93   uint32_t inputIndex = getMainModel(m_Model).inputIndexes[i];  in Convert()
    95   const HalOperand& operand = getMainModel(m_Model).operands[inputIndex];  in Convert()
    122  …for (uint32_t operationIdx = 0; operationIdx < getMainModel(m_Model).operations.size(); operationI…  in Convert()
    [all …]
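    The converter keeps a reference to the HAL model and walks every operation of its main graph, converting each one in turn. A compressed sketch of that loop, with hypothetical Operation/HalModel types standing in for the real HAL definitions:

        #include <cstdint>
        #include <vector>

        // Hypothetical stand-ins for the NNAPI HAL model types.
        struct Operation {};
        struct MainGraph { std::vector<Operation> operations; };
        struct HalModel  { MainGraph main; };

        // The real code goes through getMainModel() so the same loop works
        // across HAL versions that nest the main subgraph differently.
        inline const MainGraph& getMainModel(const HalModel& model) { return model.main; }

        class ModelConverter
        {
        public:
            explicit ModelConverter(const HalModel& model) : m_Model(model) {}

            // Walk every operation of the main graph; any failure aborts conversion.
            bool Convert()
            {
                for (uint32_t operationIdx = 0;
                     operationIdx < getMainModel(m_Model).operations.size();
                     operationIdx++)
                {
                    const Operation& operation = getMainModel(m_Model).operations[operationIdx];
                    if (!ConvertOperation(operation))
                    {
                        return false;
                    }
                }
                return true;
            }

        private:
            // Placeholder for the per-operation conversion entry point.
            bool ConvertOperation(const Operation&) { return true; }

            const HalModel& m_Model;
        };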
|
D | ArmnnPreparedModel.hpp |
    74   m_Model(model),  in ArmnnThreadPoolCallback()
    83   ArmnnPreparedModel<HalVersion>* m_Model;  member in armnn_driver::ArmnnPreparedModel::ArmnnThreadPoolCallback
    103  HalModel m_Model;  member in armnn_driver::ArmnnPreparedModel
|
D | ArmnnPreparedModel.cpp |
    127  , m_Model(model)  in ArmnnPreparedModel()
    186  ALOGV("ArmnnPreparedModel::execute(): %s", GetModelSummary(m_Model).c_str());  in execute()
    194  if (!android::nn::validateRequest(request, m_Model))  in execute()
    402  for (unsigned int i = 0; i < getMainModel(m_Model).inputIndexes.size(); i++)  in ExecuteWithDummyInputs()
    416  for (unsigned int i = 0; i < getMainModel(m_Model).outputIndexes.size(); i++)  in ExecuteWithDummyInputs()
    506  m_Model->DumpTensorsIfRequired("Output", *m_OutputTensors);  in Notify()
|
D | RequestThread.hpp |
    58   : m_Model(model)  in AsyncExecuteData()
    66   PreparedModel<HalVersion>* m_Model;  member
|
D | RequestThread_1_3.hpp |
    57   : m_Model(model)  in AsyncExecuteData()
    65   PreparedModel<HalVersion>* m_Model;  member
|
D | ArmnnPreparedModel_1_2.hpp |
    105  m_Model(model),  in ArmnnThreadPoolCallback_1_2()
    115  ArmnnPreparedModel_1_2<HalVersion>* m_Model;  member in armnn_driver::ArmnnPreparedModel_1_2::ArmnnThreadPoolCallback_1_2
    158  V1_2::Model m_Model;  member in armnn_driver::ArmnnPreparedModel_1_2
|
D | ArmnnPreparedModel_1_3.cpp |
    183   , m_Model(model)  in ArmnnPreparedModel_1_3()
    403   …if (!m_PreparedFromCache && !android::nn::validateRequest(request, m_Model, /*allowUnspecifiedOutp…  in executeFenced()
    419   ALOGV("ArmnnPreparedModel_1_3::executeFenced(): %s", GetModelSummary(m_Model).c_str());  in executeFenced()
    678   if (!m_PreparedFromCache && !android::nn::validateRequest(convertToV1_3(request), m_Model))  in ExecuteSynchronously()
    688   if (!m_PreparedFromCache && !android::nn::validateRequest(request, m_Model))  in ExecuteSynchronously()
    727   … ALOGV("ArmnnPreparedModel_1_3::executeSynchronously(): %s", GetModelSummary(m_Model).c_str());  in executeSynchronously()
    763   … ALOGV("ArmnnPreparedModel_1_3::executeSynchronously_1_3(): %s", GetModelSummary(m_Model).c_str());  in executeSynchronously_1_3()
    1024  ALOGV("ArmnnPreparedModel_1_3::execute(): %s", GetModelSummary(m_Model).c_str());  in Execute()
    1028  if (!m_PreparedFromCache && !android::nn::validateRequest(request, m_Model))  in Execute()
    1124  m_Model->DumpTensorsIfRequired("Output", *m_OutputTensors);  in Notify()
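    Every execute path first validates the incoming request against the model the prepared object was built from, unless the network was prepared from a cache (in which case the original model is not kept). A hedged sketch of that guard, with stand-in Request/Model types instead of the HAL ones:

        // Hypothetical stand-ins for the HAL request/model and error codes.
        struct Request {};
        struct Model   {};
        enum class ErrorStatus { NONE, INVALID_ARGUMENT };

        // Placeholder for android::nn::validateRequest.
        bool validateRequest(const Request&, const Model&) { return true; }

        class PreparedModel
        {
        public:
            ErrorStatus Execute(const Request& request)
            {
                // Skip validation when prepared from cache: the full model is not retained.
                if (!m_PreparedFromCache && !validateRequest(request, m_Model))
                {
                    return ErrorStatus::INVALID_ARGUMENT;
                }
                // ... enqueue the request on the Arm NN runtime ...
                return ErrorStatus::NONE;
            }

        private:
            Model m_Model;
            bool  m_PreparedFromCache = false;
        };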
|
D | ArmnnPreparedModel_1_3.hpp |
    140  m_Model(model),  in ArmnnThreadPoolCallback_1_3()
    150  ArmnnPreparedModel_1_3<HalVersion>* m_Model;  member in armnn_driver::ArmnnPreparedModel_1_3::ArmnnThreadPoolCallback_1_3
    193  V1_3::Model m_Model;  member in armnn_driver::ArmnnPreparedModel_1_3
|
D | ArmnnPreparedModel_1_2.cpp |
    162  , m_Model(model)  in ArmnnPreparedModel_1_2()
    465  … ALOGV("ArmnnPreparedModel_1_2::executeSynchronously(): %s", GetModelSummary(m_Model).c_str());  in executeSynchronously()
    482  if (!m_PreparedFromCache && !android::nn::validateRequest(request, m_Model))  in executeSynchronously()
    684  ALOGV("ArmnnPreparedModel_1_2::execute(): %s", GetModelSummary(m_Model).c_str());  in Execute()
    688  if (!m_PreparedFromCache && !android::nn::validateRequest(request, m_Model))  in Execute()
    813  m_Model->DumpTensorsIfRequired("Output", *m_OutputTensors);  in Notify()
|
D | ModelToINetworkConverter.hpp | 52 const HalModel& m_Model; member in armnn_driver::ModelToINetworkConverter
|
/external/armnn/profiling/server/src/timelineDecoder/ |
D | TimelineDecoder.cpp |
    160  if (m_Model.m_Labels.empty() && m_Model.m_Entities.empty() && m_Model.m_EventClasses.empty() &&  in print()
    161  m_Model.m_Events.empty() && m_Model.m_Relationships.empty())  in print()
    189  for (uint32_t i = 0; i < m_Model.m_Labels.size(); ++i)  in printLabels()
    193  … body.append(arm::pipe::CentreAlignFormatting(std::to_string(m_Model.m_Labels[i].m_Guid), 12));  in printLabels()
    195  body.append(arm::pipe::CentreAlignFormatting(m_Model.m_Labels[i].m_Name, 30));  in printLabels()
    215  for (uint32_t i = 0; i < m_Model.m_Entities.size(); ++i)  in printEntities()
    219  … body.append(arm::pipe::CentreAlignFormatting(std::to_string(m_Model.m_Entities[i].m_Guid), 12));  in printEntities()
    239  for (uint32_t i = 0; i < m_Model.m_EventClasses.size(); ++i)  in printEventClasses()
    243  …body.append(arm::pipe::CentreAlignFormatting(std::to_string(m_Model.m_EventClasses[i].m_Guid), 12)…  in printEventClasses()
    268  for (uint32_t i = 0; i < m_Model.m_Events.size(); ++i)  in printEvents()
    [all …]
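    printLabels() and its siblings render each collection held in m_Model as a fixed-width, centre-aligned table. A small sketch of that formatting pattern, with a local helper standing in for arm::pipe::CentreAlignFormatting:

        #include <cstdint>
        #include <iostream>
        #include <string>
        #include <vector>

        struct Label { uint64_t m_Guid; std::string m_Name; };
        struct Model { std::vector<Label> m_Labels; };

        // Local helper; the real code uses arm::pipe::CentreAlignFormatting.
        std::string CentreAlign(const std::string& text, std::size_t width)
        {
            if (text.size() >= width) { return text; }
            std::size_t pad   = width - text.size();
            std::size_t left  = pad / 2;
            std::size_t right = pad - left;
            return std::string(left, ' ') + text + std::string(right, ' ');
        }

        void printLabels(const Model& m_Model)
        {
            std::string body;
            for (uint32_t i = 0; i < m_Model.m_Labels.size(); ++i)
            {
                body.append("| ");
                body.append(CentreAlign(std::to_string(m_Model.m_Labels[i].m_Guid), 12));
                body.append(" | ");
                body.append(CentreAlign(m_Model.m_Labels[i].m_Name, 30));
                body.append(" |\n");
            }
            std::cout << body;
        }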
|
/external/armnn/shim/sl/canonical/ |
D | ModelToINetworkTransformer.cpp |
    23   , m_Model(model)  in ModelToINetworkTransformer()
    46   if (!setRunTimePoolInfosFromCanonicalMemories(&m_Data.m_MemPools, m_Model.pools))  in Convert()
    68   …m_Data.m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(m_Model.main.operands.size(), nul…  in Convert()
    72   for (uint32_t i = 0; i < m_Model.main.inputIndexes.size(); i++)  in Convert()
    76   uint32_t inputIndex = m_Model.main.inputIndexes[i];  in Convert()
    78   const Operand& operand = m_Model.main.operands[inputIndex];  in Convert()
    107  for (uint32_t operationIdx = 0; operationIdx < m_Model.main.operations.size(); operationIdx++)  in Convert()
    109  const auto& operation = m_Model.main.operations[operationIdx];  in Convert()
    122  ok = Converter::ConvertOperation(operation, m_Model, m_Data);  in Convert()
    174  for (uint32_t i = 0; i < m_Model.main.outputIndexes.size(); i++)  in Convert()
    [all …]
|
D | ArmnnPreparedModel.cpp |
    134  , m_Model(model)  in ArmnnPreparedModel()
    314  const auto modelRequest = validateRequestForModel(request, m_Model);  in execute()
    319  VLOG(DRIVER) << "ArmnnPreparedModel::execute(): " << GetModelSummary(m_Model).c_str();  in execute()
    491  const auto modelRequest = validateRequestForModel(request, m_Model);  in executeFenced()
    496  VLOG(DRIVER) << "ArmnnPreparedModel::executeFenced(): " << GetModelSummary(m_Model).c_str();  in executeFenced()
    611  return &m_Model;  in getUnderlyingResource()
|
D | ModelToINetworkTransformer.hpp | 51 const Model& m_Model; member in armnn_driver::ModelToINetworkTransformer
|
/external/armnn/tests/ |
D | MobileNetSsdInferenceTest.hpp |
    175  m_Model = m_ConstructModel(commonOptions, m_ModelCommandLineOptions);  in ProcessCommandLineOptions()
    176  if (!m_Model)  in ProcessCommandLineOptions()
    180  std::pair<float, int32_t> qParams = m_Model->GetInputQuantizationParams();  in ProcessCommandLineOptions()
    198  return std::make_unique<MobileNetSsdTestCase<Model>>(*m_Model, testCaseId, *testCaseData);  in GetTestCase()
    205  std::unique_ptr<Model> m_Model;  member in __anon034aa0710111::MobileNetSsdTestCaseProvider
|
D | DeepSpeechV1InferenceTest.hpp |
    155  m_Model = m_ConstructModel(commonOptions, m_ModelCommandLineOptions);  in ProcessCommandLineOptions()
    156  if (!m_Model)  in ProcessCommandLineOptions()
    179  return std::make_unique<DeepSpeechV1TestCase<Model>>(*m_Model, testCaseId, *testCaseData);  in GetTestCase()
    186  std::unique_ptr<Model> m_Model;  member in __anondf4298250111::DeepSpeechV1TestCaseProvider
|
D | InferenceTest.hpp |
    120  : m_Model(model)  in InferenceModelTestCase()
    136  m_Model.Run(m_Inputs, m_Outputs);  in Run()
    144  TModel& m_Model;  member in armnn::test::InferenceModelTestCase
    196  std::unique_ptr<InferenceModel> m_Model;  member in armnn::test::ClassifierTestCaseProvider
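    The test case holds the model by reference and feeds its inputs through it, while the test case provider owns the model through a unique_ptr. A compressed sketch of that ownership split, with a hypothetical InferenceModel in place of the templated armnn::test types:

        #include <memory>
        #include <vector>

        // Hypothetical model with the Run(inputs, outputs) shape used above.
        struct InferenceModel
        {
            void Run(const std::vector<float>& /*inputs*/, std::vector<float>& /*outputs*/) {}
        };

        // Each test case borrows the model; the provider below owns it.
        class InferenceModelTestCase
        {
        public:
            explicit InferenceModelTestCase(InferenceModel& model) : m_Model(model) {}

            void Run() { m_Model.Run(m_Inputs, m_Outputs); }

        private:
            InferenceModel&    m_Model;
            std::vector<float> m_Inputs;
            std::vector<float> m_Outputs;
        };

        class TestCaseProvider
        {
        public:
            std::unique_ptr<InferenceModelTestCase> GetTestCase()
            {
                return std::make_unique<InferenceModelTestCase>(*m_Model);
            }

        private:
            std::unique_ptr<InferenceModel> m_Model = std::make_unique<InferenceModel>();
        };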
|
D | YoloInferenceTest.hpp |
    203  m_Model = m_ConstructModel(commonOptions, m_ModelCommandLineOptions);  in ProcessCommandLineOptions()
    204  if (!m_Model)  in ProcessCommandLineOptions()
    226  return std::make_unique<YoloTestCase<Model>>(*m_Model, testCaseId, *testCaseData);  in GetTestCase()
    233  std::unique_ptr<Model> m_Model;  member in YoloTestCaseProvider
|
D | InferenceTest.inl |
    222  m_Model = m_ConstructModel(commonOptions, m_ModelCommandLineOptions);
    223  if (!m_Model)
    228  m_Database = std::make_unique<TDatabase>(m_ConstructDatabase(m_DataDir.c_str(), *m_Model));
    252  *m_Model,
|
/external/armnn/profiling/server/include/timelineDecoder/ |
D | TimelineDecoder.hpp |
    48  return f(m_Model);  in ApplyToModel()
    62  Model m_Model;  member in arm::pipe::TimelineDecoder
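    ApplyToModel exposes the otherwise private m_Model by applying a caller-supplied callable to it, instead of handing out a reference through a getter. A minimal sketch of that accessor pattern (C++14 return-type deduction, simplified Model):

        #include <iostream>
        #include <string>
        #include <vector>

        struct Model { std::vector<std::string> m_Labels; };

        class TimelineDecoder
        {
        public:
            // Callers read or mutate the model through a functor,
            // keeping m_Model itself private.
            template <typename F>
            auto ApplyToModel(F&& f)
            {
                return f(m_Model);
            }

        private:
            Model m_Model;
        };

        int main()
        {
            TimelineDecoder decoder;
            decoder.ApplyToModel([](Model& m) { m.m_Labels.push_back("inference"); });
            auto count = decoder.ApplyToModel([](Model& m) { return m.m_Labels.size(); });
            std::cout << count << "\n";  // prints 1
        }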
|
/external/armnn/tests/ExecuteNetwork/ |
D | TfliteExecutor.cpp |
    12  m_Model = tflite::FlatBufferModel::BuildFromFile(m_Params.m_ModelPath.c_str());  in TfLiteExecutor()
    13  if (!m_Model)  in TfLiteExecutor()
    20  tflite::InterpreterBuilder builder(*m_Model, resolver);  in TfLiteExecutor()
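    The executor loads the .tflite file into a FlatBufferModel and hands it to an InterpreterBuilder. The same sequence shown in isolation against the standard TensorFlow Lite C++ API; the class name and error handling here are illustrative, not the executor's actual code:

        #include <memory>
        #include <stdexcept>
        #include <string>

        #include "tensorflow/lite/interpreter.h"
        #include "tensorflow/lite/kernels/register.h"
        #include "tensorflow/lite/model.h"

        class TfLiteRunner
        {
        public:
            explicit TfLiteRunner(const std::string& modelPath)
            {
                // The FlatBufferModel must outlive the interpreter, which is why
                // both are kept as members (as TfLiteExecutor's m_Model is).
                m_Model = tflite::FlatBufferModel::BuildFromFile(modelPath.c_str());
                if (!m_Model)
                {
                    throw std::runtime_error("Failed to load model from file " + modelPath);
                }

                tflite::ops::builtin::BuiltinOpResolver resolver;
                tflite::InterpreterBuilder builder(*m_Model, resolver);
                if (builder(&m_Interpreter) != kTfLiteOk || !m_Interpreter)
                {
                    throw std::runtime_error("Failed to build interpreter");
                }
                m_Interpreter->AllocateTensors();
            }

            TfLiteStatus Invoke() { return m_Interpreter->Invoke(); }

        private:
            std::unique_ptr<tflite::FlatBufferModel> m_Model;
            std::unique_ptr<tflite::Interpreter>     m_Interpreter;
        };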
|
D | TfliteExecutor.hpp | 31 std::unique_ptr<tflite::FlatBufferModel> m_Model; member in TfLiteExecutor
|