//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonWorkloadFactoryHelper.hpp"

#include <Graph.hpp>
#include <Network.hpp>

#include <neon/NeonWorkloadFactory.hpp>

#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(NeonOptimizedNetwork)

BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
{
    // Build the simplest possible network: one input wired straight to one output.
    armnn::INetworkPtr network(armnn::INetwork::Create());

    armnn::IConnectableLayer* inputLayer  = network->AddInputLayer(0);
    armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0);

    inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    inputLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions creationOptions;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(creationOptions));

    // Optimize exclusively for the Neon (CpuAcc) backend — no fallback available.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network, backends, runtime->GetDeviceSpec());
    BOOST_CHECK(optNet);

    // Every layer must have landed on CpuAcc and must be able to create its
    // workload through the Neon workload factory without throwing.
    armnn::NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
    {
        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
        BOOST_CHECK_NO_THROW(layer->CreateWorkload(factory));
    }
}
BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
{
    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input = net->AddInputLayer(0);

    // This layer configuration isn't supported by CpuAcc and isn't allowed to
    // fall back, so Optimize is expected to throw (see BOOST_FAIL below).
    armnn::NormalizationDescriptor descriptor;
    armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);

    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    std::vector<std::string> errMessages;

    try
    {
        Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
        BOOST_FAIL("Should have thrown an exception.");
    }
    catch (const armnn::InvalidArgumentException&) // unnamed: only the exception type matters
    {
        // Different exceptions are thrown on different backends
    }
    // The optimizer must have reported at least one error message.
    BOOST_CHECK(errMessages.size() > 0);
}
BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnCpuAcc)
{
    // Minimal input->output network, used only to drive the optimizer.
    armnn::INetworkPtr network(armnn::INetwork::Create());

    armnn::IConnectableLayer* inputLayer  = network->AddInputLayer(0);
    armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0);

    inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    inputLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions creationOptions;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(creationOptions));

    // Request FastMath for the CpuAcc backend via the optimizer's model options.
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    armnn::OptimizerOptions optimizerOptions;
    armnn::BackendOptions modelOptions("CpuAcc", {{"FastMathEnabled", true}});
    optimizerOptions.m_ModelOptions.push_back(modelOptions);

    armnn::IOptimizedNetworkPtr optimizedNet =
        armnn::Optimize(*network, backends, runtime->GetDeviceSpec(), optimizerOptions);

    BOOST_CHECK(optimizedNet);

    // The FastMathEnabled option must survive optimization unchanged.
    auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();

    BOOST_TEST(modelOptionsOut.size() == 1);
    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
}

BOOST_AUTO_TEST_SUITE_END()