• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 
7 #include <Graph.hpp>
8 #include <Network.hpp>
9 
10 #include <reference/RefWorkloadFactory.hpp>
11 
12 #include <boost/test/unit_test.hpp>
13 
14 BOOST_AUTO_TEST_SUITE(OptimizedNetwork)
15 
BOOST_AUTO_TEST_CASE(SerializeToDot)16 BOOST_AUTO_TEST_CASE(SerializeToDot)
17 {
18     armnn::Network net;
19 
20     //Defines layers.
21     auto input = net.AddInputLayer(0);
22     auto add = net.AddAdditionLayer();
23     auto output = net.AddOutputLayer(0);
24 
25     // Connects layers.
26     input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
27     input->GetOutputSlot(0).Connect(add->GetInputSlot(1));
28     add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
29 
30     armnn::TensorShape shape({4});
31     armnn::TensorInfo info(shape, armnn::DataType::Float32);
32     input->GetOutputSlot(0).SetTensorInfo(info);
33     add->GetOutputSlot(0).SetTensorInfo(info);
34 
35     armnn::IRuntime::CreationOptions options;
36     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
37 
38     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
39     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
40 
41     std::ostringstream ss;
42     optimizedNet->SerializeToDot(ss);
43 
44     auto inputId = input->GetGuid();
45     auto addId = add->GetGuid();
46     auto outputId = output->GetGuid();
47 
48     std::stringstream expected;
49     expected <<
50         "digraph Optimized {\n"
51         "    node [shape=\"record\"];\n"
52         "    edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
53         "    " << inputId << " [label=\"{Input|Guid : " << inputId << "\\lLayerType : Input\\l"
54                              "BackendID : CpuRef\\l}\"];\n"
55         "    " << addId << " [label=\"{Addition|Guid : " << addId << "\\lLayerType : Addition\\l"
56                            "BackendID : CpuRef\\l}\"];\n"
57         "    " << outputId << " [label=\"{Output|Guid : " << outputId << "\\lLayerType : Output\\l"
58                               "BackendID : CpuRef\\l}\"];\n"
59         "    " << inputId << " -> " << addId << " [label=< [4] >];\n"
60         "    " << inputId << " -> " << addId << " [label=< [4] >];\n"
61         "    " << addId << " -> " << outputId << " [label=< [4] >];\n"
62         "}\n";
63 
64     BOOST_TEST(ss.str() == expected.str());
65 }
66 
BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
{
    // Build the network: Input -> Normalization -> Output.
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input = net->AddInputLayer(0);

    // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null.
    armnn::NormalizationDescriptor descriptor;
    armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);

    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Only CpuAcc is offered - deliberately no fallback backend.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    std::vector<std::string> errMessages;

    try
    {
        Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
        BOOST_FAIL("Should have thrown an exception.");
    }
    catch (const armnn::InvalidArgumentException&)
    {
        // Expected. The exception object is not inspected because different
        // backends report the failure with different messages.
    }
    // The optimizer must have recorded at least one error message.
    BOOST_CHECK(!errMessages.empty());
}
103 
BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
{
    // Build the network: Input -> Normalization -> Output.
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* inputLayer = net->AddInputLayer(0);

    // Normalization with this configuration isn't supported by CpuAcc,
    // but CpuRef is offered as a fallback backend below.
    armnn::NormalizationDescriptor normDesc;
    armnn::IConnectableLayer* normLayer = net->AddNormalizationLayer(normDesc);

    armnn::IConnectableLayer* outputLayer = net->AddOutputLayer(0);

    inputLayer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
    normLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

    const armnn::TensorInfo tensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32);
    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    normLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    BOOST_REQUIRE(optNet);

    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
    {
#if defined(ARMCOMPUTENEON_ENABLED)
        // NEON enabled: Input and Output layers run on CpuAcc,
        // the Normalization layer falls back to CpuRef.
        const bool isBoundary = layer->GetType() == armnn::LayerType::Input ||
                                layer->GetType() == armnn::LayerType::Output;
        if (isBoundary)
        {
            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
        }
        else if (layer->GetType() == armnn::LayerType::Normalization)
        {
            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
        }
#else
        // NEON not enabled: every layer is assigned to CpuRef.
        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
#endif
    }
}
149 
BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)150 BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
151 {
152     const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
153 
154     armnn::Network  net;
155 
156     armnn::NormalizationDescriptor nmDesc;
157     armnn::ActivationDescriptor acDesc;
158 
159     //    in
160     //     |
161     //    nm
162     //   /  |
163     //  ac  |
164     //   \  |
165     //    ml
166     //     |
167     //    sm
168     //     |
169     //    ot
170     armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
171     layer->GetOutputSlot(0).SetTensorInfo(desc);
172 
173     armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
174 
175     layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
176     normLayer->GetOutputSlot(0).SetTensorInfo(desc);
177 
178     layer = net.AddActivationLayer(acDesc, "ac");
179 
180     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
181     layer->GetOutputSlot(0).SetTensorInfo(desc);
182 
183     armnn::IConnectableLayer* prevLayer = layer;
184     layer = net.AddMultiplicationLayer("ml");
185 
186     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
187     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
188     layer->GetOutputSlot(0).SetTensorInfo(desc);
189 
190     prevLayer = layer;
191     armnn::SoftmaxDescriptor softmaxDescriptor;
192     layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
193 
194     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
195     layer->GetOutputSlot(0).SetTensorInfo(desc);
196 
197     prevLayer = layer;
198     layer = net.AddOutputLayer(0, "ot");
199 
200     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
201 
202     armnn::IRuntime::CreationOptions options;
203     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
204 
205     std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined };
206     std::vector<std::string> errMessages;
207 
208     try
209     {
210         Optimize(net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
211         BOOST_FAIL("Should have thrown an exception.");
212     }
213     catch (const armnn::InvalidArgumentException& e)
214     {
215         // Different exceptions are thrown on different backends
216     }
217     BOOST_CHECK(errMessages.size() > 0);
218 }
219 
BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback)220 BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback)
221 {
222     const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
223 
224     armnn::Network  net;
225 
226     armnn::NormalizationDescriptor nmDesc;
227     armnn::ActivationDescriptor acDesc;
228 
229     //    in
230     //     |
231     //    nm
232     //   /  |
233     //  ac  |
234     //   \  |
235     //    ml
236     //     |
237     //    sm
238     //     |
239     //    ot
240     armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
241     layer->GetOutputSlot(0).SetTensorInfo(desc);
242 
243     armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
244 
245     layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
246     normLayer->GetOutputSlot(0).SetTensorInfo(desc);
247 
248     layer = net.AddActivationLayer(acDesc, "ac");
249 
250     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
251     layer->GetOutputSlot(0).SetTensorInfo(desc);
252 
253     armnn::IConnectableLayer* prevLayer = layer;
254     layer = net.AddMultiplicationLayer("ml");
255 
256     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
257     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
258     layer->GetOutputSlot(0).SetTensorInfo(desc);
259 
260     prevLayer = layer;
261     armnn::SoftmaxDescriptor softmaxDescriptor;
262     layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
263 
264     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
265     layer->GetOutputSlot(0).SetTensorInfo(desc);
266 
267     prevLayer = layer;
268     layer = net.AddOutputLayer(0, "ot");
269 
270     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
271 
272     armnn::IRuntime::CreationOptions options;
273     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
274 
275     std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
276 
277     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
278     BOOST_CHECK(optNet);
279 
280     // validate workloads
281     armnn::RefWorkloadFactory fact;
282     for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
283     {
284         BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
285         BOOST_CHECK_NO_THROW(
286             layer->CreateWorkload(fact));
287     }
288 }
289 
BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback)290 BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback)
291 {
292     // build up the structure of the network
293     armnn::INetworkPtr net(armnn::INetwork::Create());
294 
295     armnn::IConnectableLayer* input = net->AddInputLayer(0);
296 
297     // This layer configuration isn't supported by CpuAcc but it allows to fallback to CpuRef.
298     armnn::NormalizationDescriptor descriptor;
299     armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
300 
301     armnn::IConnectableLayer* output = net->AddOutputLayer(0);
302 
303     input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
304     normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
305 
306     input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
307     normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
308 
309     armnn::IRuntime::CreationOptions options;
310     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
311 
312     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
313                                              armnn::Compute::GpuAcc,
314                                              armnn::Compute::CpuRef };
315 
316     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
317     BOOST_REQUIRE(optNet);
318 
319     for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
320     {
321         // If NEON is enabled, Input and Output layers are supported by CpuAcc,
322         // the other layers are supported by CpuRef.
323         // If only CL is enabled, Input and Output layers are supported by GpuAcc,
324         // the other layers are supported by CpuRef.
325         // If neither NEON, nor CL is enabled, all layers are supported by CpuRef.
326 #if defined(ARMCOMPUTENEON_ENABLED)
327         if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
328         {
329             BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
330         }
331         else if (layer->GetType() == armnn::LayerType::Normalization)
332         {
333             BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
334         }
335 #elif defined(ARMCOMPUTECL_ENABLED)
336         if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
337         {
338             BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
339         }
340         else if (layer->GetType() == armnn::LayerType::Normalization)
341         {
342             BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
343         }
344 #else
345         BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
346 #endif
347     }
348 }
349 
350 BOOST_AUTO_TEST_SUITE_END()
351