1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClWorkloadFactoryHelper.hpp"
7 
8 #include <Network.hpp>
9 
10 #include <test/GraphUtils.hpp>
11 
12 #include <cl/ClWorkloadFactory.hpp>
13 
14 #include <boost/test/unit_test.hpp>
15 
16 BOOST_AUTO_TEST_SUITE(ClOptimizedNetwork)
17 
BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
{
    // Construct the simplest possible network: one input feeding one output.
    armnn::INetworkPtr network(armnn::INetwork::Create());

    armnn::IConnectableLayer* const inputLayer  = network->AddInputLayer(0);
    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);

    inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    inputLayer->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    // A runtime instance supplies the device spec the optimizer needs.
    armnn::IRuntime::CreationOptions creationOptions;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(creationOptions));

    const std::vector<armnn::BackendId> preferredBackends = { armnn::Compute::GpuAcc };
    armnn::IOptimizedNetworkPtr optimizedNet =
        armnn::Optimize(*network, preferredBackends, runtime->GetDeviceSpec());
    BOOST_CHECK(optimizedNet);

    // Every layer must have been assigned to GpuAcc (no fallback), and each
    // one must be able to create its workload without throwing.
    armnn::ClWorkloadFactory factory =
        ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph())
    {
        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
        BOOST_CHECK_NO_THROW(layer->CreateWorkload(factory));
    }
}
45 
BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)46 BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
47 {
48     // Test to check when Fp16 Turbo mode set
49     // it converts the Fp32 network to Fp16 Network
50     // add Fp32ToFp16 conversion layer after the InputLayer
51     // add Fp16ToFp32 conversion layer after the OutputLayer
52     // checks the other layers if they are supported in Fp16
53     // if they are not put the conversion layers before and after
54     // if they are not supported in Fp16 use Fp32 instead
55     // if there are inverse conversion layers remove them with optimization
56     // at the moment FloorLayer is not supported in Fp16 so it rolls back to Fp32
57     // and inverse conversion layers are removed by the optimizer
58     armnn::Network net;
59 
60     // Defines layers.
61     auto input = net.AddInputLayer(0, "input layer");
62     // ReLu1
63     armnn::ActivationDescriptor activation1Descriptor;
64     activation1Descriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
65     activation1Descriptor.m_A = 1.f;
66     activation1Descriptor.m_B = -1.f;
67     auto activation = net.AddActivationLayer(activation1Descriptor, "activation layer");
68     auto output = net.AddOutputLayer(0, "output layer");
69 
70     // Connects layers.
71     input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
72     activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));
73 
74     armnn::TensorShape shape({4});
75     armnn::TensorInfo info(shape, armnn::DataType::Float32);
76     input->GetOutputSlot(0).SetTensorInfo(info);
77     activation->GetOutputSlot(0).SetTensorInfo(info);
78 
79     armnn::IRuntime::CreationOptions options;
80     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
81 
82     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
83 
84     armnn::OptimizerOptions optimizerOptions;
85     optimizerOptions.m_ReduceFp32ToFp16 = true;
86 
87     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
88             net, backends, runtime->GetDeviceSpec(), optimizerOptions);
89 
90     const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
91 
92     // Tests that all layers are present in the graph.
93     BOOST_TEST(graph.GetNumLayers() == 5);
94 
95     // Tests that the vertices exist and have correct names.
96     BOOST_TEST(GraphHasNamedLayer(graph, "input layer"));
97     BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp32_to_fp16-0-input layer"));
98     BOOST_TEST(GraphHasNamedLayer(graph, "activation layer"));
99     BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp16_to_fp32-0-output layer"));
100     BOOST_TEST(GraphHasNamedLayer(graph, "output layer"));
101 }
102 
BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnGpuAcc)
{
    // Checks that the "FastMathEnabled" GpuAcc backend option supplied via
    // OptimizerOptions is propagated into the optimized network's model
    // options.
    armnn::INetworkPtr network(armnn::INetwork::Create());

    armnn::IConnectableLayer* const inputLayer  = network->AddInputLayer(0);
    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);

    inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    inputLayer->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions creationOptions;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(creationOptions));

    const std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };

    armnn::OptimizerOptions optimizerOptions;
    armnn::BackendOptions gpuAccOptions("GpuAcc", {{"FastMathEnabled", true}});
    optimizerOptions.m_ModelOptions.push_back(gpuAccOptions);

    armnn::IOptimizedNetworkPtr optimizedNet =
        armnn::Optimize(*network, backends, runtime->GetDeviceSpec(), optimizerOptions);

    BOOST_CHECK(optimizedNet);

    // The model options on the optimized network should echo exactly the
    // single option that was passed in.
    auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();

    BOOST_TEST(modelOptionsOut.size() == 1);
    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
}
132 
133 BOOST_AUTO_TEST_SUITE_END();
134