//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "../TestUtils.hpp"

#include <Optimizer.hpp>

#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(Optimizer)
using namespace armnn::optimizations;

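// These tests exercise the Fp32NetworkToBf16Converter optimization, which
// inserts a ConvertFp32ToBf16Layer in front of the layers that can take
// BFloat16 weights (Convolution2d and FullyConnected) and converts their
// weight tensors to BF16 in place, leaving all other layers untouched.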
BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationNoConversionTest)
{
    armnn::Graph graph;

    const armnn::TensorInfo infoFP32({ 2, 2, 1, 3 }, armnn::DataType::Float32);

    // Create the simple test network without Conv2D/FullyConnected.
    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(infoFP32);

    auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
    floor->GetOutputSlot().SetTensorInfo(infoFP32);

    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");

    // Connect up the layers
    input->GetOutputSlot().Connect(floor->GetInputSlot(0));
    floor->GetOutputSlot().Connect(output->GetInputSlot(0));

    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                             &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));

    // Run the optimizer
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));

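    // Floor is not a BF16-convertible layer, so no ConvertFp32ToBf16Layer
    // should have been inserted and the graph should be unchanged.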
    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                             &IsLayerOfType<armnn::FloorLayer>,
                             &IsLayerOfType<armnn::OutputLayer>));
}

BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
{
    armnn::Graph graph;

    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);

    // Create const tensor fp32 data
    unsigned int dims[] = { 4, 2, 1, 1 };
    std::vector<float> floatWeights{ 0.0f, -1.0f,
                                     3.8f, // 0x40733333 Round down
                                     3.1055E+29f, // 0x707ADC3C Round up
                                     9.149516E-10f, // 0x307B7FFF Round down
                                    -3.8f, // 0xC0733333 Round down
                                    -3.1055E+29f, // 0xF07ADC3C Round up
                                    -9.149516E-10f // 0xB07B7FFF Round down
                                   };
    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);

    // Create const bias fp32 data
    unsigned int biasDims[] = { 4 };
    std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
    armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32), floatBias);

    // A network with Convolution2d layer
    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(infoFP32);

    armnn::Convolution2dDescriptor descriptor;

    auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
    conv->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
    conv->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
    conv->GetOutputSlot().SetTensorInfo(infoFP32);

    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");

    // Connect up the layers
    input->GetOutputSlot().Connect(conv->GetInputSlot(0));
    conv->GetOutputSlot().Connect(output->GetInputSlot(0));

    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                             &IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));

    // Run the optimizer
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));

    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                             &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
                             &IsLayerOfType<armnn::OutputLayer>));

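    // The convolution itself should now run in BF16: its input comes from the
    // inserted conversion layer and its weights were converted in place, while
    // the bias and the layer's output remain FP32.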
    armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
    armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
    BOOST_TEST((conv->GetDataType() == armnn::DataType::BFloat16));
    BOOST_TEST((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
    BOOST_TEST((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
    BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
    BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));

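    // BFloat16 keeps the sign bit, the 8 exponent bits and the top 7 mantissa
    // bits of the FP32 value; the discarded low 16 bits decide the rounding
    // direction noted above, e.g. 0x40733333 -> 0x4073, 0x707ADC3C -> 0x707B.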
    // Check whether data matches expected Bf16 data
    armnn::BFloat16* data = conv->m_Weight->GetTensor<armnn::BFloat16>();
    BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
    BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
    BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
    BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
    BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
    BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
    BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
    BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
}

BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
{
    armnn::Graph graph;

    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);

    // Create const tensor fp32 data
    unsigned int dims[] = { 4, 2, 1, 1 };
    std::vector<float> floatWeights{ 0.0f, -1.0f,
                                     3.8f, // 0x40733333 Round down
                                     3.1055E+29f, // 0x707ADC3C Round up
                                     9.149516E-10f, // 0x307B7FFF Round down
                                    -3.8f, // 0xC0733333 Round down
                                    -3.1055E+29f, // 0xF07ADC3C Round up
                                    -9.149516E-10f // 0xB07B7FFF Round down
                                   };
    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);

    // Create const bias fp32 data
    unsigned int biasDims[] = { 4 };
    std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
    armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32), floatBias);

    // A network with FullyConnected layer
    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(infoFP32);

    armnn::FullyConnectedDescriptor descriptor;

    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
    fc->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
    fc->GetOutputSlot().SetTensorInfo(infoFP32);

    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");

    // Connect up the layers
    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
    fc->GetOutputSlot().Connect(output->GetInputSlot(0));

    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                             &IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::OutputLayer>));

    // Run the optimizer
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));

    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                             &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::FullyConnectedLayer>,
                             &IsLayerOfType<armnn::OutputLayer>));

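    // As with Convolution2d, the FullyConnected layer should now take BF16
    // input and weights while its bias and output stay FP32.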
    armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
    armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
    BOOST_TEST((fc->GetDataType() == armnn::DataType::BFloat16));
    BOOST_TEST((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
    BOOST_TEST((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
    BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
    BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));

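    // The weights hold the same values as in the Conv2d test, so the
    // converted BF16 data should round identically.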
    // Check whether data matches expected Bf16 data
    armnn::BFloat16* data = fc->m_Weight->GetTensor<armnn::BFloat16>();
    BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
    BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
    BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
    BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
    BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
    BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
    BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
    BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
}

BOOST_AUTO_TEST_SUITE_END()