//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <GraphUtils.hpp>
#include <TestUtils.hpp>

#include <armnn/INetwork.hpp>

#include <doctest/doctest.h>

using namespace armnn;

namespace
{
#if defined(ARMNNREF_ENABLED) || defined(ARMCOMPUTECL_ENABLED)
/// Builds a small quantized network (input -> pad -> average pool2d -> output),
/// optimizes it for the given backend, and verifies that the optimizer folded
/// the Pad layer into the Pooling2d layer: the optimized graph must be exactly
/// input -> pool2d -> output, with the pad amounts moved into the pooling
/// descriptor and the layer renamed "folded-pad-into-pool2d".
void FoldPadIntoQuantizedAvgPoolTest(Compute backendId)
{
    // Create a network
    INetworkPtr network = INetwork::Create();

    const unsigned int inputShape[]  = {1, 2, 2, 3};
    const unsigned int paddedShape[] = {1, 4, 4, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    TensorInfo inputInfo(4, inputShape, DataType::QAsymmU8, 1.0f, 0.0f);
    TensorInfo paddedInfo(4, paddedShape, DataType::QAsymmU8, 1.0f, 0.0f);
    TensorInfo outputInfo(4, outputShape, DataType::QAsymmU8, 1.0f, 0.0f);

    IConnectableLayer* input = network->AddInputLayer(0, "input");
    input->GetOutputSlot(0).SetTensorInfo(inputInfo);

    // Pad one element on each side of the two spatial (H, W) dimensions (NHWC layout).
    PadDescriptor padDescriptor({{0, 0},
                                 {1, 1},
                                 {1, 1},
                                 {0, 0}});

    IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "pad");
    padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Average;
    pooling2dDescriptor.m_PoolWidth  = 3;
    pooling2dDescriptor.m_PoolHeight = 3;
    pooling2dDescriptor.m_StrideX    = 1;
    pooling2dDescriptor.m_StrideY    = 1;
    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;

    IConnectableLayer* pool2dLayer = network->AddPooling2dLayer(pooling2dDescriptor, "pool2d");
    pool2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // Connect up layers - input -> pad -> pool2d -> output
    input->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot(0).Connect(pool2dLayer->GetInputSlot(0));
    pool2dLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Create ArmNN runtime
    IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions());

    // Optimise ArmNN network
    IOptimizedNetworkPtr optNet = Optimize(*network, {backendId}, run->GetDeviceSpec());

    // Predicate: true only for a Pooling2d layer named "folded-pad-into-pool2d"
    // whose descriptor now carries the pad amounts that the Pad layer held.
    auto checkPadFoldedIntoPool2d = [&](const Layer* const layer) {
        if (!IsLayerOfType<Pooling2dLayer>(layer) || (layer->GetNameStr() != "folded-pad-into-pool2d"))
        {
            return false;
        }

        const auto                pool2dLayer       = static_cast<const Pooling2dLayer*>(layer);
        const Pooling2dDescriptor pool2dLayerParams = pool2dLayer->GetParameters();

        // Zero out the pad fields so the remainder of the descriptor can be
        // compared against the original (pad-free) pooling descriptor.
        Pooling2dDescriptor pool2dLayerParamsNoPad = pool2dLayerParams;
        pool2dLayerParamsNoPad.m_PadLeft       = 0;
        pool2dLayerParamsNoPad.m_PadRight      = 0;
        pool2dLayerParamsNoPad.m_PadTop        = 0;
        pool2dLayerParamsNoPad.m_PadBottom     = 0;
        // If we fold then PaddingMethod will be set to Ignore. The original will be Exclude.
        pool2dLayerParamsNoPad.m_PaddingMethod = PaddingMethod::Exclude;

        return (pool2dLayerParamsNoPad == pooling2dDescriptor) && (pool2dLayerParams.m_PadLeft == 1) &&
            (pool2dLayerParams.m_PadRight == 1) && (pool2dLayerParams.m_PadTop == 1) &&
            (pool2dLayerParams.m_PadBottom == 1) && (pool2dLayerParams.m_PaddingMethod == PaddingMethod::IgnoreValue);
    };

    // After optimization the graph should be exactly input -> pool2d -> output.
    Graph& graph = GetGraphForTesting(optNet.get());
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        checkPadFoldedIntoPool2d,
                        &IsLayerOfType<OutputLayer>));
}
#endif
}    // anonymous namespace


#if defined(ARMNNREF_ENABLED)
// Run the fold-pad-into-average-pool optimization check on the CPU reference backend.
TEST_SUITE("Optimizer_FoldPadIntoQuantizedAvgPoolCpuRef")
{
TEST_CASE("FoldPadIntoQuantizedAvgPoolCpuRefTest")
{
    FoldPadIntoQuantizedAvgPoolTest(Compute::CpuRef);
}
}
#endif
#if defined(ARMCOMPUTECL_ENABLED)
// Run the fold-pad-into-average-pool optimization check on the GPU (OpenCL) backend.
TEST_SUITE("Optimizer_FoldPadIntoQuantizedAvgPoolGpuAcc")
{
TEST_CASE("FoldPadIntoQuantizedAvgPoolGpuAccTest")
{
    FoldPadIntoQuantizedAvgPoolTest(Compute::GpuAcc);
}
}
#endif