• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
//
// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5 
6 #include <armnn/Descriptors.hpp>
7 #include <armnn/IRuntime.hpp>
8 #include <armnn/INetwork.hpp>
9 #include <armnn/Types.hpp>
10 #include <Runtime.hpp>
11 
12 #include <doctest/doctest.h>
13 
14 TEST_SUITE("DebugCallback")
15 {
16 namespace
17 {
18 
19 using namespace armnn;
20 
CreateSimpleNetwork()21 INetworkPtr CreateSimpleNetwork()
22 {
23     INetworkPtr net(INetwork::Create());
24 
25     IConnectableLayer* input = net->AddInputLayer(0, "Input");
26 
27     ActivationDescriptor descriptor;
28     descriptor.m_Function = ActivationFunction::ReLu;
29     IConnectableLayer* activationLayer = net->AddActivationLayer(descriptor, "Activation:ReLu");
30 
31     IConnectableLayer* output = net->AddOutputLayer(0);
32 
33     input->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
34     activationLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
35 
36     input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 5 }, DataType::Float32));
37     activationLayer->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 5 }, DataType::Float32));
38 
39     return net;
40 }
41 
42 TEST_CASE("RuntimeRegisterDebugCallback")
43 {
44     INetworkPtr net = CreateSimpleNetwork();
45 
46     IRuntime::CreationOptions options;
47     IRuntimePtr runtime(IRuntime::Create(options));
48 
49     // Optimize the network with debug option
50     OptimizerOptionsOpaque optimizerOptions(false, true);
51     std::vector<BackendId> backends = { "CpuRef" };
52     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
53 
54     NetworkId netId;
55     CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
56 
57     // Set up callback function
58     int callCount = 0;
59     std::vector<TensorShape> tensorShapes;
60     std::vector<unsigned int> slotIndexes;
61     auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensor)
__anon70b418c60202(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensor) 62     {
63         armnn::IgnoreUnused(guid);
64         slotIndexes.push_back(slotIndex);
65         tensorShapes.push_back(tensor->GetShape());
66         callCount++;
67     };
68 
69     runtime->RegisterDebugCallback(netId, mockCallback);
70 
71     std::vector<float> inputData({-2, -1, 0, 1, 2});
72     std::vector<float> outputData(5);
73 
74     TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
75     inputTensorInfo.SetConstant(true);
76     InputTensors inputTensors
77     {
78         {0, ConstTensor(inputTensorInfo, inputData.data())}
79     };
80     OutputTensors outputTensors
81     {
82         {0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
83     };
84 
85     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
86 
87     // Check that the callback was called twice
88     CHECK(callCount == 2);
89 
90     // Check that tensor handles passed to callback have correct shapes
91     const std::vector<TensorShape> expectedShapes({TensorShape({1, 1, 1, 5}), TensorShape({1, 1, 1, 5})});
92     CHECK(tensorShapes == expectedShapes);
93 
94     // Check that slot indexes passed to callback are correct
95     const std::vector<unsigned int> expectedSlotIndexes({0, 0});
96     CHECK(slotIndexes == expectedSlotIndexes);
97 }
98 
99 } // anonymous namespace
100 
101 }
102