//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertFp16ToFp32Layer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

namespace armnn
{

// Single-input, single-output layer that converts FP16 tensor data to FP32.
ConvertFp16ToFp32Layer::ConvertFp16ToFp32Layer(const char* name)
    : Layer(1, 1, LayerType::ConvertFp16ToFp32, name)
{
}

// Builds the queue descriptor for this layer and asks the backend workload
// factory to create the corresponding conversion workload.
std::unique_ptr<IWorkload> ConvertFp16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
{
    ConvertFp16ToFp32QueueDescriptor descriptor;
    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::ConvertFp16ToFp32, descriptor, PrepInfoAndDesc(descriptor));
}

// Creates a copy of this layer in the given graph.
ConvertFp16ToFp32Layer* ConvertFp16ToFp32Layer::Clone(Graph& graph) const
{
    return CloneBase<ConvertFp16ToFp32Layer>(graph, GetName());
}

// Verifies the layer's connections, infers the output shape from the input
// shape, and validates it against (or copies it into) the output slot.
void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
}

// Visitor entry point: reports this layer, its parameters, and its name to the
// given strategy.
void ConvertFp16ToFp32Layer::ExecuteStrategy(IStrategy& strategy) const
{
    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}

} // namespace armnn