//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6 #include "ConvertBf16ToFp32Layer.hpp"
7 #include "LayerCloneBase.hpp"
8
9 #include <armnn/TypesUtils.hpp>
10
11 #include <backendsCommon/WorkloadData.hpp>
12 #include <backendsCommon/WorkloadFactory.hpp>
13
14 namespace armnn
15 {
16
ConvertBf16ToFp32Layer(const char * name)17 ConvertBf16ToFp32Layer::ConvertBf16ToFp32Layer(const char* name)
18 : Layer(1, 1, LayerType::ConvertBf16ToFp32, name)
19 {
20 }
21
CreateWorkload(const IWorkloadFactory & factory) const22 std::unique_ptr<IWorkload> ConvertBf16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
23 {
24 ConvertBf16ToFp32QueueDescriptor descriptor;
25 SetAdditionalInfo(descriptor);
26
27 return factory.CreateConvertBf16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
28 }
29
Clone(Graph & graph) const30 ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const
31 {
32 return CloneBase<ConvertBf16ToFp32Layer>(graph, GetName());
33 }
34
ValidateTensorShapesFromInputs()35 void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
36 {
37 VerifyLayerConnections(1, CHECK_LOCATION());
38
39 const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
40
41 VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
42
43 auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
44
45 ARMNN_ASSERT(inferredShapes.size() == 1);
46
47 ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
48 }
49
Accept(ILayerVisitor & visitor) const50 void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
51 {
52 // these conversion layers are only inserted by the
53 // optimizer and so will never be in an input graph.
54 IgnoreUnused(visitor);
55 throw armnn::Exception("ConvertBf16ToFp32Layer should never appear in an input graph");
56 }
57
58 } // namespace armnn
59