/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph/Utils.h"

#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/backends/BackendRegistry.h"
#include "arm_compute/graph/mutators/GraphMutators.h"

namespace arm_compute
{
namespace graph
{
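// Returns true if the given target is registered in the backend registry and that backend reports itself as supported.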
bool is_target_supported(Target target)
{
    return backends::BackendRegistry::get().contains(target) && backends::BackendRegistry::get().find_backend(target)->is_backend_supported();
}

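// Picks the default backend in preference order NEON -> CL -> GC; errors out if no backend is available.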
Target get_default_target()
{
    if(is_target_supported(Target::NEON))
    {
        return Target::NEON;
    }
    if(is_target_supported(Target::CL))
    {
        return Target::CL;
    }
    if(is_target_supported(Target::GC))
    {
        return Target::GC;
    }
    ARM_COMPUTE_ERROR("No backend exists!");
}

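// Forces the given target onto every node and tensor in the graph.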
void force_target_to_graph(Graph &g, Target target)
{
    auto &nodes = g.nodes();
    for(auto &node : nodes)
    {
        if(node)
        {
            node->set_assigned_target(target);
        }
    }

    auto &tensors = g.tensors();
    for(auto &tensor : tensors)
    {
        if(tensor)
        {
            tensor->desc().target = target;
        }
    }
}

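// Builds the default pass manager: graph IR mutators first, then mutators that touch backend information.
// Passes appended with the !is_target_gc flag are skipped when the target is GC.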
PassManager create_default_pass_manager(Target target, const GraphConfig &cfg)
{
    PassManager pm;

    const bool is_target_gc = target == Target::GC;

    // Passes that mutate graph IR
    if(cfg.convert_to_uint8)
    {
        pm.append(support::cpp14::make_unique<SyntheticDataTypeMutator>(), !is_target_gc);
    }
    pm.append(support::cpp14::make_unique<NodeFusionMutator>(), !is_target_gc);
    pm.append(support::cpp14::make_unique<GroupedConvolutionMutator>());
    pm.append(support::cpp14::make_unique<InPlaceOperationMutator>(), !is_target_gc);

    // Passes that mutate backend information
    pm.append(support::cpp14::make_unique<DepthConcatSubTensorMutator>(), !is_target_gc);
    pm.append(support::cpp14::make_unique<SplitLayerSubTensorMutator>(), !is_target_gc);
    pm.append(support::cpp14::make_unique<NodeExecutionMethodMutator>());

    return pm;
}

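// Releases the graph context resources of every supported backend.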
void release_default_graph_context(GraphContext &ctx)
{
    for(const auto &backend : backends::BackendRegistry::get().backends())
    {
        if(backend.second->is_backend_supported())
        {
            backend.second->release_backend_context(ctx);
        }
    }
}

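// Sets up the graph context only for the requested backend, provided it is registered and supported.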
void setup_requested_backend_context(GraphContext &ctx, Target target)
{
    if(backends::BackendRegistry::get().contains(target))
    {
        const auto &backend = backends::BackendRegistry::get().find_backend(target);
        if(backend->is_backend_supported())
        {
            backend->setup_backend_context(ctx);
        }
    }
}

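// Returns the size of the requested dimension, resolved through the descriptor's data layout.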
size_t get_dimension_size(const TensorDescriptor &descriptor, const DataLayoutDimension data_layout_dimension)
{
    ARM_COMPUTE_ERROR_ON_MSG(descriptor.layout == DataLayout::UNKNOWN, "Cannot retrieve the dimension index for an unknown layout!");
    return descriptor.shape[get_dimension_idx(descriptor.layout, data_layout_dimension)];
}

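// Maps a layout-agnostic dimension (batches, channel, height, width) to its index within the given data layout.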
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
{
    ARM_COMPUTE_ERROR_ON_MSG(data_layout == DataLayout::UNKNOWN, "Cannot retrieve the dimension index for an unknown layout!");

    /* Return the index based on the data layout:
     * NCHW: [N C H W] -> [3 2 1 0]
     * NHWC: [N H W C] -> [3 2 1 0]
     */
    switch(data_layout_dimension)
    {
        case DataLayoutDimension::CHANNEL:
            return (data_layout == DataLayout::NCHW) ? 2 : 0;
            break;
        case DataLayoutDimension::HEIGHT:
            return (data_layout == DataLayout::NCHW) ? 1 : 2;
            break;
        case DataLayoutDimension::WIDTH:
            return (data_layout == DataLayout::NCHW) ? 0 : 1;
            break;
        case DataLayoutDimension::BATCHES:
            return 3;
            break;
        default:
            break;
    }
    ARM_COMPUTE_ERROR("Data layout index not supported!");
}

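// Collects the consumer nodes attached to the given node's output edges.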
std::vector<NodeIdxPair> get_driving_nodes(const INode &node)
{
    std::vector<NodeIdxPair> driving_nodes;

    const Graph *g = node.graph();
    ARM_COMPUTE_ERROR_ON(g == nullptr);

    for(auto &output_edge_id : node.output_edges())
    {
        auto output_edge = g->edge(output_edge_id);
        if(output_edge != nullptr)
        {
            ARM_COMPUTE_ERROR_ON(output_edge->consumer() == nullptr);
            driving_nodes.push_back({ output_edge->consumer_id(), output_edge->consumer_idx() });
        }
    }

    return driving_nodes;
}

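// Creates and assigns a backend tensor handle if the tensor does not already have one.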
void configure_tensor(Tensor *tensor)
{
    if(tensor != nullptr && tensor->handle() == nullptr)
    {
        Target                         target  = tensor->desc().target;
        backends::IDeviceBackend      &backend = backends::BackendRegistry::get().get_backend(target);
        std::unique_ptr<ITensorHandle> handle  = backend.create_tensor(*tensor);
        ARM_COMPUTE_ERROR_ON_MSG(!handle, "Couldn't create backend handle!");
        tensor->set_handle(std::move(handle));
    }
}
} // namespace graph
} // namespace arm_compute