/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph.h"
#include "arm_compute/graph/Types.h"
#include "support/ToolchainSupport.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"

using namespace arm_compute::utils;
using namespace arm_compute::graph;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;

/** Example demonstrating how to implement DeepSpeech v0.4.1's network using the Compute Library's graph API */
class GraphDeepSpeechExample : public Example
{
public:
    GraphDeepSpeechExample()
        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "DeepSpeech v0.4.1")
    {
    }
    bool do_setup(int argc, char **argv) override
    {
        // Parse arguments
        cmd_parser.parse(argc, argv);
        cmd_parser.validate();

        // Consume common parameters
        common_params = consume_common_graph_parameters(common_opts);

        // Return when help menu is requested
        if(common_params.help)
        {
            cmd_parser.print_help(argv[0]);
            return false;
        }

        // Print parameter values
        std::cout << common_params << std::endl;

        // Get trainable parameters data path
        std::string       data_path  = common_params.data_path;
        const std::string model_path = "/cnn_data/deepspeech_model/";

        if(!data_path.empty())
        {
            data_path += model_path;
        }

        // How many timesteps to process at once; higher values mean more latency.
        // Note that this also determines the number of LSTM cells that will be instantiated.
        const unsigned int n_steps = 16;

        // ReLU clipping value for non-recurrent layers
        const float cell_clip = 20.f;

        // Create input descriptor
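        // The 26 x 19 shape below is 26 MFCC features over a 19-frame context window per timestep
        // (assuming Mozilla DeepSpeech v0.4.1's preprocessing of 9 context frames on each side),
        // for a batch of 1.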
        const TensorShape tensor_shape     = permute_shape(TensorShape(26U, 19U, n_steps, 1U), DataLayout::NHWC, common_params.data_layout);
        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);

        // Set weights trained layout
        const DataLayout weights_layout = DataLayout::NHWC;

        graph << common_params.target
              << common_params.fast_math_hint
              << InputLayer(input_descriptor,
                            get_weights_accessor(data_path, "input_values_x" + std::to_string(n_steps) + ".npy", weights_layout))
              .set_name("input_node");

        if(common_params.data_layout == DataLayout::NCHW)
        {
            graph << PermuteLayer(PermutationVector(2U, 0U, 1U), common_params.data_layout).set_name("permute_to_nhwc");
        }

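        // Flatten each timestep's 26 x 19 feature window into a single 494-element vector
        // (26 * 19 = 494) before the fully connected layers.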
        graph << ReshapeLayer(TensorShape(494U, n_steps)).set_name("Reshape_input")
              // Layer 1
              << FullyConnectedLayer(
                  2048U,
                  get_weights_accessor(data_path, "h1_transpose.npy", weights_layout),
                  get_weights_accessor(data_path, "MatMul_bias.npy"))
              .set_name("fc0")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
              .set_name("Relu")
              // Layer 2
              << FullyConnectedLayer(
                  2048U,
                  get_weights_accessor(data_path, "h2_transpose.npy", weights_layout),
                  get_weights_accessor(data_path, "MatMul_1_bias.npy"))
              .set_name("fc1")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
              .set_name("Relu_1")
              // Layer 3
              << FullyConnectedLayer(
                  2048U,
                  get_weights_accessor(data_path, "h3_transpose.npy", weights_layout),
                  get_weights_accessor(data_path, "MatMul_2_bias.npy"))
              .set_name("fc2")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
              .set_name("Relu_2")
              // Layer 4
              << ReshapeLayer(TensorShape(2048U, 1U, n_steps)).set_name("Reshape_1");

        // Unstack Layer (using SplitLayerNode)
        NodeParams unstack_params = { "unstack", graph.hints().target_hint };
        NodeID     unstack_nid    = GraphBuilder::add_split_node(graph.graph(), unstack_params, { graph.tail_node(), 0 }, n_steps, 2);
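        // Splitting along axis 2 of the (2048, 1, n_steps) tensor yields n_steps sub-tensors,
        // one 2048-element feature vector per timestep, consumed below by the unrolled LSTM cells.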

        // Create input state descriptor
        TensorDescriptor state_descriptor = TensorDescriptor(TensorShape(2048U), common_params.data_type).set_layout(common_params.data_layout);
        SubStream        previous_state(graph);
        SubStream        add_y(graph);

        // The initial LSTM state is all zeros for both state_c and state_h, so a single zero input is shared
        previous_state << InputLayer(state_descriptor,
                                     get_weights_accessor(data_path, "zeros.npy"))
                       .set_name("previous_state_c_h");
        add_y << InputLayer(state_descriptor,
                            get_weights_accessor(data_path, "ones.npy"))
              .set_name("add_y");

        // Create LSTM Fully Connected weights and bias descriptors
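        // Weights are (4096, 8192): 4096 inputs = 2048 (x_t) + 2048 (h_{t-1}) concatenated,
        // 8192 outputs = 4 gates x 2048 cell units. The bias matches the 8192 gate outputs.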
        TensorDescriptor lstm_weights_descriptor = TensorDescriptor(TensorShape(4096U, 8192U), common_params.data_type).set_layout(common_params.data_layout);
        TensorDescriptor lstm_bias_descriptor    = TensorDescriptor(TensorShape(8192U), common_params.data_type).set_layout(common_params.data_layout);
        SubStream        lstm_fc_weights(graph);
        SubStream        lstm_fc_bias(graph);
        lstm_fc_weights << ConstantLayer(lstm_weights_descriptor,
                                         get_weights_accessor(data_path, "rnn_lstm_cell_kernel_transpose.npy", weights_layout))
                        .set_name("h5/transpose");
        lstm_fc_bias << ConstantLayer(lstm_bias_descriptor,
                                      get_weights_accessor(data_path, "rnn_lstm_cell_MatMul_bias.npy"))
                     .set_name("MatMul_3_bias");

        // LSTM Block
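        // The recurrence is unrolled explicitly: each add_lstm_cell() call consumes one timestep
        // from the unstack node and threads the (state_c, state_h) pair returned by the previous
        // cell into the next one. All cells share the same FC weights and bias.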
        std::pair<SubStream, SubStream> new_state_1  = add_lstm_cell(unstack_nid, 0, previous_state, previous_state, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_2  = add_lstm_cell(unstack_nid, 1, new_state_1.first, new_state_1.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_3  = add_lstm_cell(unstack_nid, 2, new_state_2.first, new_state_2.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_4  = add_lstm_cell(unstack_nid, 3, new_state_3.first, new_state_3.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_5  = add_lstm_cell(unstack_nid, 4, new_state_4.first, new_state_4.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_6  = add_lstm_cell(unstack_nid, 5, new_state_5.first, new_state_5.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_7  = add_lstm_cell(unstack_nid, 6, new_state_6.first, new_state_6.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_8  = add_lstm_cell(unstack_nid, 7, new_state_7.first, new_state_7.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_9  = add_lstm_cell(unstack_nid, 8, new_state_8.first, new_state_8.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_10 = add_lstm_cell(unstack_nid, 9, new_state_9.first, new_state_9.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_11 = add_lstm_cell(unstack_nid, 10, new_state_10.first, new_state_10.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_12 = add_lstm_cell(unstack_nid, 11, new_state_11.first, new_state_11.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_13 = add_lstm_cell(unstack_nid, 12, new_state_12.first, new_state_12.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_14 = add_lstm_cell(unstack_nid, 13, new_state_13.first, new_state_13.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_15 = add_lstm_cell(unstack_nid, 14, new_state_14.first, new_state_14.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_16 = add_lstm_cell(unstack_nid, 15, new_state_15.first, new_state_15.second, add_y, lstm_fc_weights, lstm_fc_bias);
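
        // The unrolling above could equivalently be driven by a loop; a sketch only, not part of
        // the original example:
        //
        //   std::vector<std::pair<SubStream, SubStream>> states;
        //   states.push_back(add_lstm_cell(unstack_nid, 0, previous_state, previous_state, add_y, lstm_fc_weights, lstm_fc_bias));
        //   for(unsigned int i = 1; i < n_steps; ++i)
        //   {
        //       states.push_back(add_lstm_cell(unstack_nid, i, states.back().first, states.back().second, add_y, lstm_fc_weights, lstm_fc_bias));
        //   }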

        // Stack the new hidden states along the height axis
        const int axis = 1;
        graph << StackLayer(axis,
                            std::move(new_state_1.second),
                            std::move(new_state_2.second),
                            std::move(new_state_3.second),
                            std::move(new_state_4.second),
                            std::move(new_state_5.second),
                            std::move(new_state_6.second),
                            std::move(new_state_7.second),
                            std::move(new_state_8.second),
                            std::move(new_state_9.second),
                            std::move(new_state_10.second),
                            std::move(new_state_11.second),
                            std::move(new_state_12.second),
                            std::move(new_state_13.second),
                            std::move(new_state_14.second),
                            std::move(new_state_15.second),
                            std::move(new_state_16.second))
              .set_name("concat");

        graph << FullyConnectedLayer(
                  2048U,
                  get_weights_accessor(data_path, "h5_transpose.npy", weights_layout),
                  get_weights_accessor(data_path, "MatMul_3_bias.npy"))
              .set_name("fc3")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
              .set_name("Relu3")
              << FullyConnectedLayer(
                  29U,
                  get_weights_accessor(data_path, "h6_transpose.npy", weights_layout),
                  get_weights_accessor(data_path, "MatMul_4_bias.npy"))
              .set_name("fc4")
              << SoftmaxLayer().set_name("logits");

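        // The 29 logits correspond to DeepSpeech's output alphabet: likely the 26 letters, space,
        // apostrophe and the CTC blank symbol. The accessor below reports the top 5 entries.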
        graph << OutputLayer(get_output_accessor(common_params, 5));

        // Finalize graph
        GraphConfig config;
        config.num_threads      = common_params.threads;
        config.use_tuner        = common_params.enable_tuner;
        config.tuner_file       = common_params.tuner_file;
        config.convert_to_uint8 = (common_params.data_type == DataType::QASYMM8);

        graph.finalize(common_params.target, config);

        return true;
    }
    void do_run() override
    {
        // Run graph
        graph.run();
    }

private:
    CommandLineParser  cmd_parser;
    CommonGraphOptions common_opts;
    CommonGraphParams  common_params;
    Stream             graph;

    Status set_node_params(Graph &g, NodeID nid, NodeParams &params)
    {
        INode *node = g.node(nid);
        ARM_COMPUTE_RETURN_ERROR_ON(!node);

        node->set_common_node_parameters(params);

        return Status{};
    }

    std::pair<SubStream, SubStream> add_lstm_cell(NodeID unstack_nid,
                                                  unsigned int unstack_idx,
                                                  SubStream    previous_state_c,
                                                  SubStream    previous_state_h,
                                                  SubStream    add_y,
                                                  SubStream    lstm_fc_weights,
                                                  SubStream    lstm_fc_bias)
    {
        const std::string         cell_name("rnn/lstm_cell_" + std::to_string(unstack_idx));
        const DataLayoutDimension concat_dim = (common_params.data_layout == DataLayout::NHWC) ? DataLayoutDimension::CHANNEL : DataLayoutDimension::WIDTH;
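        // Concatenating along the innermost dimension joins the 2048-element timestep input x_t
        // and the 2048-element previous hidden state into the 4096-element vector the FC expects.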

        // Concatenate result of Unstack with previous_state_h
        NodeParams concat_params = { cell_name + "/concat", graph.hints().target_hint };
        NodeID     concat_nid    = graph.graph().add_node<ConcatenateLayerNode>(2, concat_dim);
        graph.graph().add_connection(unstack_nid, unstack_idx, concat_nid, 0);
        graph.graph().add_connection(previous_state_h.tail_node(), 0, concat_nid, 1);
        set_node_params(graph.graph(), concat_nid, concat_params);
        graph.forward_tail(concat_nid);

        graph << FullyConnectedLayer(
                  8192U,
                  lstm_fc_weights,
                  lstm_fc_bias)
              .set_name(cell_name + "/BiasAdd");

        // Split Layer
        const unsigned int num_splits = 4;
        const unsigned int split_axis = 0;
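        // The 8192-wide gate pre-activations split into four 2048-wide tensors. Their order here
        // (input gate i, cell candidate j, forget gate f, output gate o) follows what TensorFlow's
        // BasicLSTMCell produces, which is presumably where the weights were exported from.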

        NodeParams split_params = { cell_name + "/split", graph.hints().target_hint };
        NodeID     split_nid    = GraphBuilder::add_split_node(graph.graph(), split_params, { graph.tail_node(), 0 }, num_splits, split_axis);

        NodeParams sigmoid_1_params = { cell_name + "/Sigmoid_1", graph.hints().target_hint };
        NodeParams add_params       = { cell_name + "/add", graph.hints().target_hint };
        NodeParams sigmoid_2_params = { cell_name + "/Sigmoid_2", graph.hints().target_hint };
        NodeParams tanh_params      = { cell_name + "/Tanh", graph.hints().target_hint };

        // Sigmoid 1 (first split)
        NodeID sigmoid_1_nid = graph.graph().add_node<ActivationLayerNode>(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
        graph.graph().add_connection(split_nid, 0, sigmoid_1_nid, 0);
        set_node_params(graph.graph(), sigmoid_1_nid, sigmoid_1_params);

        // Tanh (second split)
        NodeID tanh_nid = graph.graph().add_node<ActivationLayerNode>(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.f, 1.f));
        graph.graph().add_connection(split_nid, 1, tanh_nid, 0);
        set_node_params(graph.graph(), tanh_nid, tanh_params);

        SubStream tanh_ss(graph);
        tanh_ss.forward_tail(tanh_nid);

        // Add (third split)
        NodeID add_nid = graph.graph().add_node<EltwiseLayerNode>(descriptors::EltwiseLayerDescriptor{ EltwiseOperation::Add });
        graph.graph().add_connection(split_nid, 2, add_nid, 0);
        graph.graph().add_connection(add_y.tail_node(), 0, add_nid, 1);
        set_node_params(graph.graph(), add_nid, add_params);

        // Sigmoid 2 (fourth split)
        NodeID sigmoid_2_nid = graph.graph().add_node<ActivationLayerNode>(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
        graph.graph().add_connection(split_nid, 3, sigmoid_2_nid, 0);
        set_node_params(graph.graph(), sigmoid_2_nid, sigmoid_2_params);

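        // The wiring below computes the standard LSTM state update (a summary, using the gate
        // names assumed above):
        //   new_state_c = sigmoid(f + 1) * previous_state_c + sigmoid(i) * tanh(j)
        //   new_state_h = sigmoid(o) * tanh(new_state_c)
        // The "+ 1" comes from add_y (the tensor of ones) and plays the role of TensorFlow's
        // default forget_bias of 1.0.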
        SubStream sigmoid_1_ss(graph);
        sigmoid_1_ss.forward_tail(sigmoid_1_nid);
        SubStream mul_1_ss(sigmoid_1_ss);
        mul_1_ss << EltwiseLayer(std::move(sigmoid_1_ss), std::move(tanh_ss), EltwiseOperation::Mul)
                 .set_name(cell_name + "/mul_1");

        SubStream tanh_1_ss_tmp(graph);
        tanh_1_ss_tmp.forward_tail(add_nid);

        tanh_1_ss_tmp << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC))
                      .set_name(cell_name + "/Sigmoid");
        SubStream tanh_1_ss_tmp2(tanh_1_ss_tmp);
        tanh_1_ss_tmp2 << EltwiseLayer(std::move(tanh_1_ss_tmp), std::move(previous_state_c), EltwiseOperation::Mul)
                       .set_name(cell_name + "/mul");
        SubStream tanh_1_ss(tanh_1_ss_tmp2);
        tanh_1_ss << EltwiseLayer(std::move(tanh_1_ss_tmp2), std::move(mul_1_ss), EltwiseOperation::Add)
                  .set_name(cell_name + "/new_state_c");
        SubStream new_state_c(tanh_1_ss);

        tanh_1_ss << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.f, 1.f))
                  .set_name(cell_name + "/Tanh_1");

        SubStream sigmoid_2_ss(graph);
        sigmoid_2_ss.forward_tail(sigmoid_2_nid);
        graph << EltwiseLayer(std::move(sigmoid_2_ss), std::move(tanh_1_ss), EltwiseOperation::Mul)
              .set_name(cell_name + "/new_state_h");

        SubStream new_state_h(graph);
        return std::pair<SubStream, SubStream>(new_state_c, new_state_h);
    }
};

/** Main program for DeepSpeech v0.4.1
 *
 * Model is based on:
 *      https://arxiv.org/abs/1412.5567
 *      "Deep Speech: Scaling up end-to-end speech recognition"
 *      Awni Hannun, Carl Case, Jared Casper, Bryan Catanzaro, Greg Diamos, Erich Elsen, Ryan Prenger, Sanjeev Satheesh, Shubho Sengupta, Adam Coates, Andrew Y. Ng
 *
 * Provenance: https://github.com/mozilla/DeepSpeech
 *
 * @note To list all the possible arguments execute the binary appended with the --help option
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments
 *
 * @return Return code
 */
int main(int argc, char **argv)
{
    return arm_compute::utils::run_example<GraphDeepSpeechExample>(argc, argv);
}