/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_MICRO_CODER_CODER_CONTEXT_H_
#define MINDSPORE_LITE_MICRO_CODER_CODER_CONTEXT_H_
#include <map>
#include <memory>
#include <set>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "src/tensor.h"
namespace mindspore::lite::micro {
28 class CoderContext {
29  public:
30   CoderContext();
31 
32   ~CoderContext() = default;
33 
init_contents()34   std::vector<std::string> init_contents() const { return initialContent_; }
35 
set_code_blocks(const std::vector<std::string> & code_block)36   void set_code_blocks(const std::vector<std::string> &code_block) { code_blocks_ = code_block; }
code_blocks()37   std::vector<std::string> code_blocks() const { return code_blocks_; }
38 
set_inference_blocks(const std::vector<std::string> & inference_blocks)39   void set_inference_blocks(const std::vector<std::string> &inference_blocks) { inference_blocks_ = inference_blocks; }
inference_blocks()40   std::vector<std::string> inference_blocks() const { return inference_blocks_; }
41 
set_train_blocks(const std::vector<std::string> & train_blocks)42   void set_train_blocks(const std::vector<std::string> &train_blocks) { train_blocks_ = train_blocks; }
train_blocks()43   std::vector<std::string> train_blocks() const { return train_blocks_; }
44 
set_tensor_map(const std::map<Tensor *,std::string> & tensor_map)45   void set_tensor_map(const std::map<Tensor *, std::string> &tensor_map) {
46     tensors_map_.insert(tensor_map.begin(), tensor_map.end());
47   }
tensors_map()48   std::map<Tensor *, std::string> tensors_map() const { return tensors_map_; }
set_saved_weights(const std::map<std::string,Tensor * > & saved_weights)49   void set_saved_weights(const std::map<std::string, Tensor *> &saved_weights) { saved_weights_ = saved_weights; }
saved_weights()50   std::map<std::string, Tensor *> saved_weights() const { return saved_weights_; }
51 
set_total_buffer_size(size_t size)52   void set_total_buffer_size(size_t size) { total_buffer_size_ = size; }
total_buffer_size()53   size_t total_buffer_size() const { return total_buffer_size_; }
54 
set_graph_inputs(const std::vector<Tensor * > & graph_inputs)55   void set_graph_inputs(const std::vector<Tensor *> &graph_inputs) { graph_inputs_ = graph_inputs; }
set_graph_outputs(const std::vector<Tensor * > & graph_outputs)56   void set_graph_outputs(const std::vector<Tensor *> &graph_outputs) { graph_outputs_ = graph_outputs; }
57 
graph_inputs()58   std::vector<Tensor *> graph_inputs() const { return graph_inputs_; }
graph_outputs()59   std::vector<Tensor *> graph_outputs() const { return graph_outputs_; }
60 
input_name()61   std::string input_name() { return input_name_; }
output_name()62   std::string output_name() { return output_name_; }
buffer_name()63   std::string buffer_name() { return buffer_name_; }
weight_name()64   std::string weight_name() { return weight_name_; }
65 
66   void AppendCode(const std::string &codeBlock);
67 
68   void AppendInitCode(const std::string &codeBlock);
69 
c_files()70   std::set<std::string> c_files() const { return c_files_; }
set_c_files(const std::set<std::string> & files)71   void set_c_files(const std::set<std::string> &files) { c_files_.insert(files.begin(), files.end()); }
72 
h_files()73   std::set<std::string> h_files() const { return h_files_; }
set_h_files(const std::set<std::string> & files)74   void set_h_files(const std::set<std::string> &files) { h_files_.insert(files.begin(), files.end()); }
75 
asm_files()76   std::set<std::string> asm_files() const { return asm_files_; }
set_asm_files(const std::set<std::string> & files)77   void set_asm_files(const std::set<std::string> &files) { asm_files_.insert(files.begin(), files.end()); }
78 
79  private:
80   std::vector<Tensor *> graph_inputs_;
81   std::vector<Tensor *> graph_outputs_;
82   // primitive const tensors, parsed from model, without packed.
83   std::map<std::string, Tensor *> saved_weights_;
84   // all tensors, include parsed from model and packed tensors.
85   std::map<Tensor *, std::string> tensors_map_;
86   // workspace's size.
87   size_t total_buffer_size_{0};
88   // model's input tensor data's address.
89   std::string input_name_;
90   // model's output tensor's address
91   std::string output_name_;
92   // the address of workspace, use for inference or train.
93   std::string buffer_name_;
94   // model's weight tensors' address.
95   std::string weight_name_;
96   // code blocks store the tensor will be packed runtime
97   std::vector<std::string> initialContent_;
98   // operator C Lang files list, depended by the net.c. it will be add to CMakeLists.txt
99   std::set<std::string> c_files_;
100   // when codegen generate the code for ARM64 OR ARM32, we provide server optimized artimetic used the assembly
101   // instructions. asm_files store the assembly file names
102   std::set<std::string> asm_files_;
103   // operator header files
104   std::set<std::string> h_files_;
105   // net.c's content, include the Inference and Training implementation
106   std::vector<std::string> code_blocks_;
107   std::vector<std::string> train_blocks_;
108   std::vector<std::string> inference_blocks_;
109 };

}  // namespace mindspore::lite::micro
#endif  // MINDSPORE_LITE_MICRO_CODER_CODER_CONTEXT_H_