/**
 * Copyright 2019-2024 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_PIPELINE_JIT_RESOURCE_H_
#define MINDSPORE_CCSRC_PIPELINE_JIT_RESOURCE_H_

#include <iostream>
#include <vector>
#include <string>
#include <memory>
#include <future>
#include <mutex>
#include <utility>
#include <functional>

#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

#include "utils/any.h"
#include "utils/profile.h"
#include "ir/manager.h"

#include "pipeline/jit/ps/resource_base.h"
#include "pipeline/jit/ps/static_analysis/prim.h"
#include "pipeline/jit/ps/static_analysis/static_analysis.h"
#include "load_mindir/load_model.h"
#include "pipeline/jit/ps/compile_cache_manager.h"

namespace mindspore {
namespace compile {
// Forward declaration only: the full Backend definition is not needed by this header.
class Backend;
using BackendPtr = std::shared_ptr<Backend>;
}  // namespace compile
namespace pipeline {
namespace py = pybind11;

// String identifiers used as names/keys for compile-pipeline stages and graphs.
// NOTE(review): these values are used as identifiers at runtime (e.g. by the
// pipeline action machinery); do not change them without checking all lookups.
const char kStepParallelGraph[] = "step_parallel";
const char kOutput[] = "output";
const char kPynativeGraphId[] = "graph_id";
const char kActorInfo[] = "actor_info";
const char kCompiler[] = "Compiler";
const char kBootstrap[] = "bootstrap";
const char kParse[] = "parse";
const char kSymbolResolve[] = "symbol_resolve";
const char kSetMixedPrecisionFlag[] = "set_mixed_precision_flag";
const char kCombineLikeGraphs[] = "combine_like_graphs";
const char kGraphReusing[] = "graph_reusing";
const char kPreCConv[] = "pre_cconv";
const char kTypeInference[] = "type_inference";
const char kAutoMonad[] = "auto_monad";
const char kInline[] = "inline";
const char kPreAutoParallel[] = "pre_auto_parallel";
const char kPipelineSplit[] = "pipeline_split";
const char kPipelineParallelScheduler[] = "pipeline_parallel_scheduler";
const char kOptimize[] = "optimize";
const char kAutoMonadReorder[] = "auto_monad_reorder";
const char kGetJitBpropGraph[] = "get_jit_bprop_graph";
const char kRewriterAfterJitBprop[] = "rewriter_after_jit_bprop_graph";
const char kEliminateSpecialOpNode[] = "eliminate_special_op_node";
const char kValidate[] = "validate";
const char kLoadMindir[] = "load_mindir";
const char kInferMindir[] = "infer_mindir";
const char kModifyMindirGraph[] = "modify_mindir_graph";
// NOTE(review): "distribtued_split" is misspelled ("distributed"). Kept as-is:
// the string may serve as a stage key matched elsewhere (caches, configs) —
// confirm all consumers before correcting the spelling.
const char kDistributedSplit[] = "distribtued_split";
const char kTaskEmit[] = "task_emit";
const char kExecute[] = "execute";
const char kAbstractAnalyze[] = "AbstractAnalyze";
const char kProgramSpecialize[] = "ProgramSpecialize";
const char kCreateBackend[] = "create_backend";
const char kPipelineClean[] = "pipeline_clean";

// Maps a TypeId (as int64_t) to the named methods/attributes registered for
// that built-in type. The `Any` payload is the callable/value pointer.
using BuiltInTypeMap = mindspore::HashMap<int64_t, mindspore::HashMap<std::string, Any>>;

// Global registry of methods available on built-in types.
BuiltInTypeMap &GetMethodMap();

// Global registry of attributes available on built-in types.
BuiltInTypeMap &GetAttrMap();

// Resource holds the per-compilation state of the JIT pipeline: the source
// python object being compiled, the parsed/optimized func graphs, argument
// abstracts, compile-cache handles and the (asynchronously created) backend.
class Resource : public ResourceBase {
 public:
  // `obj` is the source object to compile (usually a Cell or a jit-decorated
  // function); defaults to py::none() for resources without a python source.
  explicit Resource(const py::object &obj = py::none());

  ~Resource() override;

  // Analysis engine used for static analysis (type inference) of the graph.
  abstract::AnalysisEnginePtr engine() { return engine_; }

  // True if `type` has an entry in either the built-in method or attr map.
  static bool IsTypeInBuiltInMap(const TypeId &type);

  // Look up a built-in method/attribute by type and name; see GetMethodMap()/GetAttrMap().
  static Any GetMethodPtr(const TypeId &type, const std::string &name);

  static Any GetAttrPtr(const TypeId &type, const std::string &name);

  // The source obj to compile, usually a `Cell` or `jit` decorated function.
  const py::object &source_input() const { return source_input_; }

  FuncGraphPtr func_graph() const { return func_graph_; }
  void set_func_graph(const FuncGraphPtr &func_graph) { func_graph_ = func_graph; }

  FuncGraphPtr optimize_graph() const { return optimize_graph_; }
  void set_optimize_graph(const FuncGraphPtr &optimize_graph) { optimize_graph_ = optimize_graph; }

  // Abstract values (types/shapes) of the compile-time arguments.
  const abstract::AbstractBasePtrList &args_abs() const { return args_abs_; }
  void set_args_abs(const abstract::AbstractBasePtrList &args_abs) { args_abs_ = args_abs; }

  // Concrete argument values kept for later pipeline stages (see arguments_ note below).
  const ValuePtrList &arguments() const { return arguments_; }
  void set_arguments(const ValuePtrList &arguments) { arguments_ = arguments; }

  void set_vm_loop(const bool &flag, const int64_t size) {
    vm_loop_flag_ = flag;
    loop_size_ = size;
  }
  void set_is_load(bool flag) { is_load_ = flag; }
  bool is_load() const { return is_load_; }
  bool vm_loop_flag() const { return vm_loop_flag_; }
  int64_t loop_size() const { return loop_size_; }

  // Parameter layout map loaded from the compile cache.
  const LayoutMap &layout_map() const { return layout_map_; }

  // Get the cached func_graph and parameters layout map.
  void GetCompileCacheResource(const py::list &compile_cache_dep_files, const py::dict &weights,
                               const std::string &queue_name, size_t compile_cache_id, bool *compile_cache_consistent,
                               bool has_python_script);
  void CacheFuncGraph() const;
  // Compile caching is enabled iff a cache manager was created by GetCompileCacheResource().
  bool EnableCompileCache() const { return compile_cache_manager_ != nullptr; }

  // Reclaim resource and clear the cache.
  // GraphExecutorPy::Compile() can be called multiple times, so cache
  // should be cleared.
  void Clean();

  // Get the backend object. if the backend is being initialized, wait until it completes.
  compile::BackendPtr GetBackend() const;

  // Set backend asynchronously, the input function should return a Backend pointer,
  // and it will be called in a background thread.
  void SetBackendAsync(std::function<compile::BackendPtr()> func);

  // Get the mutex for backend initializing.
  static std::mutex &GetBackendInitMutex() { return backend_init_mutex_; }

 private:
  abstract::AnalysisEnginePtr engine_;
  FuncGraphPtr func_graph_;
  FuncGraphPtr optimize_graph_;
  // The arguments may contain a Parameter, we need connect it to the Parameter default value of func graph.
  // We keep all arguments inputs here for subsequent procedure.
  std::vector<ValuePtr> arguments_;
  abstract::AbstractBasePtrList args_abs_;
  // The source obj to compile, usually a `Cell` or `jit` decorated function.
  py::object source_input_;
  // NOTE(review): no in-class initializer here, unlike the flags below —
  // presumably set in the constructor (defined in the .cc); verify.
  bool is_cleaned_;
  // The func_graph_ is loaded from mindir
  bool is_load_{false};
  bool vm_loop_flag_{false};
  int64_t loop_size_{1};
  LayoutMap layout_map_{};
  CompileCacheManagerPtr compile_cache_manager_{nullptr};
  // The backend related fields for async initializing.
  // `mutable` so the const GetBackend() can resolve the future and cache the result.
  mutable compile::BackendPtr backend_;
  mutable std::future<compile::BackendPtr> backend_future_;
  // Mutex to ensure backend creating task is running exclusively.
  static std::mutex backend_init_mutex_;
};

using ResourcePtr = std::shared_ptr<pipeline::Resource>;

}  // namespace pipeline
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_PIPELINE_JIT_RESOURCE_H_