/**
 * Copyright 2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "pipeline/jit/pi/graph_guard/strategy.h"
#include <algorithm>
#include <limits>
#include <string>
#include <vector>
#include <map>
#include <set>
#include "pybind11/pybind11.h"
#include "pybind_api/ir/primitive_py.h"
#include "include/common/utils/convert_utils_py.h"
#include "pipeline/jit/ps/pipeline.h"
#include "pipeline/jit/pi/utils/utils.h"
#include "pipeline/jit/pi/pydef.h"

namespace mindspore {
namespace pijit {

OptStrategy::ExecKind OptStrategy::MakeExecStrategyByPerf(OptPerfPtr graph_perf, OptPerfPtr pynative_perf, int count,
                                                          double adj_coef) {
  PerfStatisticsPtr graph_stat = graph_perf->GetStatistics();
  PerfStatisticsPtr pynative_stat = pynative_perf->GetStatistics();
  if (graph_stat->GetTotalCount() < count) {
    return ExecKind::kExecGraph;
  } else if (pynative_stat->GetTotalCount() < count) {
    return ExecKind::kExecPyNative;
  } else {
    if (graph_stat->GetAverageDuration() * (1 + adj_coef) > pynative_stat->GetAverageDuration()) {
      return ExecKind::kExecPyNative;
    } else {
      return ExecKind::kExecGraph;
    }
  }
}
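
// Illustrative usage sketch (assumed call site, not part of this file): once both executors have
// been sampled `count` times, the cheaper one is selected. With adj_coef = 0.1 the graph path is
// kept only when PyNative's average duration is at least 10% higher than the graph's:
//   OptStrategy::ExecKind kind =
//     OptStrategy::MakeExecStrategyByPerf(graph_perf, pynative_perf, /*count=*/10, /*adj_coef=*/0.1);
//   bool run_graph = (kind == OptStrategy::ExecKind::kExecGraph);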

OptStrategy::ExecKind OptStrategy::MakeExecStrategyByComplex(PyCodeObject *co, int threshold) {
  // Currently the bytecode instruction count alone decides whether to build a graph;
  // later this should be replaced with a proper cost model.
  if (co != nullptr && SizeToInt(PyBytes_GET_SIZE(co->co_code) / sizeof(_Py_CODEUNIT)) < threshold) {
    return ExecKind::kExecPyNative;
  } else {
    return ExecKind::kExecGraph;
  }
}
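
// Illustrative note (assumption about the intended call site): `threshold` is measured in bytecode
// units, and PyBytes_GET_SIZE(co->co_code) / sizeof(_Py_CODEUNIT) counts those units (each unit is
// 2 bytes on CPython 3.7-3.10). For example, with a threshold of 50 a function whose code object
// holds 40 units stays in PyNative, while one holding 200 units is compiled as a graph:
//   OptStrategy::ExecKind kind = OptStrategy::MakeExecStrategyByComplex(frame->f_code, 50);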

// Sort predicate: order OptCode entries by descending Count().
static bool CompareOptCodeByCount(OptCodePtr a, OptCodePtr b) { return a->Count() > b->Count(); }

// Partition *set so that entries whose guard shape matches `target` are moved behind all
// mismatching entries, preserving the relative order within each group.
void ShrinkCodeSet(OptCodeSet *set, OptCodePtr target) {
  OptCodeSet match;
  OptCodeSet mismatch;
  auto guard_target = target->GetGuard();
  for (size_t i = set->size(); i != 0;) {
    i--;
    auto item = set->at(i);
    auto guard_item = item->GetGuard();
    if (guard_target->MatchShape(guard_item)) {
      match.insert(match.begin(), item);
    } else {
      mismatch.insert(mismatch.begin(), item);
    }
  }
  set->clear();
  set->insert(set->begin(), mismatch.begin(), mismatch.end());
  set->insert(set->end(), match.begin(), match.end());
}

static constexpr int64_t kDynamicShapeLimitCount = 3;

// Evict cached compile targets from `hub` when the entry count or the accumulated graph node
// count exceeds the given limits. The `except` entry is removed from consideration and is
// therefore never evicted.
void OptStrategy::MakeGCStrategy(OptCodeHubPtr hub, int limit_size, int limit_count, bool enable_dynamicshape,
                                 OptCodePtr except) {
  if (limit_size <= 0 && limit_count <= 0) {
    if (!enable_dynamicshape) {
      return;
    }
    limit_count = kDynamicShapeLimitCount;
  }
  std::vector<OptCodeSet> vec = hub->GetAllOptTarget();
  for (auto set : vec) {
    std::sort(set.begin(), set.end(), CompareOptCodeByCount);
    auto it = std::find(set.begin(), set.end(), except);
    if (it != set.end()) {
      set.erase(it);
    }
    if (limit_count > 0) {
      if (set.size() > static_cast<size_t>(limit_count)) {
        OptCodeSet toDel;
        if (enable_dynamicshape) {
          ShrinkCodeSet(&set, except);
        }
        toDel.insert(toDel.begin(), set.begin() + limit_count, set.end());
        for (auto item : toDel) {
          hub->DelOptTarget(item);
        }
      }
    }
    if (limit_size > 0) {
      auto graph_executor = mindspore::pipeline::GraphExecutorPy::GetInstance();
      OptCodeSet toDel;
      if (enable_dynamicshape) {
        ShrinkCodeSet(&set, except);
      }
      for (auto item : set) {
        if (limit_size == 0) {
          toDel.push_back(item);
        }
        std::string phase = item->GetPhase();
        if (phase.size() > 0) {
          FuncGraphPtr ms_func_graph = graph_executor->GetFuncGraph(phase);
          int node_count = SizeToInt(ms_func_graph->nodes().size());
          for (auto fg : ms_func_graph->func_graphs_used_total()) {
            node_count += SizeToInt(fg->nodes().size());
          }
          if (limit_size > node_count) {
            limit_size -= node_count;
          } else {
            limit_size = 0;
          }
        }
      }
      for (auto item : toDel) {
        hub->DelOptTarget(item);
      }
    }
  }
}
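
// Illustrative usage sketch (assumed call site, not from this file): after a new compile target
// `new_code` has been registered, a caller could cap each code set at 8 entries while protecting
// the fresh entry from eviction:
//   OptStrategy::MakeGCStrategy(hub, /*limit_size=*/0, /*limit_count=*/8,
//                               /*enable_dynamicshape=*/false, new_code);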

constexpr int64_t kMaxCalcDim = 1;
constexpr int64_t kCompareDim = std::numeric_limits<int64_t>::max();

static OptStrategy::CalcKind TensorComputable(PyObject *obj, ssize_t max_dim) {
  if (py::isinstance<mindspore::tensor::Tensor>(obj) || py::isinstance<mindspore::tensor::MetaTensor>(obj)) {
    auto tensor_ptr = py::cast<mindspore::tensor::MetaTensorPtr>(obj);
    auto shape = tensor_ptr->shape();
    if (!std::any_of(shape.begin(), shape.end(), [max_dim](const int64_t dim) { return dim > max_dim; })) {
      return OptStrategy::CalcKind::kCalcValue;
    }
  }
  return OptStrategy::CalcKind::kCalcShape;
}

static OptStrategy::CalcKind StubTensorComputable(PyObject *obj, ssize_t max_dim) {
  auto stub = PyObject_GetAttrString(obj, "stub");
  if (stub != nullptr && stub != Py_None) {
    auto pyObj = py::cast<py::object>(stub);
    auto ptr = pyObj.cast<mindspore::stub::StubNodePtr>();
    auto base = ptr->ToAbstract();
    auto shape = base->BuildShape()->cast<abstract::ShapePtr>();
    Py_DECREF(stub);
    if (shape && !shape->IsDynamic()) {
      if (!std::any_of(shape->shape().begin(), shape->shape().end(),
                       [max_dim](const int64_t dim) { return dim > max_dim; })) {
        return OptStrategy::CalcKind::kCalcValue;
      }
    } else {
      return OptStrategy::CalcKind::kCalcUnsupported;
    }
  } else {
    Py_XDECREF(stub);  // `stub` may be Py_None here; release the reference taken above
    obj = PyObject_GetAttrString(obj, "tensor");
    auto pyObj = py::cast<py::object>(obj);
    Py_DECREF(obj);
    auto tensor_ptr = pyObj.cast<mindspore::tensor::TensorPtr>();
    auto shape = tensor_ptr->shape();
    if (!std::any_of(shape.begin(), shape.end(), [max_dim](const int64_t dim) { return dim > max_dim; })) {
      return OptStrategy::CalcKind::kCalcValue;
    }
  }
  return OptStrategy::CalcKind::kCalcShape;
}

// Classify whether an object's value can be computed directly (kCalcValue), only its shape can be
// computed (kCalcShape), or neither (kCalcUnsupported).
static OptStrategy::CalcKind ObjectComputable(PyObject *obj, ssize_t max_dim = kMaxCalcDim) {
  static const std::vector<bool (*)(PyObject *)> computable = {
    [](PyObject *op) { return op == Py_None || op == Py_True || op == Py_False || op == Py_Ellipsis; },
    CheckScalar,
    CheckContainer,
    [](PyObject *op) { return IsMsClass(reinterpret_cast<PyObject *>(Py_TYPE(op))); },
    IsNumpyObject,
  };
  if (obj == nullptr) {
    return OptStrategy::CalcKind::kCalcUnsupported;
  } else if (std::any_of(computable.begin(), computable.end(), [&obj](auto check) { return check(obj); })) {
    return OptStrategy::CalcKind::kCalcValue;
  } else if (IsTensorPyObject(obj)) {
    return TensorComputable(obj, max_dim);
  } else if (IsStubTensor(py::cast<py::object>(obj))) {
    return StubTensorComputable(obj, max_dim);
  } else {
    return OptStrategy::CalcKind::kCalcUnsupported;
  }
}
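
// Illustrative classification examples (assumptions about typical inputs, reading aid only):
//   Py_None, Py_True, plain scalars, and ms-class instances      -> kCalcValue
//   a Tensor whose every dimension is <= kMaxCalcDim (i.e. <= 1) -> kCalcValue
//   a Tensor with any dimension larger than kMaxCalcDim          -> kCalcShape
//   a stub tensor with a dynamic shape, or a nullptr object      -> kCalcUnsupported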

using CheckPyObjectFunc = OptStrategy::CalcKind (*)(int bytecode, int opargs, const PyObjectArray &objs);

OptStrategy::CalcKind MakeCalcStrategyByObject(int bytecode, int opargs, const PyObjectArray &objs) {
  return ObjectComputable(objs[0]);
}

OptStrategy::CalcKind MakeInplaceCalcStrategyByObject(int bytecode, int opargs, const PyObjectArray &objs) {
  std::set<std::string> inplace = {"numpy.ndarray", "list", "<unnamed>"};
  const char *tp_name = Py_TYPE(objs[0])->tp_name ? (Py_TYPE(objs[0]))->tp_name : "<unnamed>";
  return inplace.find(tp_name) == inplace.end() ? ObjectComputable(objs[0]) : OptStrategy::CalcKind::kCalcUnsupported;
}

OptStrategy::CalcKind MakeCalcStrategyByMatMul(int bytecode, int opargs, const PyObjectArray &objs) {
  auto oc1 = ObjectComputable(objs[0]);
  auto oc2 = ObjectComputable(objs[1]);
  if (oc1 == OptStrategy::CalcKind::kCalcValue && oc2 == OptStrategy::CalcKind::kCalcValue) {
    return OptStrategy::CalcKind::kCalcValue;
  } else {
    return OptStrategy::CalcKind::kCalcUnsupported;
  }
}

OptStrategy::CalcKind MakeCalcStrategyByCompare(int bytecode, int opargs, const PyObjectArray &objs) {
  if (objs[0] == Py_None || objs[1] == Py_None) {
    return OptStrategy::CalcKind::kCalcValue;
  }
  if (py::isinstance<mindspore::Type>(objs[0]) || PyType_Check(objs[0])) {
    return OptStrategy::CalcKind::kCalcValue;
  }
  if (py::isinstance<mindspore::Type>(objs[1]) || PyType_Check(objs[1])) {
    return OptStrategy::CalcKind::kCalcValue;
  }
  auto oc1 = ObjectComputable(objs[0], kCompareDim);
  auto oc2 = ObjectComputable(objs[1], kCompareDim);
  if (oc1 == OptStrategy::CalcKind::kCalcValue && oc2 == OptStrategy::CalcKind::kCalcValue) {
    return OptStrategy::CalcKind::kCalcValue;
  } else if (oc1 == OptStrategy::CalcKind::kCalcUnsupported || oc2 == OptStrategy::CalcKind::kCalcUnsupported) {
    return OptStrategy::CalcKind::kCalcUnsupported;
  } else {
    return OptStrategy::CalcKind::kCalcShape;
  }
}
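
// Illustrative note (reading aid, assumptions about inputs): because comparisons pass kCompareDim
// (INT64_MAX) as max_dim, static-shape tensors of any size classify as kCalcValue here. For
// example, comparing an object against None or a type object classifies as kCalcValue, while a
// comparison where either operand is unsupported (e.g. a dynamic-shape stub tensor) yields
// kCalcUnsupported.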

static std::map<int, CheckPyObjectFunc> kBytecodeStrategy = {
  {UNARY_POSITIVE, MakeCalcStrategyByObject},
  {UNARY_NEGATIVE, MakeCalcStrategyByObject},
  {UNARY_NOT, MakeCalcStrategyByObject},
  {UNARY_INVERT, MakeCalcStrategyByObject},
  {BINARY_LSHIFT, MakeCalcStrategyByObject},
  {BINARY_RSHIFT, MakeCalcStrategyByObject},
  {BINARY_AND, MakeCalcStrategyByObject},
  {BINARY_XOR, MakeCalcStrategyByObject},
  {BINARY_OR, MakeCalcStrategyByObject},
  {BINARY_FLOOR_DIVIDE, MakeCalcStrategyByObject},
  {BINARY_TRUE_DIVIDE, MakeCalcStrategyByObject},
  {INPLACE_LSHIFT, MakeInplaceCalcStrategyByObject},
  {INPLACE_RSHIFT, MakeInplaceCalcStrategyByObject},
  {INPLACE_AND, MakeInplaceCalcStrategyByObject},
  {INPLACE_XOR, MakeInplaceCalcStrategyByObject},
  {INPLACE_OR, MakeInplaceCalcStrategyByObject},
  {INPLACE_FLOOR_DIVIDE, MakeInplaceCalcStrategyByObject},
  {INPLACE_TRUE_DIVIDE, MakeInplaceCalcStrategyByObject},
  {BINARY_POWER, MakeCalcStrategyByObject},
  {BINARY_ADD, MakeCalcStrategyByObject},
  {BINARY_SUBTRACT, MakeCalcStrategyByObject},
  {BINARY_MULTIPLY, MakeCalcStrategyByObject},
  {BINARY_MODULO, MakeCalcStrategyByObject},
  {INPLACE_POWER, MakeInplaceCalcStrategyByObject},
  {INPLACE_ADD, MakeInplaceCalcStrategyByObject},
  {INPLACE_SUBTRACT, MakeInplaceCalcStrategyByObject},
  {INPLACE_MULTIPLY, MakeInplaceCalcStrategyByObject},
  {INPLACE_MODULO, MakeInplaceCalcStrategyByObject},
  {BINARY_MATRIX_MULTIPLY, MakeCalcStrategyByMatMul},
  {INPLACE_MATRIX_MULTIPLY, MakeInplaceCalcStrategyByObject},
  {BINARY_SUBSCR,
   [](int bytecode, int opargs, const PyObjectArray &objs) { return OptStrategy::CalcKind::kCalcValue; }},
  {COMPARE_OP, MakeCalcStrategyByCompare},
};

OptStrategy::CalcKind OptStrategy::MakeCalcStrategyByInputs(int bytecode, int opargs, const PyObjectArray &objs) {
  auto iter = kBytecodeStrategy.find(bytecode);
  if (iter != kBytecodeStrategy.end()) {
    return iter->second(bytecode, opargs, objs);
  }
  return CalcKind::kCalcUnsupported;
}
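
// Illustrative usage sketch (assumed call site, not part of this file): given the two operands of
// a BINARY_ADD about to be traced, ask whether the result can be computed eagerly:
//   PyObjectArray objs = {left, right};
//   auto kind = OptStrategy::MakeCalcStrategyByInputs(BINARY_ADD, 0, objs);
//   if (kind == OptStrategy::CalcKind::kCalcValue) { /* evaluate the operation directly */ }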

OptStrategy::CalcKind OptStrategy::MakeCalcStrategyByShape(const ShapeVector &shape) {
  if (!std::any_of(shape.begin(), shape.end(), [](const int64_t dim) { return dim > kMaxCalcDim; })) {
    return CalcKind::kCalcValue;
  } else {
    return CalcKind::kCalcShape;
  }
}
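
// Worked examples (reading aid): with kMaxCalcDim = 1, a shape of {1, 1, 1} classifies as
// kCalcValue (every dimension is <= 1), while {2, 3} classifies as kCalcShape.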

OptCodeSet OptStrategy::MakeGuardListStrategyByFrame(const PyFrameObject *frame, const OptCodeSet &codes) {
  // Currently a pass-through: the candidate list is returned unchanged.
  OptCodeSet ret;
  std::transform(codes.begin(), codes.end(), std::back_inserter(ret), [](const OptCodePtr &code) { return code; });
  return ret;
}

GuardItemVector OptStrategy::MakeGuardItemListStrategyByFrame(const PyFrameObject *frame, const GuardItemVector &list) {
  // Currently a pass-through: the guard item list is returned unchanged.
  GuardItemVector ret;
  std::transform(list.begin(), list.end(), std::back_inserter(ret), [](const GuardItemPtr &code) { return code; });
  return ret;
}
}  // namespace pijit
}  // namespace mindspore