/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_SOLVER_ALG_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_SOLVER_ALG_H_

#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstring>
#include <list>
#include <memory>
#include <numeric>
#include <set>
#include <stack>
#include <unordered_map>
#include <utility>
#include <vector>

#include "backend/optimizer/somas/somas_solver_pre.h"
#include "utils/ms_context.h"

using std::pair;
using std::set;
using std::stack;
using std::unordered_map;
using std::vector;

namespace mindspore {
namespace somas {
// Offset interval [lb, ub) occupied by a tensor (ub = offset + size).
class Interval {
 public:
  Interval() { m_a_ = m_b_ = 0; }
  explicit Interval(SomasSolverTensorDescPtr t) {
    m_a_ = t->offset_;
    m_b_ = m_a_ + t->size_;
  }
  Interval(const size_t &a, const size_t &b) {
    m_a_ = a;
    m_b_ = b;
  }
  ~Interval() = default;

  // True if either endpoint of i falls strictly inside this interval.
  bool intersect(const Interval &i) { return (in(i.m_a_) || in(i.m_b_)); }
  bool in(const size_t &a) { return ((a > m_a_) && (a < m_b_)); }
  Interval intersection(const Interval &i) {
    if (m_a_ < i.m_a_)
      return Interval(m_a_, i.m_b_);
    else
      return Interval(i.m_a_, m_b_);
  }
  // Grow this interval so that it also covers i.
  void merge(const Interval &i) {
    m_a_ = std::min(m_a_, i.m_a_);
    m_b_ = std::max(m_b_, i.m_b_);
  }
  size_t &lb() { return m_a_; }
  size_t &ub() { return m_b_; }
  // True if the interval is at least `width` bytes wide.
  bool contains(size_t width) { return (m_b_ - m_a_) >= width; }
  bool contains(const Interval &a) { return ((a.m_a_ >= m_a_) && (a.m_b_ <= m_b_)); }
  Interval &operator=(const Interval &in) {
    m_a_ = in.m_a_;
    m_b_ = in.m_b_;
    return *this;
  }

 private:
  size_t m_a_;
  size_t m_b_;
};

// A chain of tensors anchored at m_start_tensor_, together with the offsets and
// offset candidates bookkept per solution id.
class BlockTensor {
 public:
  SomasSolverTensorDescPtr m_start_tensor_;
  unordered_map<uint32_t,
                std::set<pair<size_t, size_t>, bool (*)(const pair<size_t, size_t> &, const pair<size_t, size_t> &)>>
    offsets_candidates_;
  uint32_t m_current_sol_;
  bool m_bre_allocate_;
  unordered_map<uint32_t, size_t> offsets_;
  size_t m_size_;
  BlockTensor()
      : m_start_tensor_(nullptr),
        offsets_candidates_(),
        m_current_sol_(0),
        m_bre_allocate_(true),
        offsets_(),
        m_size_(0) {}
  ~BlockTensor() = default;

  BlockTensor &operator=(const BlockTensor &bt) {
    m_bre_allocate_ = bt.m_bre_allocate_;
    m_current_sol_ = 0;
    m_start_tensor_ = bt.m_start_tensor_;
    offsets_candidates_ = bt.offsets_candidates_;
    offsets_ = bt.offsets_;
    m_size_ = bt.m_size_;
    return *this;
  }
  // True when the block holds a single tensor (no left/right neighbours).
  bool Alone() const { return ((nullptr == m_start_tensor_->right_) && (nullptr == m_start_tensor_->left_)); }
};

// One node in a linked list of memory footprints; holds the blocks that start at
// this footprint and searches for feasible offsets for new blocks.
class FootPrint : public std::enable_shared_from_this<FootPrint> {
 public:
  uint32_t m_solId_;

  FootPrint()
      : m_solId_(0),
        m_foot_print_next_(nullptr),
        m_offset_(0),
        m_starts_({}),
        m_alignment_(0),
        m_branching_strategy_(0),
        m_algorithm_(0) {}
  ~FootPrint() = default;
  void setAlignment(const size_t a) { m_alignment_ = a; }
  void setBranchingStrategy(uint32_t bs) { m_branching_strategy_ = bs; }
  void setCurrentSol(uint32_t solId) { m_solId_ = solId; }
  void setAlgorithm(uint32_t algorithm) { m_algorithm_ = algorithm; }
  void addStart(BlockTensor *elemIndex) { m_starts_.push_back(elemIndex); }
  void addElem(BlockTensor *block, const size_t &offset);
  std::shared_ptr<FootPrint> &Next() { return m_foot_print_next_; }
  vector<BlockTensor *> &getStarts() { return m_starts_; }
  void Destroy();
  const size_t getOffset() { return m_offset_; }
  void setOffset(const size_t &offset) { m_offset_ = offset; }
  bool findOffset(const std::vector<DynamicBitSet> *constraints, const BlockTensor &block, size_t *offset);
  void Merge(vector<Interval> *l_interval, stack<Interval> *l_merged);
  bool findFirst(stack<Interval> *merged, const BlockTensor &block, size_t *offset);
  size_t Result();
  void printStats();

 private:
  std::shared_ptr<FootPrint> m_foot_print_next_;
  size_t m_offset_;
  vector<BlockTensor *> m_starts_;
  size_t m_alignment_;
  uint32_t m_branching_strategy_;
  uint32_t m_algorithm_;
};

// Places the BlockTensors into the FootPrint list while honouring the
// pairwise constraints passed to Eval().
class FastHeuristic {
 public:
  FastHeuristic() : m_alignment_(512), m_tensors_allocated_(0) {}
  ~FastHeuristic() = default;

  void setAlignment(const size_t &a) { m_alignment_ = a; }
  void Destroy();
  bool Eval(vector<BlockTensor> *block_tensors_v, const std::shared_ptr<FootPrint> &foot_print,
            const std::vector<DynamicBitSet> *pConstraints);

 private:
  size_t m_alignment_;
  size_t m_tensors_allocated_;
};
}  // namespace somas
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_SOLVER_ALG_H_
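
// ---------------------------------------------------------------------------
// Illustrative usage sketch: the Interval helper above models the half-open
// address range [offset, offset + size) of a tensor. The values below are
// made up for illustration only.
//
//   mindspore::somas::Interval a(0, 512);    // range [0, 512)
//   mindspore::somas::Interval b(256, 768);  // overlapping range
//   a.intersect(b);   // true  - 256 lies strictly inside [0, 512)
//   a.contains(512);  // true  - [0, 512) is at least 512 bytes wide
//   a.merge(b);       // a grows to cover [0, 768)
//
// FastHeuristic::Eval() is presumably the entry point for placement: it takes
// the block tensors, the head FootPrint, and the DynamicBitSet constraints
// (declared in somas_solver_pre.h) and returns whether an assignment was found.
// ---------------------------------------------------------------------------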