1 /*
2 * Copyright (c) 2023 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "sched/load_tracking.h"
17
18 #include <unordered_set>
19 #include <unordered_map>
20
21 #include <unistd.h>
22
23 #include "core/entity.h"
24 #include "core/dependence_manager.h"
25 #include "sched/interval.h"
26
27 namespace ffrt {
// Stub for the perf counter read: always yields 0, so recorded 'load'
// deltas are zero until a real perf_mmap_read_current is wired in.
#define perf_mmap_read_current() (static_cast<uint64_t>(0))
// Process-wide switch-record state shared by every user-space tracker.
static UserSpaceLoadRecord uRecord;
30
UpdateTaskSwitch(TaskCtx * prev,TaskCtx * next)31 void UserSpaceLoadRecord::UpdateTaskSwitch(TaskCtx* prev, TaskCtx* next)
32 {
33 if (!uRecord.Enable() || (!prev && !next)) {
34 return;
35 }
36
37 auto getIntersect = [](const std::unordered_set<Interval*>& thisSet,
38 const std::unordered_set<Interval*>& otherSet) {
39 std::unordered_set<Interval*> set;
40 for (auto it : thisSet) {
41 if (otherSet.find(it) != otherSet.end()) {
42 set.insert(it);
43 }
44 }
45 return set;
46 };
47
48 auto updateLt = [](const std::unordered_set<Interval*>& set, const std::unordered_set<Interval*>& intersetSet,
49 TaskSwitchState state) {
50 for (auto it : set) {
51 if (intersetSet.find(it) != intersetSet.end()) {
52 continue;
53 }
54 it->UpdateTaskSwitch(state);
55 }
56 };
57
58 std::unordered_set<Interval*> intersectSet;
59 if (prev && next) {
60 intersectSet = getIntersect(prev->relatedIntervals, next->relatedIntervals);
61 for (auto it : intersectSet) {
62 it->UpdateTaskSwitch(TaskSwitchState::UPDATE);
63 }
64 }
65
66 if (prev) {
67 updateLt(prev->relatedIntervals, intersectSet, TaskSwitchState::END);
68 }
69
70 if (next) {
71 updateLt(next->relatedIntervals, intersectSet, TaskSwitchState::BEGIN);
72 }
73 }
74
BeginImpl()75 void KernelLoadTracking::BeginImpl()
76 {
77 it.Ctrl().Begin();
78 }
79
EndImpl()80 void KernelLoadTracking::EndImpl()
81 {
82 it.Ctrl().End();
83 }
84
GetLoadImpl()85 uint64_t KernelLoadTracking::GetLoadImpl()
86 {
87 return it.Ctrl().GetLoad();
88 }
89
// One event on the merged load timeline built by CollectHistList():
// at time 'tp' the given record either starts contributing (BEGIN) or
// stops contributing (END) to the interval's load.
struct UserSpaceLoadTracking::HistPoint {
    TaskSwitchRecord* record;  // originating raw switch record
    TaskSwitchState state;     // BEGIN or END on the merged timeline
    double load;               // load-per-tick while active (0 for END events)
    std::chrono::time_point<std::chrono::steady_clock> tp;  // event timestamp
};
96
// Constructing any user-space tracker globally enables user-space switch
// recording (uRecord is shared process-wide and is never disabled here).
UserSpaceLoadTracking::UserSpaceLoadTracking(DefaultInterval& it) : LoadTracking<UserSpaceLoadTracking>(it)
{
    uRecord.SetEnable(true);
}
101
BeginImpl()102 void UserSpaceLoadTracking::BeginImpl()
103 {
104 auto ctx = ExecuteCtx::Cur();
105 auto task = ctx->task ? ctx->task : DependenceManager::Root();
106 if (task->IsRoot() || it.Qos() == task->qos) {
107 task->relatedIntervals.insert(&it);
108 }
109 }
110
EndImpl()111 void UserSpaceLoadTracking::EndImpl()
112 {
113 DependenceManager::Root()->relatedIntervals.erase(&it);
114 records.clear();
115 }
116
// Forward a switch notification into this thread's record list
// (unforced, so rapid UPDATEs may be rate-limited by RecordSwitchPoint).
void UserSpaceLoadTracking::RecordImpl(TaskSwitchState state)
{
    RecordSwitchPoint(state);
}
121
GetLoadImpl()122 uint64_t UserSpaceLoadTracking::GetLoadImpl()
123 {
124 auto histList = CollectHistList();
125
126 double totalLoad = 0;
127 std::unordered_map<TaskSwitchRecord*, double> filter;
128
129 auto updateTotalLoad = [&](size_t i) {
130 if (filter.size() == 0) {
131 return;
132 }
133
134 double delta = (histList[i].tp - histList[i - 1].tp).count();
135 if (delta <= 0) {
136 return;
137 }
138
139 double maxLps = 0;
140 for (const auto& f : filter) {
141 maxLps = std::max(maxLps, f.second);
142 }
143
144 totalLoad += maxLps * delta;
145 };
146
147 for (size_t i = 0; i < histList.size(); ++i) {
148 updateTotalLoad(i);
149
150 switch (histList[i].state) {
151 case TaskSwitchState::BEGIN:
152 filter[histList[i].record] = histList[i].load;
153 break;
154 default:
155 filter.erase(histList[i].record);
156 break;
157 }
158 }
159
160 return static_cast<uint64_t>(totalLoad);
161 }
162
RecordSwitchPoint(TaskSwitchState state,bool force)163 void UserSpaceLoadTracking::RecordSwitchPoint(TaskSwitchState state, bool force)
164 {
165 auto& record = records[std::this_thread::get_id()];
166 auto tp = std::chrono::steady_clock::now();
167 if (state == TaskSwitchState::UPDATE && !force && !record.empty() &&
168 tp - record.back().tp < std::chrono::milliseconds(1)) {
169 return;
170 }
171
172 record.emplace_back(TaskSwitchRecord {perf_mmap_read_current(), state, tp});
173 }
174
// Flatten every thread's raw switch records into a single, time-sorted
// list of BEGIN/END events for GetLoadImpl to integrate.
// NOTE: the lambda temporarily moves the caller's iterator to peek at
// neighbouring records and always restores it, so the enclosing loop's
// iteration stays valid. Pointers into the record lists remain stable
// because the per-thread containers are node-based (RecordList).
std::vector<UserSpaceLoadTracking::HistPoint> UserSpaceLoadTracking::CollectHistList()
{
    std::vector<HistPoint> histList;

    auto collectHist = [&histList](RecordList::iterator& it, size_t index, size_t size) {
        auto& cur = *it;

        // deal task begin: a non-END record that has a successor opens an
        // active span; its rate is (load delta) / (time delta) to the next
        // record on the same thread.
        if (cur.state != TaskSwitchState::END && index + 1 < size) {
            const auto& next = *++it;
            double load = static_cast<double>(next.load - cur.load) / static_cast<double>((next.tp - cur.tp).count());
            histList.emplace_back(HistPoint {&cur, TaskSwitchState::BEGIN, load, cur.tp});
            --it;  // restore iterator to 'cur'
        }

        // deal task end: a non-BEGIN record closes the span opened by its
        // predecessor, stamped with the current record's time.
        if (cur.state != TaskSwitchState::BEGIN && index > 0) {
            auto& prev = *--it;
            histList.emplace_back(HistPoint {&prev, TaskSwitchState::END, 0, cur.tp});
            ++it;  // restore iterator to 'cur'
        }
    };

    // Force a closing record on this thread so the history is bounded by "now".
    RecordSwitchPoint(TaskSwitchState::UPDATE, true);

    for (auto& record : records) {
        auto& list = record.second;

        size_t index = 0;
        size_t size = list.size();
        for (auto it = list.begin(); it != list.end(); ++it, ++index) {
            collectHist(it, index, size);
        }
    }

    // Chronological order; at equal timestamps the larger enum value sorts
    // first. NOTE(review): this tiebreak relies on TaskSwitchState's
    // enumerator order — confirm END is intended to precede BEGIN here.
    std::sort(histList.begin(), histList.end(),
        [](const HistPoint& x, const HistPoint& y) { return x.tp < y.tp || ((x.tp == y.tp) && (x.state > y.state)); });

    return histList;
}
215 }; // namespace ffrt
216