1 /*
2 * Copyright (c) 2023 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "sched/interval.h"
17
18 #include "core/dependence_manager.h"
19 #include "eu/execute_unit.h"
20 #include "dfx/trace/ffrt_trace.h"
21
22 namespace ffrt {
Update(uint64_t deadlineUs)23 void Deadline::Update(uint64_t deadlineUs)
24 {
25 if (deadlineUs != ToUs()) {
26 deadlineNs = deadlineUs < 1 ? 1 : deadlineUs * 1000;
27 }
28
29 absDeadlineNs = deadlineNs + AbsNowNs();
30
31 FFRT_LOGI("Deadline %lu Update %lu Abs %lu", deadlineUs, deadlineNs, absDeadlineNs);
32 }
33
AbsNowNs()34 uint64_t Deadline::AbsNowNs()
35 {
36 return std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now().time_since_epoch())
37 .count();
38 }
39
// Binds this controller to the CPU thread group of the given QoS level.
// qos_inherit carries no concrete QoS, so no thread group is bound and `tg`
// stays null; tg-dependent updates then become no-ops (see Update(bool)).
PerfCtrl::PerfCtrl(const QoS& qos) : qos(qos)
{
    if (qos == qos_inherit) {
        FFRT_LOGW("Invalid Thread Group");
        return;
    }

    tg = ExecuteUnit::Instance().BindTG(DevType::CPU, this->qos);
}
49
~PerfCtrl()50 PerfCtrl::~PerfCtrl()
51 {
52 if (tg) {
53 tg = nullptr;
54 ExecuteUnit::Instance().UnbindTG(DevType::CPU, qos);
55 }
56 }
57
Update(bool force)58 void PerfCtrl::Update(bool force)
59 {
60 if (!force && predUtil == curUtil) {
61 FFRT_LOGW("Predict Util Same as Current Util %lu", predUtil);
62 return;
63 }
64
65 curUtil = predUtil;
66
67 if (tg) {
68 tg->UpdateUitl(curUtil);
69 }
70 }
71
Update(uint64_t deadlineNs,uint64_t load,bool force)72 void PerfCtrl::Update(uint64_t deadlineNs, uint64_t load, bool force)
73 {
74 if (deadlineNs == 0) {
75 deadlineNs = 1;
76 }
77 predUtil = (load << SCHED_CAPACITY_SHIFT) / deadlineNs;
78 if (predUtil > SCHED_MAX_CAPACITY) {
79 FFRT_LOGW("Predict Util %lu Exceeds Max Capacity", predUtil);
80 predUtil = SCHED_MAX_CAPACITY;
81 }
82
83 FFRT_LOGI("Update Load %lu, Deadline %lu, Util %lu\n", load, deadlineNs, predUtil);
84
85 Update(force);
86 }
87
// Feeds the measured load of a completed interval cycle into the total-load
// predictor (consumed via GetTotalLoad() at the start of the next cycle).
void IntervalLoadPredictor::UpdateTotalLoad(uint64_t load)
{
    totalLoad.UpdateLoad(load);
}
92
UpdateCPLoad(uint64_t load)93 void IntervalLoadPredictor::UpdateCPLoad(uint64_t load)
94 {
95 if (cpLoadIndex + 1 > cpLoad.size()) {
96 cpLoad.resize(cpLoadIndex + 1);
97 }
98
99 cpLoad[cpLoadIndex++].UpdateLoad(load);
100 }
101
// Returns the predicted total load for the next interval cycle.
uint64_t IntervalLoadPredictor::GetTotalLoad()
{
    return totalLoad.GetPredictLoad();
}
106
GetCPLoad()107 uint64_t IntervalLoadPredictor::GetCPLoad()
108 {
109 uint64_t load = cpLoad[cpLoadIndex].GetPredictLoad();
110 if (load == 0) {
111 return 0UL;
112 }
113
114 uint64_t predictLoad = totalLoad.GetPredictLoad();
115 return (predictLoad < load) ? 0 : (predictLoad - load);
116 }
117
// Constructs an interval bound to `qos`: the load tracker (lt) observes this
// interval, the perf controller (ctrl) attaches to the QoS thread group, and
// the controller's window is sized to the configured deadline.
DefaultInterval::DefaultInterval(uint64_t deadlineUs, const QoS& qos) : Interval(deadlineUs, qos), lt(*this), ctrl(qos)
{
    ctrl.SetWindowSize(Ddl().ToNs());
}
122
// Tears down the interval: a forced controller update with a minimal window
// (1 ns) and zero load drops this interval's utilization request.
DefaultInterval::~DefaultInterval()
{
    std::unique_lock lock(mutex);
    ctrl.Update(1, 0, true);
}
128
// Starts a new interval cycle.
// Returns 0 on success; -1 when the interval is already running or the QoS
// perf controller is occupied by another interval.
int DefaultInterval::Begin()
{
    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalBegin);
    std::unique_lock lock(mutex);

    if (Enabled()) {
        FFRT_LOGE("interval already begin\n");
        return -1;
    }

    if (ctrl.isBusy()) {
        FFRT_LOGE("qos interval is busy, please retry later\n");
        return -1;
    }

    enabled = true;

    lt.Begin();

    // Request utilization for the full deadline window from the predicted
    // total load of previous cycles (forced, so it always takes effect).
    ctrl.Update(Ddl().ToNs(), lp.GetTotalLoad(), true);
    // Checkpoint predictions restart from slot 0 every cycle.
    lp.ResetCPIndex();

    return 0;
}
153
// Changes the deadline of a running interval; no-op when the interval has
// not begun.
void DefaultInterval::Update(uint64_t deadlineUs)
{
    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalUpdate);
    std::unique_lock lock(mutex);

    if (!Enabled()) {
        return;
    }

    Ddl().Update(deadlineUs);
    // Keep the controller's window consistent with the new deadline.
    ctrl.SetWindowSize(Ddl().ToNs());
}
166
// Ends the current interval cycle; no-op when the interval has not begun.
void DefaultInterval::End()
{
    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalEnd);
    std::unique_lock lock(mutex);

    if (!Enabled()) {
        return;
    }

    enabled = false;

    // Feed this cycle's measured load into the predictor before the tracker
    // is finalized below.
    lp.UpdateTotalLoad(lt.GetLoad());

    lt.End();
}
182
// Mid-cycle checkpoint: re-requests utilization for the remaining deadline
// time from the predicted remaining load, then records the load measured so
// far into the current checkpoint slot. No-op when not begun.
void DefaultInterval::CheckPoint()
{
    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalCheckPoint);
    std::unique_lock lock(mutex);

    if (!Enabled()) {
        return;
    }

    // NOTE: the prediction is read before this cycle's sample is stored, so
    // the estimate comes from prior cycles at the same checkpoint index.
    ctrl.Update(Ddl().LeftNs(), lp.GetCPLoad());
    lp.UpdateCPLoad(lt.GetLoad());
}
195
Join()196 void DefaultInterval::Join()
197 {
198 FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalJoin);
199 std::unique_lock lock(mutex);
200 if (!ctrl.Join()) {
201 FFRT_LOGE("Failed to Join Thread %d", ThreadGroup::GetTID());
202 }
203 }
204
Leave()205 void DefaultInterval::Leave()
206 {
207 FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalLeave);
208 std::unique_lock lock(mutex);
209 if (!ctrl.Leave()) {
210 FFRT_LOGE("Failed to Leave Thread %d", ThreadGroup::GetTID());
211 }
212 }
213
UpdateTaskSwitch(TaskSwitchState state)214 void DefaultInterval::UpdateTaskSwitch(TaskSwitchState state)
215 {
216 FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalUpdateTaskSwitch);
217 std::unique_lock lock(mutex);
218
219 switch (state) {
220 case TaskSwitchState::BEGIN:
221 ctrl.Update(true);
222 break;
223 case TaskSwitchState::UPDATE:
224 ctrl.Update();
225 break;
226 case TaskSwitchState::END:
227 ctrl.clear();
228 ctrl.Update(true);
229 break;
230 default:
231 break;
232 }
233
234 lt.Record(state);
235 }
236 } // namespace ffrt
237