/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "los_sched_pri.h"
#include "los_hw_pri.h"
#include "los_task_pri.h"
#include "los_swtmr_pri.h"
#include "los_process_pri.h"
#include "los_arch_mmu.h"
#include "los_hook.h"
#ifdef LOSCFG_KERNEL_CPUP
#include "los_cpup_pri.h"
#endif
#include "los_hw_tick_pri.h"
#include "los_tick_pri.h"
#ifdef LOSCFG_BASE_CORE_TSK_MONITOR
#include "los_stackinfo_pri.h"
#endif
#include "los_mp.h"

SchedRunqueue g_schedRunqueue[LOSCFG_KERNEL_CORE_NUM];

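/*
 * Program the next scheduling expiry for this core: take the earlier of the timeout
 * queue's next expire time and the running task's time-slice deadline, and reload the
 * tick timer only if that is earlier than the currently programmed response time by at
 * least the tick response precision.
 */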
STATIC INLINE VOID SchedNextExpireTimeSet(UINT32 responseID, UINT64 taskEndTime, UINT32 oldResponseID)
{
    SchedRunqueue *rq = OsSchedRunqueue();
    BOOL isTimeSlice = FALSE;
    UINT64 currTime = OsGetCurrSchedTimeCycle();
    UINT64 nextExpireTime = OsGetSortLinkNextExpireTime(&rq->timeoutQueue, currTime, OS_TICK_RESPONSE_PRECISION);

    rq->schedFlag &= ~INT_PEND_TICK;
    if (rq->responseID == oldResponseID) {
        /* The programmed response time has expired; reset the theoretical next expiry to infinity */
        rq->responseTime = OS_SCHED_MAX_RESPONSE_TIME;
    }

    /* The current task's time slice may already be consumed, but while a task holds the
     * scheduler lock the expiry cannot trigger a schedule to release the CPU.
     */
    if ((nextExpireTime > taskEndTime) && ((nextExpireTime - taskEndTime) > OS_SCHED_MINI_PERIOD)) {
        nextExpireTime = taskEndTime;
        isTimeSlice = TRUE;
    }

    if ((rq->responseTime <= nextExpireTime) ||
        ((rq->responseTime - nextExpireTime) < OS_TICK_RESPONSE_PRECISION)) {
        return;
    }

    if (isTimeSlice) {
        /* The next system expiry is the running task's time-slice expiry */
        rq->responseID = responseID;
    } else {
        rq->responseID = OS_INVALID_VALUE;
    }

    UINT64 nextResponseTime = nextExpireTime - currTime;
    rq->responseTime = currTime + HalClockTickTimerReload(nextResponseTime);
}

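/*
 * Refresh the programmed expiry from the running task's deadline. If the scheduler is
 * not active yet or this is called from interrupt context, only mark the runqueue as
 * having a pending tick update.
 */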
VOID OsSchedExpireTimeUpdate(VOID)
{
    UINT32 intSave;
    if (!OS_SCHEDULER_ACTIVE || OS_INT_ACTIVE) {
        OsSchedRunqueuePendingSet();
        return;
    }

    LosTaskCB *runTask = OsCurrTaskGet();
    SCHEDULER_LOCK(intSave);
    UINT64 deadline = runTask->ops->deadlineGet(runTask);
    SCHEDULER_UNLOCK(intSave);
    SchedNextExpireTimeSet(runTask->taskID, deadline, runTask->taskID);
}

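/*
 * Wake a task whose timeout has expired: clear its pend/delay state, mark it as timed
 * out and detach it from the pend list if it was pending on an IPC object, and enqueue
 * it back to the ready queue unless it is suspended.
 */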
STATIC INLINE VOID SchedTimeoutTaskWake(SchedRunqueue *rq, UINT64 currTime, LosTaskCB *taskCB, BOOL *needSched)
{
#ifndef LOSCFG_SCHED_DEBUG
    (VOID)currTime;
#endif

    LOS_SpinLock(&g_taskSpin);
    UINT16 tempStatus = taskCB->taskStatus;
    if (tempStatus & (OS_TASK_STATUS_PENDING | OS_TASK_STATUS_DELAY)) {
        taskCB->taskStatus &= ~(OS_TASK_STATUS_PENDING | OS_TASK_STATUS_PEND_TIME | OS_TASK_STATUS_DELAY);
        if (tempStatus & OS_TASK_STATUS_PENDING) {
            taskCB->taskStatus |= OS_TASK_STATUS_TIMEOUT;
            LOS_ListDelete(&taskCB->pendList);
            taskCB->taskMux = NULL;
            OsTaskWakeClearPendMask(taskCB);
        }

        if (!(tempStatus & OS_TASK_STATUS_SUSPENDED)) {
#ifdef LOSCFG_SCHED_DEBUG
            taskCB->schedStat.pendTime += currTime - taskCB->startTime;
            taskCB->schedStat.pendCount++;
#endif
            taskCB->ops->enqueue(rq, taskCB);
            *needSched = TRUE;
        }
    }

    LOS_SpinUnlock(&g_taskSpin);
}

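/*
 * Scan this core's timeout queue and wake every task whose response time has been
 * reached. Returns TRUE if any woken task was enqueued and a reschedule is needed.
 */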
STATIC INLINE BOOL SchedTimeoutQueueScan(SchedRunqueue *rq)
{
    BOOL needSched = FALSE;
    SortLinkAttribute *timeoutQueue = &rq->timeoutQueue;
    LOS_DL_LIST *listObject = &timeoutQueue->sortLink;
    /*
     * When a task is pended with a timeout, its task control block sits on both the per-CPU
     * timeout sortlink and the IPC object's (mutex, semaphore, etc.) pend list at the same
     * time, so it can be woken either by the timeout or by the IPC it is waiting on.
     *
     * A synchronous sortlink procedure is used, therefore the whole task scan needs to be
     * protected to prevent another core from deleting sortlink nodes at the same time.
     */
    LOS_SpinLock(&timeoutQueue->spinLock);

    if (LOS_ListEmpty(listObject)) {
        LOS_SpinUnlock(&timeoutQueue->spinLock);
        return needSched;
    }

    SortLinkList *sortList = LOS_DL_LIST_ENTRY(listObject->pstNext, SortLinkList, sortLinkNode);
    UINT64 currTime = OsGetCurrSchedTimeCycle();
    while (sortList->responseTime <= currTime) {
        LosTaskCB *taskCB = LOS_DL_LIST_ENTRY(sortList, LosTaskCB, sortList);
        OsDeleteNodeSortLink(timeoutQueue, &taskCB->sortList);
        LOS_SpinUnlock(&timeoutQueue->spinLock);

        SchedTimeoutTaskWake(rq, currTime, taskCB, &needSched);

        LOS_SpinLock(&timeoutQueue->spinLock);
        if (LOS_ListEmpty(listObject)) {
            break;
        }

        sortList = LOS_DL_LIST_ENTRY(listObject->pstNext, SortLinkList, sortLinkNode);
    }

    LOS_SpinUnlock(&timeoutQueue->spinLock);

    return needSched;
}

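/*
 * Per-tick scheduling work: when the programmed expiry was not a time-slice expiry,
 * scan the timeout queue and, if any task was woken, request a reschedule on all cores.
 * The tick is always marked pending and the response time is reset.
 */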
VOID OsSchedTick(VOID)
{
    SchedRunqueue *rq = OsSchedRunqueue();

    if (rq->responseID == OS_INVALID_VALUE) {
        if (SchedTimeoutQueueScan(rq)) {
            LOS_MpSchedule(OS_MP_CPU_ALL);
            rq->schedFlag |= INT_PEND_RESCH;
        }
    }
    rq->schedFlag |= INT_PEND_TICK;
    rq->responseTime = OS_SCHED_MAX_RESPONSE_TIME;
}

VOID OsSchedResponseTimeReset(UINT64 responseTime)
{
    OsSchedRunqueue()->responseTime = responseTime;
}

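/* Initialize every per-core runqueue (executed on core 0 only): set up the timeout
 * sortlink and reset the response time. */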
VOID OsSchedRunqueueInit(VOID)
{
    if (ArchCurrCpuid() != 0) {
        return;
    }

    for (UINT16 index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {
        SchedRunqueue *rq = OsSchedRunqueueByID(index);
        OsSortLinkInit(&rq->timeoutQueue);
        rq->responseTime = OS_SCHED_MAX_RESPONSE_TIME;
    }
}

VOID OsSchedRunqueueIdleInit(UINT32 idleTaskID)
{
    SchedRunqueue *rq = OsSchedRunqueue();
    rq->idleTaskID = idleTaskID;
}

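/* Initialize the scheduler: set up the HPF (highest-priority-first) policy runqueue on
 * each core and, when enabled, the tick debug statistics. */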
UINT32 OsSchedInit(VOID)
{
    for (UINT16 cpuId = 0; cpuId < LOSCFG_KERNEL_CORE_NUM; cpuId++) {
        HPFSchedPolicyInit(OsSchedRunqueueByID(cpuId));
    }

#ifdef LOSCFG_SCHED_TICK_DEBUG
    UINT32 ret = OsSchedDebugInit();
    if (ret != LOS_OK) {
        return ret;
    }
#endif
    return LOS_OK;
}

/*
 * If the return value is greater than 0, task1 has a lower priority than task2.
 * If the return value is less than 0, task1 has a higher priority than task2.
 * If the return value is 0, task1 and task2 have the same priority.
 */
INT32 OsSchedParamCompare(const LosTaskCB *task1, const LosTaskCB *task2)
{
    SchedHPF *rp1 = (SchedHPF *)&task1->sp;
    SchedHPF *rp2 = (SchedHPF *)&task2->sp;

    if (rp1->policy == rp2->policy) {
        return task1->ops->schedParamCompare(&task1->sp, &task2->sp);
    }

    if (rp1->policy == LOS_SCHED_IDLE) {
        return 1;
    } else if (rp2->policy == LOS_SCHED_IDLE) {
        return -1;
    }
    return 0;
}

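/* Initialize a task's scheduling parameters according to the requested policy:
 * FIFO/RR use the HPF parameters, IDLE uses the idle parameters. */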
UINT32 OsSchedParamInit(LosTaskCB *taskCB, UINT16 policy, const SchedParam *parentParam, const TSK_INIT_PARAM_S *param)
{
    switch (policy) {
        case LOS_SCHED_FIFO:
        case LOS_SCHED_RR:
            HPFTaskSchedParamInit(taskCB, policy, parentParam, param);
            break;
        case LOS_SCHED_IDLE:
            IdleTaskSchedParamInit(taskCB);
            break;
        default:
            return LOS_NOK;
    }

    return LOS_OK;
}

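/* Get the default process-level scheduling parameters for a policy; only FIFO and RR
 * are valid at process level. */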
VOID OsSchedProcessDefaultSchedParamGet(UINT16 policy, SchedParam *param)
{
    switch (policy) {
        case LOS_SCHED_FIFO:
        case LOS_SCHED_RR:
            HPFProcessDefaultSchedParamGet(param);
            break;
        case LOS_SCHED_IDLE:
        default:
            PRINT_ERR("Invalid process-level scheduling policy, %u\n", policy);
            break;
    }
    return;
}

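/* Pick the highest-priority ready task from the HPF runqueue, falling back to the idle
 * task when the runqueue is empty, and invoke its start operation before returning it. */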
STATIC LosTaskCB *TopTaskGet(SchedRunqueue *rq)
{
    LosTaskCB *newTask = HPFRunqueueTopTaskGet(rq->hpfRunqueue);

    if (newTask == NULL) {
        newTask = OS_TCB_FROM_TID(rq->idleTaskID);
    }

    newTask->ops->start(rq, newTask);
    return newTask;
}

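/*
 * Start scheduling on the current core: start the tick, pick the first task, mark it
 * running, mark scheduling as started on this core, program the first expiry and load
 * the task context.
 */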
VOID OsSchedStart(VOID)
{
    UINT32 cpuid = ArchCurrCpuid();
    UINT32 intSave;

    PRINTK("cpu %d entering scheduler\n", cpuid);

    SCHEDULER_LOCK(intSave);

    OsTickStart();

    SchedRunqueue *rq = OsSchedRunqueue();
    LosTaskCB *newTask = TopTaskGet(rq);
    newTask->taskStatus |= OS_TASK_STATUS_RUNNING;

#ifdef LOSCFG_KERNEL_SMP
    /*
     * Attention: the current CPU needs to be set here, otherwise deleting the first task
     * may fail because this field does not match the real current CPU.
     */
    newTask->currCpu = cpuid;
#endif

    OsCurrTaskSet((VOID *)newTask);

    newTask->startTime = OsGetCurrSchedTimeCycle();

    OsSwtmrResponseTimeReset(newTask->startTime);

    /* The system starts scheduling */
    OS_SCHEDULER_SET(cpuid);

    rq->responseID = OS_INVALID;
    UINT64 deadline = newTask->ops->deadlineGet(newTask);
    SchedNextExpireTimeSet(newTask->taskID, deadline, OS_INVALID);
    OsTaskContextLoad(newTask);
}

#ifdef LOSCFG_KERNEL_SMP
VOID OsSchedToUserReleaseLock(VOID)
{
    /* The scheduling lock needs to be released before returning to user mode */
    LOCKDEP_CHECK_OUT(&g_taskSpin);
    ArchSpinUnlock(&g_taskSpin.rawLock);

    OsSchedUnlock();
}
#endif

#ifdef LOSCFG_BASE_CORE_TSK_MONITOR
STATIC VOID TaskStackCheck(LosTaskCB *runTask, LosTaskCB *newTask)
{
    if (!OS_STACK_MAGIC_CHECK(runTask->topOfStack)) {
        LOS_Panic("CURRENT task ID: %s:%u stack overflow!\n", runTask->taskName, runTask->taskID);
    }

    if (((UINTPTR)(newTask->stackPointer) <= newTask->topOfStack) ||
        ((UINTPTR)(newTask->stackPointer) > (newTask->topOfStack + newTask->stackSize))) {
        LOS_Panic("HIGHEST task ID: %s:%u SP error! StackPointer: %p TopOfStack: %p\n",
                  newTask->taskName, newTask->taskID, newTask->stackPointer, newTask->topOfStack);
    }
}
#endif

STATIC INLINE VOID SchedSwitchCheck(LosTaskCB *runTask, LosTaskCB *newTask)
{
#ifdef LOSCFG_BASE_CORE_TSK_MONITOR
    TaskStackCheck(runTask, newTask);
#endif /* LOSCFG_BASE_CORE_TSK_MONITOR */
    OsHookCall(LOS_HOOK_TYPE_TASK_SWITCHEDIN, newTask, runTask);
}

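/*
 * Switch from runTask to newTask: update the status bits, owner CPU, MMU context and
 * CPUP statistics, refresh the start time and time slice, add a blocked runTask to the
 * timeout queue when it pends with a timeout or delay, program the next expiry and
 * finally perform the architecture context switch.
 */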
STATIC VOID SchedTaskSwitch(SchedRunqueue *rq, LosTaskCB *runTask, LosTaskCB *newTask)
{
    SchedSwitchCheck(runTask, newTask);

    runTask->taskStatus &= ~OS_TASK_STATUS_RUNNING;
    newTask->taskStatus |= OS_TASK_STATUS_RUNNING;

#ifdef LOSCFG_KERNEL_SMP
    /* Mark the new running task's owner processor */
    runTask->currCpu = OS_TASK_INVALID_CPUID;
    newTask->currCpu = ArchCurrCpuid();
#endif

    OsCurrTaskSet((VOID *)newTask);
#ifdef LOSCFG_KERNEL_VM
    if (newTask->archMmu != runTask->archMmu) {
        LOS_ArchMmuContextSwitch((LosArchMmu *)newTask->archMmu);
    }
#endif

#ifdef LOSCFG_KERNEL_CPUP
    OsCpupCycleEndStart(runTask->taskID, newTask->taskID);
#endif

#ifdef LOSCFG_SCHED_DEBUG
    UINT64 waitStartTime = newTask->startTime;
#endif
    if (runTask->taskStatus & OS_TASK_STATUS_READY) {
        /* When a thread enters the ready queue, its time slice is updated */
        newTask->startTime = runTask->startTime;
    } else {
        /* The currently running task is blocked */
        newTask->startTime = OsGetCurrSchedTimeCycle();
        /* The task is blocking and needs its time slice updated before it pends */
        runTask->ops->timeSliceUpdate(rq, runTask, newTask->startTime);

        if (runTask->taskStatus & (OS_TASK_STATUS_PEND_TIME | OS_TASK_STATUS_DELAY)) {
            OsSchedTimeoutQueueAdd(runTask, runTask->startTime + runTask->waitTime);
        }
    }

    UINT64 deadline = newTask->ops->deadlineGet(newTask);
    SchedNextExpireTimeSet(newTask->taskID, deadline, runTask->taskID);

#ifdef LOSCFG_SCHED_DEBUG
    newTask->schedStat.waitSchedTime += newTask->startTime - waitStartTime;
    newTask->schedStat.waitSchedCount++;
    runTask->schedStat.runTime = runTask->schedStat.allRuntime;
    runTask->schedStat.switchCount++;
#endif
    /* Do the task context switch */
    OsTaskSchedule(newTask, runTask);
}

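/*
 * Called at the end of interrupt handling: update the running task's time slice and,
 * if preemption is allowed and a reschedule is pending, enqueue the running task and
 * switch to the top task; when no switch happens and a tick update is pending, refresh
 * the expire time.
 */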
VOID OsSchedIrqEndCheckNeedSched(VOID)
{
    SchedRunqueue *rq = OsSchedRunqueue();
    LosTaskCB *runTask = OsCurrTaskGet();

    runTask->ops->timeSliceUpdate(rq, runTask, OsGetCurrSchedTimeCycle());

    if (OsPreemptable() && (rq->schedFlag & INT_PEND_RESCH)) {
        rq->schedFlag &= ~INT_PEND_RESCH;

        LOS_SpinLock(&g_taskSpin);

        runTask->ops->enqueue(rq, runTask);

        LosTaskCB *newTask = TopTaskGet(rq);
        if (runTask != newTask) {
            SchedTaskSwitch(rq, runTask, newTask);
            LOS_SpinUnlock(&g_taskSpin);
            return;
        }

        LOS_SpinUnlock(&g_taskSpin);
    }

    if (rq->schedFlag & INT_PEND_TICK) {
        OsSchedExpireTimeUpdate();
    }
}

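/* Reschedule with the task spinlock held: pick the top task and switch to it if it
 * differs from the currently running task. */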
VOID OsSchedResched(VOID)
{
    LOS_ASSERT(LOS_SpinHeld(&g_taskSpin));
    SchedRunqueue *rq = OsSchedRunqueue();
#ifdef LOSCFG_KERNEL_SMP
    LOS_ASSERT(rq->taskLockCnt == 1);
#else
    LOS_ASSERT(rq->taskLockCnt == 0);
#endif

    rq->schedFlag &= ~INT_PEND_RESCH;
    LosTaskCB *runTask = OsCurrTaskGet();
    LosTaskCB *newTask = TopTaskGet(rq);
    if (runTask == newTask) {
        return;
    }

    SchedTaskSwitch(rq, runTask, newTask);
}

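/*
 * Trigger a schedule from task context. Defers by setting the pending flag when called
 * from interrupt context and returns directly when preemption is disabled; otherwise the
 * running task's time slice is updated, the task is re-enqueued and a reschedule runs.
 */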
VOID LOS_Schedule(VOID)
{
    UINT32 intSave;
    LosTaskCB *runTask = OsCurrTaskGet();
    SchedRunqueue *rq = OsSchedRunqueue();

    if (OS_INT_ACTIVE) {
        OsSchedRunqueuePendingSet();
        return;
    }

    if (!OsPreemptable()) {
        return;
    }

    /*
     * Triggering a schedule from task context also performs the time-slice check:
     * if necessary, the task gives up its time slice promptly; otherwise there are
     * no other side effects.
     */
    SCHEDULER_LOCK(intSave);

    runTask->ops->timeSliceUpdate(rq, runTask, OsGetCurrSchedTimeCycle());

    /* Add the running task back to the ready queue */
    runTask->ops->enqueue(rq, runTask);

    /* Reschedule to the new task */
    OsSchedResched();

    SCHEDULER_UNLOCK(intSave);
}

STATIC INLINE LOS_DL_LIST *SchedLockPendFindPosSub(const LosTaskCB *runTask, const LOS_DL_LIST *lockList)
{
    LosTaskCB *pendedTask = NULL;

    LOS_DL_LIST_FOR_EACH_ENTRY(pendedTask, lockList, LosTaskCB, pendList) {
        INT32 ret = OsSchedParamCompare(pendedTask, runTask);
        if (ret < 0) {
            continue;
        } else if (ret > 0) {
            return &pendedTask->pendList;
        } else {
            return pendedTask->pendList.pstNext;
        }
    }
    return NULL;
}

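/* Find the position at which runTask should be inserted into a priority-ordered pend
 * list, keeping the list sorted from highest to lowest priority. */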
LOS_DL_LIST *OsSchedLockPendFindPos(const LosTaskCB *runTask, LOS_DL_LIST *lockList)
{
    if (LOS_ListEmpty(lockList)) {
        return lockList;
    }

    LosTaskCB *pendedTask1 = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(lockList));
    INT32 ret = OsSchedParamCompare(pendedTask1, runTask);
    if (ret > 0) {
        return lockList->pstNext;
    }

    LosTaskCB *pendedTask2 = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_LAST(lockList));
    ret = OsSchedParamCompare(pendedTask2, runTask);
    if (ret <= 0) {
        return lockList;
    }

    return SchedLockPendFindPosSub(runTask, lockList);
}