/* ----------------------------------------------------------------------------
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2019. All rights reserved.
 * Description: Scheduler Private Header File
 * Author: Huawei LiteOS Team
 * Create: 2018-09-10
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 * conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 * of conditions and the following disclaimer in the documentation and/or other materials
 * provided with the distribution.
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific prior written
 * permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * --------------------------------------------------------------------------- */

#ifndef _LOS_SCHED_PRI_H
#define _LOS_SCHED_PRI_H

#include "los_task_base.h"
#include "los_priqueue_pri.h"
#include "los_percpu_pri.h"
#include "los_hwi.h"
#include "los_spinlock_pri.h"

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

extern UINT32 g_taskScheduled;

/*
 * Scheduler flag: each bit represents one core.
 * This flag is used to prevent kernel scheduling before OSStartToRun.
 */
#define OS_SCHEDULER_SET(cpuid) do {     \
    g_taskScheduled |= (1U << (cpuid));  \
} while (0)

#define OS_SCHEDULER_CLR(cpuid) do {     \
    g_taskScheduled &= ~(1U << (cpuid)); \
} while (0)

#define OS_SCHEDULER_ACTIVE ((g_taskScheduled & (1U << ArchCurrCpuid())) != 0)

#define OS_SCHEDULER_ALL_ACTIVE (g_taskScheduled == LOSCFG_KERNEL_CPU_MASK)

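/*
 * Illustrative sketch only (not part of this header): the startup path of each
 * core is expected to set its scheduler bit once it is ready to schedule, and
 * can poll OS_SCHEDULER_ALL_ACTIVE to wait for the other cores.
 * OsExampleMarkCoreSchedulable is a hypothetical name used for illustration.
 */
#if 0
STATIC VOID OsExampleMarkCoreSchedulable(VOID)
{
    UINT32 cpuid = ArchCurrCpuid();

    OS_SCHEDULER_SET(cpuid);            /* scheduling is now allowed on this core */
    while (!OS_SCHEDULER_ALL_ACTIVE) {  /* optionally wait until every core is up */
    }
}
#endif
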
typedef enum {
    INT_NO_RESCH = 0,         /* no need to reschedule */
    INT_PEND_RESCH,           /* a reschedule is pending */
    INT_SUSPEND_DELETE_RESCH, /* reschedule pended by the suspend/delete interfaces in interrupt context */
} SchedFlag;

/*
 * This function simply picks the next task and switches to it.
 * The current task must already be in the right state and on the
 * right queues before this is called.
 */
extern VOID OsSchedResched(VOID);

/*
 * This function puts the current task back to the ready queue and
 * tries to schedule. However, the schedule does not necessarily take
 * place if there is no higher-priority ready task or the scheduler is locked.
 */
extern VOID OsSchedPreempt(VOID);

#ifndef LOSCFG_SCHED_LATENCY
extern VOID OsSchedProcSchedFlag(VOID);

STATIC INLINE VOID OsSetSchedFlag(UINT32 schedFlag)
{
    /* Set schedule flag if preemption is disabled */
    OsPercpuGet()->schedFlag = schedFlag;
}

/*
 * Just like OsSchedPreempt, except that this function also does the OS_INT_ACTIVE
 * check, in case the schedule is requested in the middle of an interrupt.
 */
STATIC INLINE VOID LOS_Schedule(VOID)
{
    if (OS_INT_ACTIVE) {
#ifndef LOSCFG_KERNEL_SMP
        if (OsPercpuGet()->schedFlag == INT_SUSPEND_DELETE_RESCH) {
            return;
        }
#endif
        OsSetSchedFlag(INT_PEND_RESCH);
        return;
    }

    /*
     * Triggering a schedule in task context also does the time slice check
     * if necessary, so an expired time slice is given up in time; otherwise
     * there are no other side effects.
     */
    OsSchedPreempt();
}
#else
/* On Cortex-M there is no need to support the latency scheduling interface after an interrupt */
STATIC INLINE VOID OsSchedProcSchedFlag(VOID)
{
}

STATIC INLINE VOID OsSetSchedFlag(UINT32 schedFlag)
{
    (VOID)schedFlag;
}

STATIC INLINE VOID LOS_Schedule(VOID)
{
    /*
     * Triggering a schedule in task context also does the time slice check
     * if necessary, so an expired time slice is given up in time; otherwise
     * there are no other side effects.
     */
    OsSchedPreempt();
}
#endif
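
/*
 * Illustrative sketch only (not part of this header): a wake-up path that may
 * run in both task and interrupt context can call LOS_Schedule() unconditionally.
 * In interrupt context it only pends INT_PEND_RESCH (presumably consumed later
 * via OsSchedProcSchedFlag on the interrupt exit path); in task context it
 * preempts immediately through OsSchedPreempt. ExampleEventPost is a
 * hypothetical name used for illustration.
 */
#if 0
STATIC VOID ExampleEventPost(VOID)
{
    /* ... make some higher-priority task ready under the proper scheduler locks ... */
    LOS_Schedule();   /* safe to call from both task and interrupt context */
}
#endif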

/* Check whether the current task is preemptible, based on the task lock counter */
STATIC INLINE BOOL OsPreemptable(VOID)
{
    /*
     * Unlike OsPreemptableInSched, interrupts may not be disabled when OsPreemptable
     * is called, so they must be disabled manually here to prevent the current task
     * from being migrated to another core and reading the wrong preemptible status.
     */
    UINT32 intSave = LOS_IntLock();
    BOOL preemptible = (OsPercpuGet()->taskLockCnt == 0);
    if (!preemptible) {
        OsSetSchedFlag(INT_PEND_RESCH);
    }

    LOS_IntRestore(intSave);
    return preemptible;
}
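
/*
 * Illustrative sketch only (not part of this header): a caller that has just
 * made another task ready can use OsPreemptable() to decide whether to trigger
 * a reschedule right away; when the scheduler is task-locked, the pended
 * INT_PEND_RESCH flag is expected to defer the reschedule until the lock is
 * released. ExampleMakeReadyAndReschedule is a hypothetical name used for
 * illustration.
 */
#if 0
STATIC VOID ExampleMakeReadyAndReschedule(VOID)
{
    /* ... enqueue a higher-priority task to the ready queue ... */
    if (OsPreemptable()) {
        LOS_Schedule();   /* reschedule now; otherwise the pended flag defers it */
    }
}
#endif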

STATIC INLINE BOOL OsPreemptableInSched(VOID)
{
    BOOL preemptible = FALSE;

#ifdef LOSCFG_KERNEL_SMP
    /*
     * For SMP systems, the scheduler must hold the task spinlock, and this
     * counter is increased by 1 in that case.
     */
    preemptible = (OsPercpuGet()->taskLockCnt == 1);

#else
    preemptible = (OsPercpuGet()->taskLockCnt == 0);
#endif
    if (!preemptible) {
        OsSetSchedFlag(INT_PEND_RESCH);
    }

    return preemptible;
}

#ifdef LOSCFG_BASE_CORE_TIMESLICE
/*
 * This API is used to check time slices. If the tick count reaches the time configured
 * for a task switch, tasks are switched. Otherwise, the tick counting continues.
 */
extern VOID OsTimesliceCheck(VOID);
#endif

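/*
 * Illustrative sketch only (not part of this header): OsTimesliceCheck is
 * intended to be driven by the periodic tick, so a tick hook would typically
 * call it once per tick, guarded by the same configuration option.
 * ExampleTickHook is a hypothetical name used for illustration.
 */
#if 0
STATIC VOID ExampleTickHook(VOID)
{
#ifdef LOSCFG_BASE_CORE_TIMESLICE
    OsTimesliceCheck();   /* switch tasks when the running task's time slice expires */
#endif
    /* ... other per-tick work ... */
}
#endif
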
extern VOID OsSchedWait(LosTaskCB *runTask, LOS_DL_LIST *list, UINT32 timeout);
extern VOID OsSchedWake(LosTaskCB *resumedTask);
extern VOID OsSchedPrioModify(LosTaskCB *taskCB, UINT16 priority);
extern UINT32 OsSchedSuspend(LosTaskCB *taskCB);

#ifdef LOSCFG_SCHED_MQ
extern LosTaskCB *OsGetTopTaskByPriQueue(PriQueue *priQueue, UINT32 cpuId);
STATIC INLINE LosTaskCB *OsGetTopTask(VOID)
{
    return OsGetTopTaskByPriQueue(&g_priQueue[ArchCurrCpuid()], ArchCurrCpuid());
}

extern VOID OsSchedLock(VOID);
extern VOID OsSchedUnlock(VOID);
extern SPIN_LOCK_S *OsSchedLockByTask(LosTaskCB *task);
extern UINT32 OsSchedPickCpu(LosTaskCB *task);

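/*
 * The helpers below hand over between the global task spinlock (g_taskSpin) and
 * the current CPU's priority queue lock. Each one releases the lock it holds
 * before acquiring the other, so any task state observed under the previous
 * lock must be re-checked by the caller after the switch.
 */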
STATIC INLINE VOID OsSchedLockGlobal2Local(VOID)
{
    OsSpinUnlockRaw(&g_taskSpin);
    OsSpinLockRaw(&g_priQueue[ArchCurrCpuid()].lock);
}

STATIC INLINE VOID OsSchedLockLocal2Global(VOID)
{
    OsSpinUnlockRaw(&g_priQueue[ArchCurrCpuid()].lock);
    OsSpinLockRaw(&g_taskSpin);
}

STATIC INLINE VOID OsSchedUnlockLocalRestore(UINT32 intSave)
{
    LOS_SpinUnlockNoSched(&g_priQueue[ArchCurrCpuid()].lock);
    LOS_IntRestore(intSave);
}

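/*
 * Lock the priority queues of two CPUs. When the ids differ, the locks are
 * always taken in ascending CPU id order, so concurrent callers locking the
 * same pair of queues cannot deadlock against each other.
 */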
STATIC INLINE VOID OsSchedLockTwoRaw(UINT32 cpu1, UINT32 cpu2)
{
    if (cpu1 == cpu2) {
        OsSpinLockRaw(&g_priQueue[cpu1].lock);
        return;
    }

    if (cpu1 < cpu2) {
        OsSpinLockRaw(&g_priQueue[cpu1].lock);
        OsSpinLockRaw(&g_priQueue[cpu2].lock);
        return;
    }

    /* cpu1 > cpu2 */
    OsSpinLockRaw(&g_priQueue[cpu2].lock);
    OsSpinLockRaw(&g_priQueue[cpu1].lock);
}

STATIC INLINE VOID OsSchedUnlockRemoteRaw(UINT32 cpu)
{
    if (cpu == ArchCurrCpuid()) {
        return;
    }

    OsSpinUnlockRaw(&g_priQueue[cpu].lock);
}

/*
 * Setting a task's affinity may cause a ready task to be migrated to another cpu.
 * When the scheduler uses the global priority queue, this function does nothing.
 */
VOID OsSchedAffiChange(VOID *task);

STATIC INLINE VOID OsSchedResume(LosTaskCB *taskCB, UINT32 intSave)
{
    taskCB->taskExtStatus = OS_TASK_STATUS_ENQUE;
    LOS_SpinUnlockNoSched(&g_taskSpin);

    taskCB->taskStatus |= OS_TASK_STATUS_READY;
    OsPriQueueEnqueueProtect(&taskCB->pendList, taskCB->priority, PRI_QUEUE_TAIL);
    taskCB->taskExtStatus &= ~OS_TASK_STATUS_ENQUE;

    LOS_IntRestore(intSave);
}

STATIC INLINE UINT32 OsSchedYield(LosTaskCB *runTask)
{
    UINT32 cpuId;

    runTask->taskExtStatus = OS_TASK_STATUS_ENQUE | OS_TASK_STATUS_SCHED;
    OsSpinUnlockRaw(&g_taskSpin);

    cpuId = OsSchedPickCpu(runTask);

    OsSchedLockTwoRaw(ArchCurrCpuid(), cpuId);

#ifdef LOSCFG_BASE_CORE_TIMESLICE
    /* reset timeslice of yielded task */
    runTask->timeSlice = 0;
#endif

    if (OsPriQueueIsEmpty(runTask->priority)) {
        OsSchedUnlockRemoteRaw(cpuId);
        runTask->taskExtStatus &= ~(OS_TASK_STATUS_ENQUE | OS_TASK_STATUS_SCHED);
        return LOS_ERRNO_TSK_YIELD_NOT_ENOUGH_TASK;
    }

    runTask->taskStatus |= OS_TASK_STATUS_READY;
    runTask->currCpu = cpuId;
    OsPriQueueSimpleEnqueue(&runTask->pendList, PRI_QUEUE_TAIL);
    OsSchedUnlockRemoteRaw(cpuId);

    runTask->taskExtStatus &= ~OS_TASK_STATUS_ENQUE;
    OsSchedResched();

    return LOS_OK;
}

STATIC INLINE UINT32 OsSchedPrioSet(LosTaskCB *taskCB, UINT16 priority, BOOL *needSched)
{
    BOOL needRetry;
    UINT16 tempStatus;

    do {
        /*
         * By default, the loop is executed only once. If the priqueue dequeue fails, or the
         * task is inoperable, we need to re-check the task status and retry the corresponding
         * operation.
         */
        needRetry = FALSE;
        if (taskCB->taskExtStatus & OS_TASK_IS_INOPERABLE) {
            needRetry = TRUE;
            continue;
        }

        tempStatus = taskCB->taskStatus;
        if (tempStatus & OS_TASK_STATUS_UNUSED) {
            return LOS_ERRNO_TSK_NOT_CREATED;
        }

        if (OS_TASK_IS_ZOMBIE(tempStatus)) {
            return LOS_ERRNO_TSK_IS_ZOMBIE;
        }

        if (tempStatus & OS_TASK_STATUS_READY) {
            taskCB->taskExtStatus = OS_TASK_STATUS_ENQUE | OS_TASK_STATUS_DEQUE;
            OsSpinUnlockRaw(&g_taskSpin);

            OsSpinLockRaw(&g_priQueue[taskCB->currCpu].lock);

            if (!(taskCB->taskStatus & OS_TASK_STATUS_READY)) {
                OsSpinUnlockRaw(&g_priQueue[taskCB->currCpu].lock);
                taskCB->taskExtStatus &= ~(OS_TASK_STATUS_ENQUE | OS_TASK_STATUS_DEQUE);

                OsSpinLockRaw(&g_taskSpin);
                needRetry = TRUE;
                continue;
            }

            OsPriQueueDequeue(&taskCB->pendList);

            taskCB->priority = priority;
            OsPriQueueSimpleEnqueue(&taskCB->pendList, PRI_QUEUE_TAIL);
            *needSched = TRUE;
            OsSpinUnlockRaw(&g_priQueue[taskCB->currCpu].lock);
            taskCB->taskExtStatus &= ~(OS_TASK_STATUS_ENQUE | OS_TASK_STATUS_DEQUE);

            OsSpinLockRaw(&g_taskSpin);
        } else {
            taskCB->priority = priority;
            if (tempStatus & OS_TASK_STATUS_RUNNING) {
                *needSched = TRUE;
            }
        }
    } while (needRetry);

    return LOS_OK;
}

/*
 * Description : Removes a ready task from the priority queue.
 * Input       : taskCB     --- Task to be removed
 * Return      : TRUE on success or FALSE on failure
 */
STATIC INLINE BOOL OsSchedReadyTaskRemove(LosTaskCB *taskCB)
{
    taskCB->taskExtStatus = OS_TASK_STATUS_DEQUE;
    BARRIER();
    OsSpinUnlockRaw(&g_taskSpin);

    OsSpinLockRaw(&g_priQueue[taskCB->currCpu].lock);

    if (!(taskCB->taskStatus & OS_TASK_STATUS_READY)) {
        OsSpinUnlockRaw(&g_priQueue[taskCB->currCpu].lock);
        taskCB->taskExtStatus &= ~OS_TASK_STATUS_DEQUE;

        OsSpinLockRaw(&g_taskSpin);
        return FALSE;
    }

    OsPriQueueDequeue(&taskCB->pendList);

    /*
     * There is no lock held here, but the OS_TASK_STATUS_DEQUE protection is used,
     * so setting taskStatus needs to be done before clearing OS_TASK_STATUS_DEQUE.
     */
    taskCB->taskStatus &= ~OS_TASK_STATUS_READY;
    if (OS_TASK_IS_JOINABLE(taskCB)) {
        taskCB->taskStatus |= OS_TASK_STATUS_ZOMBIE;
    } else {
        taskCB->taskStatus |= OS_TASK_STATUS_UNUSED;
    }

    OsSpinUnlockRaw(&g_priQueue[taskCB->currCpu].lock);
    taskCB->taskExtStatus &= ~OS_TASK_STATUS_DEQUE;

    OsSpinLockRaw(&g_taskSpin);
    return TRUE;
}

#else /* LOSCFG_SCHED_SQ */
extern LosTaskCB *OsGetTopTask(VOID);

STATIC INLINE VOID OsSchedLock(VOID)
{
    LOS_SpinLock(&g_taskSpin);
}

STATIC INLINE VOID OsSchedUnlock(VOID)
{
    LOS_SpinUnlock(&g_taskSpin);
}

STATIC INLINE VOID OsSchedLockGlobal2Local(VOID) {}
STATIC INLINE VOID OsSchedLockLocal2Global(VOID) {}

STATIC INLINE VOID OsSchedUnlockLocalRestore(UINT32 intSave)
{
    SCHEDULER_UNLOCK(intSave);
}

STATIC INLINE VOID OsSchedAffiChange(VOID *task)
{
    (VOID)task;
}

STATIC INLINE VOID OsSchedResume(LosTaskCB *taskCB, UINT32 intSave)
{
    taskCB->taskStatus |= OS_TASK_STATUS_READY;
    OsPriQueueEnqueue(&taskCB->pendList, taskCB->priority, PRI_QUEUE_TAIL);

    SCHEDULER_UNLOCK(intSave);
}

STATIC INLINE UINT32 OsSchedYield(LosTaskCB *runTask)
{
#ifdef LOSCFG_BASE_CORE_TIMESLICE
    /* reset timeslice of yielded task */
    runTask->timeSlice = 0;
#endif

    if (OsPriQueueIsEmpty(runTask->priority)) {
        return LOS_ERRNO_TSK_YIELD_NOT_ENOUGH_TASK;
    }

    runTask->taskStatus |= OS_TASK_STATUS_READY;
    OsPriQueueEnqueue(&runTask->pendList, runTask->priority, PRI_QUEUE_TAIL);

    OsSchedResched();

    return LOS_OK;
}

STATIC INLINE UINT32 OsSchedPrioSet(LosTaskCB *taskCB, UINT16 priority, BOOL *needSched)
{
    UINT16 tempStatus = taskCB->taskStatus;

    if (tempStatus & OS_TASK_STATUS_UNUSED) {
        return LOS_ERRNO_TSK_NOT_CREATED;
    }

    if (OS_TASK_IS_ZOMBIE(tempStatus)) {
        return LOS_ERRNO_TSK_IS_ZOMBIE;
    }

    if (tempStatus & OS_TASK_STATUS_READY) {
        OsPriQueueDequeue(&taskCB->pendList);
        taskCB->priority = priority;
        OsPriQueueEnqueue(&taskCB->pendList, taskCB->priority, PRI_QUEUE_TAIL);
        *needSched = TRUE;
    } else {
        taskCB->priority = priority;
        if (tempStatus & OS_TASK_STATUS_RUNNING) {
            *needSched = TRUE;
        }
    }

    return LOS_OK;
}

/*
 * Description : Removes a ready task from the priority queue.
 * Input       : taskCB     --- Task to be removed
 * Return      : TRUE on success or FALSE on failure
 */
STATIC INLINE BOOL OsSchedReadyTaskRemove(LosTaskCB *taskCB)
{
    OsPriQueueDequeue(&taskCB->pendList);
    taskCB->taskStatus &= ~OS_TASK_STATUS_READY;
    return TRUE;
}

#endif /* LOSCFG_SCHED_MQ */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _LOS_SCHED_PRI_H */