/*
 * Copyright (c) 2013-2019, Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020, Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 * conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 * of conditions and the following disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "linux/workqueue.h"
#include "unistd.h"
#include "los_event.h"
#include "los_init.h"
#include "los_swtmr_pri.h"

#define DELAY_TIME 10000 /* polling interval, in microseconds, used with usleep() */

struct workqueue_struct *g_pstSystemWq = NULL;
/* Spinlock protecting the workqueue module; it only takes effect in SMP mode */
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_workqueueSpin);

STATIC cpu_workqueue_struct *InitCpuWorkqueue(struct workqueue_struct *wq, INT32 cpu);
STATIC UINT32 CreateWorkqueueThread(cpu_workqueue_struct *cwq, INT32 cpu);
STATIC VOID WorkerThread(cpu_workqueue_struct *cwq);
STATIC VOID RunWorkqueue(cpu_workqueue_struct *cwq);
STATIC VOID DelayedWorkTimerFunc(unsigned long data);
/*
 * @ingroup workqueue
 * Obtain the work_struct that contains the given list node (a container_of-style macro).
 */
#define worklist_entry(ptr, type, member) ((type *)((char *)(ptr) - ((UINTPTR)&(((type *)0)->member))))
/*
 * @ingroup workqueue
 * Traverse the works in a workqueue.
 */
#define LIST_FOR_WORK(pos, listObject, type, field) \
    for ((pos) = LOS_DL_LIST_ENTRY((listObject)->next, type, field); \
         &(pos)->field != (listObject); \
         (pos) = LOS_DL_LIST_ENTRY((pos)->field.next, type, field))

/* Traversal that is safe against removal of the current node. */
#define LIST_FOR_WORK_DEL(pos, nextNode, listObject, type, field) \
    for ((pos) = LOS_DL_LIST_ENTRY((listObject)->next, type, field), \
         (nextNode) = LOS_DL_LIST_ENTRY((pos)->field.next, type, field); \
         &(pos)->field != (listObject); \
         (pos) = (nextNode), (nextNode) = LOS_DL_LIST_ENTRY((pos)->field.next, type, field))

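/*
 * Initialize a delayed work item: bind its handler, attach its timer to
 * DelayedWorkTimerFunc with the work itself as the timer argument, and
 * clear its status bits.
 */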
void linux_init_delayed_work(struct delayed_work *dwork, work_func_t func)
{
    if ((dwork == NULL) || (func == NULL)) {
        return;
    }
    INIT_WORK(&(dwork->work), func);
    init_timer(&dwork->timer);
    dwork->timer.function = DelayedWorkTimerFunc;
    dwork->timer.data = (unsigned long)(UINTPTR)dwork;
    dwork->work.work_status = 0;
}

STATIC UINT32 WorkqueueIsEmpty(cpu_workqueue_struct *cwq)
{
    return list_empty(&cwq->worklist);
}

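/*
 * Allocate a workqueue and spawn its worker thread. Only single-threaded
 * workqueues are supported; the key and lockName parameters exist for Linux
 * API compatibility and are ignored.
 */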
struct workqueue_struct *__create_workqueue_key(char *name,
                                                int singleThread,
                                                int freezeable,
                                                int rt,
                                                struct lock_class_key *key,
                                                const char *lockName)
{
    struct workqueue_struct *wq = NULL;
    cpu_workqueue_struct *cwq = NULL;
    UINT32 ret;
    (VOID)key;
    (VOID)lockName;

    if (name == NULL) {
        return NULL;
    }

    wq = (struct workqueue_struct *)LOS_MemAlloc(m_aucSysMem0, sizeof(struct workqueue_struct));
    if (wq == NULL) {
        return NULL;
    }

    wq->cpu_wq = (cpu_workqueue_struct *)LOS_MemAlloc(m_aucSysMem0, sizeof(cpu_workqueue_struct));
    if (wq->cpu_wq == NULL) {
        (VOID)LOS_MemFree(m_aucSysMem0, wq);
        return NULL;
    }

    wq->name = name;
    wq->singlethread = singleThread;
    wq->freezeable = freezeable;
    wq->rt = rt;
    wq->delayed_work_count = 0;
    INIT_LIST_HEAD(&wq->list);
    (VOID)LOS_EventInit(&wq->wq_event);

    if (singleThread) {
        cwq = InitCpuWorkqueue(wq, singleThread);
        ret = CreateWorkqueueThread(cwq, singleThread);
    } else {
        /* multi-threaded workqueues are not supported */
        (VOID)LOS_MemFree(m_aucSysMem0, wq->cpu_wq);
        (VOID)LOS_MemFree(m_aucSysMem0, wq);
        return NULL;
    }

    if (ret != LOS_OK) {
        destroy_workqueue(wq);
        wq = NULL;
    }

    return wq;
}

struct workqueue_struct *linux_create_singlethread_workqueue(char *name)
{
    return __create_workqueue_key(name, 1, 0, 0, NULL, NULL);
}

STATIC cpu_workqueue_struct *InitCpuWorkqueue(struct workqueue_struct *wq, INT32 cpu)
{
    cpu_workqueue_struct *cwq = wq->cpu_wq;
    (VOID)cpu;

    cwq->wq = wq;
    INIT_LIST_HEAD(&cwq->worklist);

    return cwq;
}

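/*
 * Create the detached worker task that services the queue. The task runs
 * WorkerThread at priority 1 with the default kernel stack size; the creator
 * then yields the CPU.
 */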
STATIC UINT32 CreateWorkqueueThread(cpu_workqueue_struct *cwq, INT32 cpu)
{
    struct workqueue_struct *wq = cwq->wq;
    TSK_INIT_PARAM_S taskInitParam = {0};
    UINT32 ret;
    (VOID)cpu;

    taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)WorkerThread;
    taskInitParam.uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
    taskInitParam.pcName = wq->name;
    taskInitParam.usTaskPrio = 1;
    taskInitParam.auwArgs[0] = (UINTPTR)cwq;
    taskInitParam.uwResved = LOS_TASK_STATUS_DETACHED;

    ret = LOS_TaskCreate(&cwq->wq->wq_id, &taskInitParam);
    if (ret != LOS_OK) {
        return LOS_NOK;
    }

    cwq->thread = (task_struct *)OS_TCB_FROM_TID(cwq->wq->wq_id);
    (VOID)LOS_TaskYield();

    return LOS_OK;
}

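/*
 * Worker task entry: sleep on the queue event whenever the list is empty,
 * otherwise drain it one work at a time. The loop never returns; the task
 * is deleted in linux_destroy_workqueue().
 */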
STATIC VOID WorkerThread(cpu_workqueue_struct *cwqParam)
{
    cpu_workqueue_struct *cwq = cwqParam;

    for (;;) {
        if (WorkqueueIsEmpty(cwq)) {
            (VOID)LOS_EventRead(&(cwq->wq->wq_event), 0x01, LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);
        }
        RunWorkqueue(cwq);
    }
}

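/*
 * Execute the work at the head of the list. The spinlock is dropped around
 * the handler invocation so the handler itself may queue or cancel works;
 * the status bits are cleared afterwards unless the handler re-queued the
 * work (re-queueing overwrites the RUNNING bit with PENDING).
 */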
STATIC VOID RunWorkqueue(cpu_workqueue_struct *cwq)
{
    struct work_struct *work = NULL;
    UINT32 intSave;
    work_func_t func = NULL;

    LOS_SpinLockSave(&g_workqueueSpin, &intSave);

    if (!WorkqueueIsEmpty(cwq)) {
        work = worklist_entry(cwq->worklist.next, struct work_struct, entry);
        work->work_status |= WORK_STRUCT_RUNNING;
        list_del_init(cwq->worklist.next);
        func = work->func;

        cwq->current_work = work;
        LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
        func(work);
        LOS_SpinLockSave(&g_workqueueSpin, &intSave);
        cwq->current_work = NULL;

        if (work->work_status & WORK_STRUCT_RUNNING) {
            work->work_status &= ~(WORK_STRUCT_RUNNING | WORK_STRUCT_PENDING);
        }
    }

    LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
}

STATIC VOID ListAdd(struct list_head *newNode, struct list_head *prev, struct list_head *next)
{
    next->prev = newNode;
    newNode->next = next;
    newNode->prev = prev;
    prev->next = newNode;
}

#ifdef WORKQUEUE_SUPPORT_PRIORITY
/* Insert a work before the first queued work whose priority value is larger (i.e. lower priority). */
STATIC VOID WorkListAdd(struct list_head *newNode, struct list_head *head, UINT32 workPri)
{
    struct work_struct *work = NULL;
    struct list_head *list = head;
    do {
        list = list->next;
        if (list == head) {
            break;
        }
        work = worklist_entry(list, struct work_struct, entry);
    } while (work->work_pri <= workPri);

    ListAdd(newNode, list->prev, list);
}
#else
STATIC VOID WorkListAddTail(struct list_head *newNode, struct list_head *head)
{
    ListAdd(newNode, head->prev, head);
}
#endif

STATIC VOID InsertWork(cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, UINT32 *intSave)
{
#ifdef WORKQUEUE_SUPPORT_PRIORITY
    WorkListAdd(&work->entry, head, work->work_pri);
#else
    WorkListAddTail(&work->entry, head);
#endif
    /* Drop the lock while waking the worker, then re-take it for the caller. */
    LOS_SpinUnlockRestore(&g_workqueueSpin, *intSave);
    (VOID)LOS_EventWrite(&(cwq->wq->wq_event), 0x01);
    LOS_SpinLockSave(&g_workqueueSpin, intSave);
}

STATIC VOID QueueWork(cpu_workqueue_struct *cwq, struct work_struct *work, UINT32 *intSave)
{
    InsertWork(cwq, work, &cwq->worklist, intSave);
}

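/*
 * Insert a work unless it is already on the queue. Called with
 * g_workqueueSpin held; returns TRUE if the work was inserted.
 */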
STATIC bool QueueWorkOn(struct workqueue_struct *wq, struct work_struct *work, UINT32 *intSave)
{
    bool ret = FALSE;
    struct work_struct *tmpWork = NULL;

    if (WorkqueueIsEmpty(wq->cpu_wq)) {
        ret = TRUE;
    } else {
        LIST_FOR_WORK(tmpWork, &wq->cpu_wq->worklist, struct work_struct, entry) {
            if (tmpWork == work) {
                return FALSE; /* already queued */
            }
        }
        ret = TRUE;
    }
    QueueWork(wq->cpu_wq, work, intSave);

    return ret;
}

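/*
 * Queue a work on the given workqueue; returns FALSE if an argument is
 * invalid or the work is already queued. A minimal usage sketch (the handler
 * and work names are illustrative, not part of this module):
 *
 *     static void MyHandler(struct work_struct *work) { ... }
 *     static struct work_struct g_myWork;
 *
 *     INIT_WORK(&g_myWork, MyHandler);
 *     (void)queue_work(g_pstSystemWq, &g_myWork);
 */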
bool linux_queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
    bool ret = FALSE;
    UINT32 intSave;

    if ((wq == NULL) || (wq->name == NULL) || (work == NULL)) {
        return FALSE;
    }
    LOS_SpinLockSave(&g_workqueueSpin, &intSave);

    work->work_status = WORK_STRUCT_PENDING;
    ret = QueueWorkOn(wq, work, &intSave);

    LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
    return ret;
}

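/*
 * Wait for a work to finish. Unlike the Linux cancel_work_sync(), this
 * version does not remove a pending work from the queue; it returns TRUE
 * only when the work was pending but not yet running, then busy-waits in
 * DELAY_TIME steps until the status clears.
 */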
bool linux_cancel_work_sync(struct work_struct *work)
{
    bool ret = FALSE;

    if (work == NULL) {
        return FALSE;
    }

    if (!work->work_status) {
        ret = FALSE;
    } else if (work->work_status & WORK_STRUCT_RUNNING) {
        ret = FALSE;
    } else {
        ret = TRUE;
    }
    while (work->work_status) {
        (VOID)usleep(DELAY_TIME);
    }
    return ret;
}

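/*
 * Block until a pending work has been executed. Returns TRUE if the work
 * was pending and has now completed, FALSE if it was not pending.
 */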
bool linux_flush_work(struct work_struct *work)
{
    if (work == NULL) {
        return FALSE;
    }

    if (work->work_status & WORK_STRUCT_PENDING) {
        while (work->work_status) {
            (VOID)usleep(DELAY_TIME);
        }
        return TRUE;
    }
    return FALSE;
}

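/*
 * Software-timer callback for delayed works: when the delay expires, delete
 * the timer, decrement the queue's delayed-work count and move the work
 * onto its workqueue.
 */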
STATIC VOID DelayedWorkTimerFunc(unsigned long data)
{
    struct delayed_work *dwork = (struct delayed_work *)data;
    UINT32 intSave;

    LOS_SpinLockSave(&g_workqueueSpin, &intSave);
    /* It should have been called from an irq-safe timer with interrupts already off. */
    dwork->wq->delayed_work_count--;
    (VOID)del_timer(&dwork->timer);
    (VOID)QueueWorkOn(dwork->wq, &dwork->work, &intSave);
    LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
}

STATIC BOOL OsDelayWorkQueueCond(UINTPTR sortList, UINTPTR dwork)
{
    SWTMR_CTRL_S *swtmr = LOS_DL_LIST_ENTRY(sortList, SWTMR_CTRL_S, stSortList);
    return (((struct delayed_work *)swtmr->uwArg) == (struct delayed_work *)dwork);
}

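/*
 * Arm a work to be queued after the given delay; a zero delay queues it
 * immediately. The call is rejected if the work is already waiting on a
 * ticking timer or already sits on the queue. Usage sketch (the names are
 * illustrative, and init_delayed_work() is assumed to be the header's
 * un-prefixed alias of linux_init_delayed_work(), matching the other
 * un-prefixed names used in this file):
 *
 *     static struct delayed_work g_myDwork;
 *
 *     init_delayed_work(&g_myDwork, MyHandler);
 *     (void)queue_delayed_work(g_pstSystemWq, &g_myDwork, 100);
 */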
bool queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned int delayTime)
{
    UINT32 intSave;
    struct work_struct *work = NULL;

    if ((wq == NULL) || (wq->name == NULL) || (wq->cpu_wq == NULL) || (dwork == NULL)) {
        return FALSE;
    }

    dwork->wq = wq;
    if (delayTime == 0) {
        return queue_work(dwork->wq, &dwork->work);
    }

    LOS_SpinLockSave(&g_workqueueSpin, &intSave);
    if (OsSwtmrWorkQueueFind(OsDelayWorkQueueCond, (UINTPTR)dwork)) {
        /* the delayed work is already waiting on a ticking timer */
        LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
        return FALSE;
    }

    if (!WorkqueueIsEmpty(wq->cpu_wq)) {
        LIST_FOR_WORK(work, &wq->cpu_wq->worklist, struct work_struct, entry) {
            if (work == &dwork->work) {
                LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
                return FALSE;
            }
        }
    }
    dwork->timer.expires = delayTime;
    add_timer(&dwork->timer);
    wq->delayed_work_count++;
    dwork->work.work_status = WORK_STRUCT_PENDING;
    LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
    return TRUE;
}

STATIC BOOL OsDelayWorkCancelCond(UINTPTR sortList, UINTPTR dwork)
{
    SWTMR_CTRL_S *swtmr = LOS_DL_LIST_ENTRY(sortList, SWTMR_CTRL_S, stSortList);
    if ((swtmr->usTimerID == ((struct delayed_work *)dwork)->timer.timerid) &&
        (swtmr->ucState == OS_SWTMR_STATUS_TICKING)) {
        return TRUE;
    }
    return FALSE;
}

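/*
 * Cancel a delayed work. If its timer is still ticking the timer is deleted;
 * if the work has already moved to the queue but has not started, it is
 * unlinked from the list. A work that is currently running is not cancelled
 * and FALSE is returned.
 */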
bool linux_cancel_delayed_work(struct delayed_work *dwork)
{
    struct work_struct *work = NULL;
    struct work_struct *workNext = NULL;
    UINT32 intSave;
    bool ret = FALSE;

    if ((dwork == NULL) || (dwork->wq == NULL)) {
        return FALSE;
    }

    LOS_SpinLockSave(&g_workqueueSpin, &intSave);

    if (dwork->work.work_status & WORK_STRUCT_PENDING) {
        if (OsSwtmrWorkQueueFind(OsDelayWorkCancelCond, (UINTPTR)dwork)) {
            (VOID)del_timer(&dwork->timer);
            dwork->work.work_status = 0;
            dwork->wq->delayed_work_count--;
            LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
            return TRUE;
        }

        if (dwork->work.work_status & WORK_STRUCT_RUNNING) {
            ret = FALSE;
        } else if (dwork->work.work_status & WORK_STRUCT_PENDING) {
            LIST_FOR_WORK_DEL(work, workNext, &dwork->wq->cpu_wq->worklist, struct work_struct, entry) {
                if (work == &dwork->work) {
                    list_del_init(&work->entry);
                    dwork->work.work_status = 0;
                    ret = TRUE;
                    break;
                }
            }
        }
    }

    LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
    return ret;
}

bool linux_cancel_delayed_work_sync(struct delayed_work *dwork)
{
    return cancel_delayed_work(dwork);
}

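/*
 * Force a pending delayed work to run now: a still-ticking timer is deleted
 * and the work is queued immediately, then the call blocks until the work
 * has executed.
 */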
bool linux_flush_delayed_work(struct delayed_work *dwork)
{
    UINT32 intSave;

    if ((dwork == NULL) || (dwork->wq == NULL)) {
        return FALSE;
    }

    LOS_SpinLockSave(&g_workqueueSpin, &intSave);
    if (!(dwork->work.work_status & WORK_STRUCT_PENDING)) {
        LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
        return FALSE;
    }

    if (OsSwtmrWorkQueueFind(OsDelayWorkCancelCond, (UINTPTR)dwork)) {
        (VOID)del_timer(&dwork->timer);
        dwork->wq->delayed_work_count--;
        /* release the lock before re-queueing; queue_work takes it again */
        LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
        (VOID)queue_work(dwork->wq, &dwork->work);
    } else {
        LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
    }
    (VOID)flush_work(&dwork->work);
    return TRUE;
}

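/*
 * Report the state of a work as a bit mask of WORK_BUSY_PENDING and
 * WORK_BUSY_RUNNING; 0 means the work is idle (or NULL was passed).
 */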
unsigned int linux_work_busy(struct work_struct *work)
{
    UINT32 ret = 0;

    if (work == NULL) {
        return 0;
    }

    if (work->work_status & WORK_STRUCT_PENDING) {
        ret |= WORK_BUSY_PENDING;
    }
    if (work->work_status & WORK_STRUCT_RUNNING) {
        ret |= WORK_BUSY_RUNNING;
    }
    return ret;
}

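/*
 * Queue a work on the default system workqueue (g_pstSystemWq), mirroring
 * the Linux schedule_work() helper.
 */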
bool linux_schedule_work(struct work_struct *work)
{
    bool ret = FALSE;
    UINT32 intSave;

    if ((g_pstSystemWq == NULL) || (g_pstSystemWq->name == NULL) || (work == NULL)) {
        return FALSE;
    }

    LOS_SpinLockSave(&g_workqueueSpin, &intSave);

    work->work_status = WORK_STRUCT_PENDING;
    ret = QueueWorkOn(g_pstSystemWq, work, &intSave);

    LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
    return ret;
}

bool linux_schedule_delayed_work(struct delayed_work *dwork, unsigned int delayTime)
{
    if ((g_pstSystemWq == NULL) || (dwork == NULL)) {
        return FALSE;
    }

    return queue_delayed_work(g_pstSystemWq, dwork, delayTime);
}

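/*
 * Busy-wait until the queue is empty and no delayed works remain armed;
 * called before a workqueue is destroyed.
 */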
static void drain_workqueue(struct workqueue_struct *wq)
{
    UINT32 intSave;

    for (;;) {
        (VOID)usleep(DELAY_TIME);
        LOS_SpinLockSave(&g_workqueueSpin, &intSave);
        if (WorkqueueIsEmpty(wq->cpu_wq) && (wq->delayed_work_count == 0)) {
            break;
        }
        LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
    }
    LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
}

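/*
 * Tear down a workqueue: drain outstanding works, delete the worker task,
 * destroy the wakeup event and free both control blocks.
 */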
void linux_destroy_workqueue(struct workqueue_struct *wq)
{
    UINT32 intSave;

    if (wq == NULL) {
        return;
    }

    /* Drain the queue before proceeding with destruction. */
    drain_workqueue(wq);

    (VOID)LOS_TaskDelete(wq->wq_id);

    LOS_SpinLockSave(&g_workqueueSpin, &intSave);
    wq->name = NULL;
    list_del_init(&wq->list);
    (VOID)LOS_EventDestroy(&(wq->wq_event));

    (VOID)LOS_MemFree(m_aucSysMem0, wq->cpu_wq);
    (VOID)LOS_MemFree(m_aucSysMem0, wq);
    LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
}

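/*
 * Create the default system workqueue ("system_wq") during extended kernel
 * module initialization; schedule_work() and schedule_delayed_work() target
 * this queue.
 */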
UINT32 OsSysWorkQueueInit(VOID)
{
    g_pstSystemWq = create_workqueue("system_wq");

    return LOS_OK;
}

LOS_MODULE_INIT(OsSysWorkQueueInit, LOS_INIT_LEVEL_KMOD_EXTENDED);