1 /*
2 * Copyright (c) 2013-2019, Huawei Technologies Co., Ltd. All rights reserved.
3 * Copyright (c) 2020, Huawei Device Co., Ltd. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without modification,
6 * are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this list of
9 * conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
12 * of conditions and the following disclaimer in the documentation and/or other materials
13 * provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
16 * to endorse or promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include "linux/workqueue.h"
33 #include "unistd.h"
34 #include "los_event.h"
35 #include "los_init.h"
36 #include "los_swtmr_pri.h"
37
38 #define DELAY_TIME 10000
39
40 struct workqueue_struct *g_pstSystemWq = NULL;
41
42 /* spinlock for workqueue module only available on SMP mode */
43 LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_workqueueSpin);
44
45 STATIC cpu_workqueue_struct *InitCpuWorkqueue(struct workqueue_struct *wq, INT32 cpu);
46 STATIC UINT32 CreateWorkqueueThread(cpu_workqueue_struct *cwq, INT32 cpu);
47 STATIC VOID WorkerThread(cpu_workqueue_struct *cwq);
48 STATIC VOID RunWorkqueue(cpu_workqueue_struct *cwq);
49 STATIC VOID DelayedWorkTimerFunc(unsigned long data);
50 typedef BOOL (*OsSortLinkCond)(SWTMR_CTRL_S *swtmr, struct delayed_work *dwork);
51
52 /*
53 * @ingroup workqueue
54 * Obtain the first work in a workqueue.
55 */
56 #define worklist_entry(ptr, type, member) ((type *)((char *)(ptr)-((UINTPTR)&(((type*)0)->member))))
57
58 /*
59 * @ingroup workqueue
60 * Traverse a workqueue.
61 */
62 #define LIST_FOR_WORK(pos, listObject, type, field) \
63 for ((pos) = LOS_DL_LIST_ENTRY((listObject)->next, type, field); \
64 &(pos)->field != (listObject); \
65 (pos) = LOS_DL_LIST_ENTRY((pos)->field.next, type, field))
66
67 #define LIST_FOR_WORK_DEL(pos, nextNode, listObject, type, field) \
68 for ((pos) = LOS_DL_LIST_ENTRY((listObject)->next, type, field), \
69 (nextNode) = LOS_DL_LIST_ENTRY((pos)->field.next, type, field); \
70 &(pos)->field != (listObject); \
71 (pos) = (nextNode), (nextNode) = LOS_DL_LIST_ENTRY((pos)->field.next, type, field))
72
73
/*
 * Initialize a delayed work item: set up its work entry and the backing
 * one-shot timer whose expiry hands the work to its queue
 * (see DelayedWorkTimerFunc). No-op when either argument is NULL.
 */
void linux_init_delayed_work(struct delayed_work *dwork, work_func_t func)
{
    if ((func == NULL) || (dwork == NULL)) {
        return;
    }

    INIT_WORK(&dwork->work, func);
    init_timer(&dwork->timer);
    dwork->timer.function = DelayedWorkTimerFunc;
    /* the timer callback recovers the delayed_work from this cookie */
    dwork->timer.data = (unsigned long)(UINTPTR)dwork;
    dwork->work.work_status = 0;
}
85
WorkqueueIsEmpty(cpu_workqueue_struct * cwq)86 STATIC UINT32 WorkqueueIsEmpty(cpu_workqueue_struct *cwq)
87 {
88 UINT32 ret;
89 ret = list_empty(&cwq->worklist);
90 return ret;
91 }
92
__create_workqueue_key(char * name,int singleThread,int freezeable,int rt,struct lock_class_key * key,const char * lockName)93 struct workqueue_struct *__create_workqueue_key(char *name,
94 int singleThread,
95 int freezeable,
96 int rt,
97 struct lock_class_key *key,
98 const char *lockName)
99 {
100 struct workqueue_struct *wq = NULL;
101 cpu_workqueue_struct *cwq = NULL;
102 UINT32 ret;
103 (VOID)key;
104 (VOID)lockName;
105
106 if (name == NULL) {
107 return NULL;
108 }
109
110 wq = (struct workqueue_struct *)LOS_MemAlloc(m_aucSysMem0, sizeof(struct workqueue_struct));
111 if (wq == NULL) {
112 return NULL;
113 }
114
115 wq->cpu_wq = (cpu_workqueue_struct *)LOS_MemAlloc(m_aucSysMem0, sizeof(cpu_workqueue_struct));
116 if (wq->cpu_wq == NULL) {
117 (VOID)LOS_MemFree(m_aucSysMem0, wq);
118 return NULL;
119 }
120
121 wq->name = name;
122 wq->singlethread = singleThread;
123 wq->freezeable = freezeable;
124 wq->rt = rt;
125 wq->delayed_work_count = 0;
126 INIT_LIST_HEAD(&wq->list);
127 (VOID)LOS_EventInit(&wq->wq_event);
128
129 if (singleThread) {
130 cwq = InitCpuWorkqueue(wq, singleThread);
131 ret = CreateWorkqueueThread(cwq, singleThread);
132 } else {
133 LOS_MemFree(m_aucSysMem0, wq->cpu_wq);
134 LOS_MemFree(m_aucSysMem0, wq);
135 return NULL;
136 }
137
138 if (ret) {
139 destroy_workqueue(wq);
140 wq = NULL;
141 }
142
143 return wq;
144 }
145
linux_create_singlethread_workqueue(char * name)146 struct workqueue_struct *linux_create_singlethread_workqueue(char *name)
147 {
148 return __create_workqueue_key(name, 1, 0, 0, NULL, NULL);
149 }
150
InitCpuWorkqueue(struct workqueue_struct * wq,INT32 cpu)151 STATIC cpu_workqueue_struct *InitCpuWorkqueue(struct workqueue_struct *wq, INT32 cpu)
152 {
153 cpu_workqueue_struct *cwq = wq->cpu_wq;
154 (VOID)cpu;
155
156 cwq->wq = wq;
157 INIT_LIST_HEAD(&cwq->worklist);
158
159 return cwq;
160 }
161
CreateWorkqueueThread(cpu_workqueue_struct * cwq,INT32 cpu)162 STATIC UINT32 CreateWorkqueueThread(cpu_workqueue_struct *cwq, INT32 cpu)
163 {
164 struct workqueue_struct *wq = cwq->wq;
165 TSK_INIT_PARAM_S taskInitParam = {0};
166 UINT32 ret;
167 (VOID)cpu;
168
169 taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)WorkerThread;
170 taskInitParam.uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
171 taskInitParam.pcName = wq->name;
172 taskInitParam.usTaskPrio = 1;
173 taskInitParam.auwArgs[0] = (UINTPTR)cwq;
174 taskInitParam.uwResved = LOS_TASK_STATUS_DETACHED;
175
176 ret = LOS_TaskCreate(&cwq->wq->wq_id, &taskInitParam);
177 if (ret != LOS_OK) {
178 return LOS_NOK;
179 }
180
181 cwq->thread = (task_struct*)OS_TCB_FROM_TID(cwq->wq->wq_id);
182 (VOID)LOS_TaskYield();
183
184 return LOS_OK;
185 }
186
WorkerThread(cpu_workqueue_struct * cwqParam)187 STATIC VOID WorkerThread(cpu_workqueue_struct *cwqParam)
188 {
189 cpu_workqueue_struct *cwq = cwqParam;
190
191 for (;;) {
192 if (WorkqueueIsEmpty(cwq)) {
193 (VOID)LOS_EventRead(&(cwq->wq->wq_event), 0x01, LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);
194 }
195 RunWorkqueue(cwq);
196 }
197 }
198
RunWorkqueue(cpu_workqueue_struct * cwq)199 STATIC VOID RunWorkqueue(cpu_workqueue_struct *cwq)
200 {
201 struct work_struct *work = NULL;
202 UINT32 intSave;
203 work_func_t func = NULL;
204
205 LOS_SpinLockSave(&g_workqueueSpin, &intSave);
206
207 if (!WorkqueueIsEmpty(cwq)) {
208 work = worklist_entry(cwq->worklist.next, struct work_struct, entry);
209 work->work_status |= WORK_STRUCT_RUNNING;
210 list_del_init(cwq->worklist.next);
211 func = work->func;
212
213 cwq->current_work = work;
214 LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
215 func(work);
216 LOS_SpinLockSave(&g_workqueueSpin, &intSave);
217 cwq->current_work = NULL;
218
219 if (work->work_status & WORK_STRUCT_RUNNING) {
220 work->work_status &= ~(WORK_STRUCT_RUNNING | WORK_STRUCT_PENDING);
221 }
222 }
223
224 LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
225 }
226
ListAdd(struct list_head * newNode,struct list_head * prev,struct list_head * next)227 STATIC VOID ListAdd(struct list_head *newNode, struct list_head *prev, struct list_head *next)
228 {
229 next->prev = newNode;
230 newNode->next = next;
231 newNode->prev = prev;
232 prev->next = newNode;
233 }
234
235 #ifdef WORKQUEUE_SUPPORT_PRIORITY
/*
 * Insert newNode keeping the work list sorted by ascending work_pri:
 * walk past every queued work whose work_pri is <= workPri and link
 * newNode in front of the first entry with a larger value (or at the
 * tail when no such entry exists / the list is empty).
 */
STATIC VOID WorkListAdd(struct list_head *newNode, struct list_head *head, UINT32 workPri)
{
    struct work_struct *work = NULL;
    struct list_head *list = head;
    do {
        list = list->next;
        /* wrapped back to head: every entry sorts before the new one;
         * breaking here also keeps `work` untouched on an empty list */
        if (list == head) {
            break;
        }
        work = worklist_entry(list, struct work_struct, entry);
    } while (work->work_pri <= workPri);

    ListAdd(newNode, list->prev, list);
}
250 #else
WorkListAddTail(struct list_head * newNode,struct list_head * head)251 STATIC VOID WorkListAddTail(struct list_head *newNode, struct list_head *head)
252 {
253 ListAdd(newNode, head->prev, head);
254 }
255 #endif
256
/*
 * Link work into cwq's list (priority-ordered when
 * WORKQUEUE_SUPPORT_PRIORITY is defined, otherwise at the tail) and
 * wake the worker task.
 * Caller holds g_workqueueSpin (state in *intSave); the lock is dropped
 * around the event write and re-acquired before returning.
 */
STATIC VOID InsertWork(cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, UINT32 *intSave)
{
#ifdef WORKQUEUE_SUPPORT_PRIORITY
    WorkListAdd(&work->entry, head, work->work_pri);
#else
    WorkListAddTail(&work->entry, head);
#endif
    /* wake the worker without holding the module spinlock */
    LOS_SpinUnlockRestore(&g_workqueueSpin, *intSave);
    (VOID)LOS_EventWrite(&(cwq->wq->wq_event), 0x01);
    LOS_SpinLockSave(&g_workqueueSpin, intSave);
}
268
QueueWork(cpu_workqueue_struct * cwq,struct work_struct * work,UINT32 * intSave)269 STATIC VOID QueueWork(cpu_workqueue_struct *cwq, struct work_struct *work, UINT32 *intSave)
270 {
271 InsertWork(cwq, work, &cwq->worklist, intSave);
272 }
273
QueueWorkOn(struct workqueue_struct * wq,struct work_struct * work,UINT32 * intSave)274 STATIC bool QueueWorkOn(struct workqueue_struct *wq, struct work_struct *work, UINT32 *intSave)
275 {
276 bool ret = FALSE;
277 struct work_struct *tmpWork = NULL;
278
279 if (WorkqueueIsEmpty(wq->cpu_wq)) {
280 ret = TRUE;
281 } else {
282 LIST_FOR_WORK(tmpWork, &wq->cpu_wq->worklist, struct work_struct, entry) {
283 if (tmpWork == work) {
284 return FALSE;
285 }
286 }
287 ret = TRUE;
288 }
289 QueueWork(wq->cpu_wq, work, intSave);
290
291 return ret;
292 }
293
linux_queue_work(struct workqueue_struct * wq,struct work_struct * work)294 bool linux_queue_work(struct workqueue_struct *wq, struct work_struct *work)
295 {
296 bool ret = FALSE;
297 UINT32 intSave;
298
299 if ((wq == NULL) || (wq->name == NULL) || (work == NULL)) {
300 return FALSE;
301 }
302 LOS_SpinLockSave(&g_workqueueSpin, &intSave);
303
304 work->work_status = WORK_STRUCT_PENDING;
305 ret = QueueWorkOn(wq, work, &intSave);
306
307 LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
308 return ret;
309 }
310
linux_cancel_work_sync(struct work_struct * work)311 bool linux_cancel_work_sync(struct work_struct *work)
312 {
313 bool ret = FALSE;
314
315 if (work == NULL) {
316 return FALSE;
317 }
318
319 if (!work->work_status) {
320 ret = FALSE;
321 } else if (work->work_status & WORK_STRUCT_RUNNING) {
322 ret = FALSE;
323 } else {
324 ret = TRUE;
325 }
326 while (work->work_status) {
327 (VOID)usleep(DELAY_TIME);
328 }
329 return ret;
330 }
331
linux_flush_work(struct work_struct * work)332 bool linux_flush_work(struct work_struct *work)
333 {
334 if (work == NULL) {
335 return FALSE;
336 }
337
338 if (work->work_status & WORK_STRUCT_PENDING) {
339 while (work->work_status) {
340 (VOID)usleep(DELAY_TIME);
341 }
342 return TRUE;
343 }
344 return FALSE;
345 }
346
DelayedWorkTimerFunc(unsigned long data)347 STATIC VOID DelayedWorkTimerFunc(unsigned long data)
348 {
349 struct delayed_work *dwork = (struct delayed_work *)data;
350 UINT32 intSave;
351
352 LOS_SpinLockSave(&g_workqueueSpin, &intSave);
353 /* It should have been called from irqsafe timer with irq already off. */
354 dwork->wq->delayed_work_count--;
355 (VOID)del_timer(&dwork->timer);
356 (VOID)QueueWorkOn(dwork->wq, &dwork->work, &intSave);
357 LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
358 }
359
OsPerCpuSortLinkSearch(SortLinkAttribute * swtmrSortLink,OsSortLinkCond checkFunc,VOID * arg)360 STATIC BOOL OsPerCpuSortLinkSearch(SortLinkAttribute *swtmrSortLink, OsSortLinkCond checkFunc, VOID *arg)
361 {
362 LOS_DL_LIST *listObject = &swtmrSortLink->sortLink;
363 LOS_DL_LIST *list = listObject->pstNext;
364
365 while (list != listObject) {
366 SortLinkList *listSorted = LOS_DL_LIST_ENTRY(list, SortLinkList, sortLinkNode);
367 SWTMR_CTRL_S *curSwtmr = LOS_DL_LIST_ENTRY(listSorted, SWTMR_CTRL_S, stSortList);
368 if (checkFunc(curSwtmr, arg)) {
369 return TRUE;
370 }
371 list = list->pstNext;
372 }
373
374 return FALSE;
375 }
376
OsSortLinkSearch(OsSortLinkCond checkFunc,VOID * arg)377 BOOL OsSortLinkSearch(OsSortLinkCond checkFunc, VOID *arg)
378 {
379 UINT32 intSave;
380 UINT32 i;
381
382 for (i = 0; i < LOSCFG_KERNEL_CORE_NUM; i++) {
383 Percpu *cpu = OsPercpuGetByID(i);
384 SortLinkAttribute *swtmrSortLink = &OsPercpuGetByID(i)->swtmrSortLink;
385 LOS_SpinLockSave(&cpu->swtmrSortLinkSpin, &intSave);
386 if (OsPerCpuSortLinkSearch(swtmrSortLink, checkFunc, arg)) {
387 LOS_SpinUnlockRestore(&cpu->swtmrSortLinkSpin, intSave);
388 return TRUE;
389 }
390 LOS_SpinUnlockRestore(&cpu->swtmrSortLinkSpin, intSave);
391 }
392 return FALSE;
393 }
394
OsDelayWorkQueueCond(SWTMR_CTRL_S * swtmr,struct delayed_work * dwork)395 STATIC BOOL OsDelayWorkQueueCond(SWTMR_CTRL_S *swtmr, struct delayed_work *dwork)
396 {
397 return (((struct delayed_work *)swtmr->uwArg) == dwork);
398 }
399
/*
 * Linux-compatible queue_delayed_work(): arm dwork's timer so the work
 * is queued on wq after delayTime ticks. A zero delay queues the work
 * immediately via queue_work().
 * Returns FALSE on invalid arguments, when a timer for this dwork is
 * already ticking, or when the work item already sits on the pending
 * list; TRUE when the timer was armed (or queue_work succeeded).
 */
bool queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned int delayTime)
{
    UINT32 intSave, intSave1;
    struct work_struct *work = NULL;

    if ((wq == NULL) || (wq->name == NULL) || (wq->cpu_wq == NULL) || (dwork == NULL)) {
        return FALSE;
    }

    dwork->wq = wq;
    if (delayTime == 0) {
        return queue_work(dwork->wq, &dwork->work);
    }

    LOS_SpinLockSave(&g_workqueueSpin, &intSave);
    LOS_SpinLockSave(&g_swtmrSpin, &intSave1);
    /* already armed: a ticking software timer holds this dwork as its argument */
    if (OsSortLinkSearch(OsDelayWorkQueueCond, dwork)) {
        LOS_SpinUnlockRestore(&g_swtmrSpin, intSave1);
        LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
        return FALSE;
    }
    LOS_SpinUnlockRestore(&g_swtmrSpin, intSave1);
    /* already pending: the work item is sitting on the queue's worklist */
    if (!WorkqueueIsEmpty(wq->cpu_wq)) {
        LIST_FOR_WORK(work, &wq->cpu_wq->worklist, struct work_struct, entry) {
            if (work == &dwork->work) {
                LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
                return FALSE;
            }
        }
    }
    dwork->timer.expires = delayTime;
    add_timer(&dwork->timer);
    wq->delayed_work_count++;
    dwork->work.work_status = WORK_STRUCT_PENDING;
    LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
    return TRUE;
}
437
438
OsDelayWorkCancelCond(SWTMR_CTRL_S * swtmr,struct delayed_work * dwork)439 STATIC BOOL OsDelayWorkCancelCond(SWTMR_CTRL_S *swtmr, struct delayed_work *dwork)
440 {
441 if ((swtmr->usTimerID == dwork->timer.timerid) &&
442 (swtmr->ucState == OS_SWTMR_STATUS_TICKING)) {
443 return TRUE;
444 }
445 return FALSE;
446 }
447
/*
 * Cancel a delayed work item that has not started executing.
 * Two cancellable states:
 *  - the backing timer is still ticking: stop the timer and clear the
 *    status (delayed_work_count is decremented);
 *  - the timer has fired but the work is still waiting on the list:
 *    unlink it from the worklist.
 * Returns TRUE when the item was removed; FALSE when dwork is NULL or
 * unbound, not pending, or already running.
 */
bool linux_cancel_delayed_work(struct delayed_work *dwork)
{
    struct work_struct *work = NULL;
    struct work_struct *workNext = NULL;
    UINT32 intSave, intSave1;
    bool ret = FALSE;

    if ((dwork == NULL) || (dwork->wq == NULL)) {
        return FALSE;
    }

    LOS_SpinLockSave(&g_workqueueSpin, &intSave);

    if (dwork->work.work_status & WORK_STRUCT_PENDING) {
        LOS_SpinLockSave(&g_swtmrSpin, &intSave1);
        /* case 1: the backing timer is still ticking -> just kill the timer */
        if (OsSortLinkSearch(OsDelayWorkCancelCond, dwork)) {
            LOS_SpinUnlockRestore(&g_swtmrSpin, intSave1);
            (VOID)del_timer(&dwork->timer);
            dwork->work.work_status = 0;
            dwork->wq->delayed_work_count--;
            LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
            return TRUE;
        }

        LOS_SpinUnlockRestore(&g_swtmrSpin, intSave1);
        if (dwork->work.work_status & WORK_STRUCT_RUNNING) {
            /* too late: the callback is already executing */
            ret = FALSE;
        } else if (dwork->work.work_status & WORK_STRUCT_PENDING) {
            /* case 2: timer expired, work queued but not yet picked up */
            LIST_FOR_WORK_DEL(work, workNext, &dwork->wq->cpu_wq->worklist, struct work_struct, entry) {
                if (work == &dwork->work) {
                    list_del_init(&work->entry);
                    dwork->work.work_status = 0;
                    ret = TRUE;
                    break;
                }
            }
        }
    }

    LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
    return ret;
}
490
linux_cancel_delayed_work_sync(struct delayed_work * dwork)491 bool linux_cancel_delayed_work_sync(struct delayed_work *dwork)
492 {
493 return cancel_delayed_work(dwork);
494 }
495
/*
 * Force a pending delayed work item to run now and wait for it.
 * If the backing timer is still ticking, the timer is cancelled and the
 * work is queued immediately; either way the call then blocks in
 * flush_work() until the work finishes.
 * Returns FALSE when dwork is NULL/unbound or not pending, TRUE otherwise.
 */
bool linux_flush_delayed_work(struct delayed_work *dwork)
{
    UINT32 intSave, intSave1;

    if ((dwork == NULL) || (dwork->wq == NULL)) {
        return FALSE;
    }

    LOS_SpinLockSave(&g_workqueueSpin, &intSave);
    if (!(dwork->work.work_status & WORK_STRUCT_PENDING)) {
        LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
        return FALSE;
    }

    LOS_SpinLockSave(&g_swtmrSpin, &intSave1);
    /* timer still ticking: cancel it and queue the work right away */
    if (OsSortLinkSearch(OsDelayWorkCancelCond, dwork)) {
        LOS_SpinUnlockRestore(&g_swtmrSpin, intSave1);
        (VOID)del_timer(&dwork->timer);
        dwork->wq->delayed_work_count--;
        LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
        (VOID)queue_work(dwork->wq, &dwork->work);
    } else {
        LOS_SpinUnlockRestore(&g_swtmrSpin, intSave1);
        LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
    }
    (VOID)flush_work(&dwork->work);
    return TRUE;
}
524
linux_work_busy(struct work_struct * work)525 unsigned int linux_work_busy(struct work_struct *work)
526 {
527 UINT32 ret = 0;
528
529 if (work == NULL) {
530 return FALSE;
531 }
532
533 if (work->work_status & WORK_STRUCT_PENDING) {
534 ret |= WORK_BUSY_PENDING;
535 }
536 if (work->work_status & WORK_STRUCT_RUNNING) {
537 ret |= WORK_BUSY_RUNNING;
538 }
539 return ret;
540 }
541
linux_schedule_work(struct work_struct * work)542 bool linux_schedule_work(struct work_struct *work)
543 {
544 bool ret = FALSE;
545 UINT32 intSave;
546
547 if ((g_pstSystemWq == NULL) || (g_pstSystemWq->name == NULL) || (work == NULL)) {
548 return FALSE;
549 }
550
551 LOS_SpinLockSave(&g_workqueueSpin, &intSave);
552
553 work->work_status = WORK_STRUCT_PENDING;
554 ret = QueueWorkOn(g_pstSystemWq, work, &intSave);
555
556 LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
557 return ret;
558 }
559
linux_schedule_delayed_work(struct delayed_work * dwork,unsigned int delayTime)560 bool linux_schedule_delayed_work(struct delayed_work *dwork, unsigned int delayTime)
561 {
562 bool ret = FALSE;
563
564 if ((g_pstSystemWq == NULL) || (dwork == NULL)) {
565 return FALSE;
566 }
567
568 ret = queue_delayed_work(g_pstSystemWq, dwork, delayTime);
569
570 return ret;
571 }
572
drain_workqueue(struct workqueue_struct * wq)573 static void drain_workqueue(struct workqueue_struct *wq)
574 {
575 UINT32 intSave;
576 while (1) {
577 (VOID)usleep(DELAY_TIME);
578 LOS_SpinLockSave(&g_workqueueSpin, &intSave);
579 if (WorkqueueIsEmpty(wq->cpu_wq) && (wq->delayed_work_count == 0)) {
580 break;
581 }
582
583 LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
584 }
585 LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
586 }
587
linux_destroy_workqueue(struct workqueue_struct * wq)588 void linux_destroy_workqueue(struct workqueue_struct *wq)
589 {
590 UINT32 intSave;
591 if (wq == NULL) {
592 return;
593 }
594
595 /* Drain it before proceeding with destruction */
596 drain_workqueue(wq);
597
598 (VOID)LOS_TaskDelete(wq->wq_id);
599
600 LOS_SpinLockSave(&g_workqueueSpin, &intSave);
601 wq->name = NULL;
602 list_del_init(&wq->list);
603 (VOID)LOS_EventDestroy(&(wq->wq_event));
604
605 (VOID)LOS_MemFree(m_aucSysMem0, wq->cpu_wq);
606 (VOID)LOS_MemFree(m_aucSysMem0, wq);
607 LOS_SpinUnlockRestore(&g_workqueueSpin, intSave);
608 }
609
OsSysWorkQueueInit(VOID)610 UINT32 OsSysWorkQueueInit(VOID)
611 {
612 g_pstSystemWq = create_workqueue("system_wq");
613
614 return LOS_OK;
615 }
616
617 LOS_MODULE_INIT(OsSysWorkQueueInit, LOS_INIT_LEVEL_KMOD_EXTENDED);
618