/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "pprivate.h"
#include "pthread.h"
#include "sched.h"

#include "stdio.h"
#include "map_error.h"
#include "los_process_pri.h"
#include "los_sched_pri.h"


/*
 * Array of pthread control structures. A pthread_t object is
 * "just" an index into this array.
 */
STATIC _pthread_data g_pthreadData[LOSCFG_BASE_CORE_TSK_LIMIT + 1];

/* Count of threads that have exited but have not yet been reaped. */
STATIC INT32 g_pthreadsExited = 0;

/* Mutex protecting the pthread data table. */
STATIC pthread_mutex_t g_pthreadsDataMutex = PTHREAD_MUTEX_INITIALIZER;

/* Dummy variable pointed to by PTHREAD_CANCELED. */
UINTPTR g_pthreadCanceledDummyVar;

/*
 * Private version of pthread_self() that returns a pointer to our internal
 * control structure.
 */
_pthread_data *pthread_get_self_data(void)
{
    UINT32 runningTaskPID = ((LosTaskCB *)(OsCurrTaskGet()))->taskID;
    _pthread_data *data = &g_pthreadData[runningTaskPID];

    return data;
}

_pthread_data *pthread_get_data(pthread_t id)
{
    _pthread_data *data = NULL;

    if (OS_TID_CHECK_INVALID(id)) {
        return NULL;
    }

    data = &g_pthreadData[id];
    /* Check that this is a valid entry */
    if ((data->state == PTHREAD_STATE_FREE) || (data->state == PTHREAD_STATE_EXITED)) {
        return NULL;
    }

    /* Check that the entry matches the id */
    if (data->id != id) {
        return NULL;
    }

    /* Return the pointer */
    return data;
}

/*
 * Check whether there is a cancel pending and, if so, whether
 * cancellations are enabled. We do it in this order to reduce the
 * number of tests in the common case - when no cancellations are
 * pending. We make this inline so it can be called directly below for speed.
 */
STATIC INT32 CheckForCancel(VOID)
{
    _pthread_data *self = pthread_get_self_data();
    if (self->canceled && (self->cancelstate == PTHREAD_CANCEL_ENABLE)) {
        return 1;
    }
    return 0;
}

STATIC VOID ProcessUnusedStatusTask(_pthread_data *data)
{
    data->state = PTHREAD_STATE_FREE;
    (VOID)memset_s(data, sizeof(_pthread_data), 0, sizeof(_pthread_data));
}

/*
 * This function is called to tidy up and dispose of any threads that have
 * exited. This work must be done from a thread other than the one exiting.
 * Note: this function must be called with g_pthreadsDataMutex locked.
 */
STATIC VOID PthreadReap(VOID)
{
    UINT32 i;
    _pthread_data *data = NULL;
    /*
     * Loop over the thread table looking for exited threads. The
     * g_pthreadsExited counter springs us out of this once we have
     * found them all (and keeps us out if there are none to do).
     */
    for (i = 0; g_pthreadsExited && (i < g_taskMaxNum); i++) {
        data = &g_pthreadData[i];
        if (data->state == PTHREAD_STATE_EXITED) {
            /* Huawei LiteOS does not delete the dead TCB automatically, so delete it here. */
            (VOID)LOS_TaskDelete(data->task->taskID);
            if (data->task->taskStatus & OS_TASK_STATUS_UNUSED) {
                ProcessUnusedStatusTask(data);
                g_pthreadsExited--;
            }
        }
    }
}

STATIC VOID SetPthreadAttr(const _pthread_data *self, const pthread_attr_t *attr, pthread_attr_t *outAttr)
{
    /*
     * Set outAttr to the set of attributes we are going to
     * actually use: either those passed in, or the default set.
     */
    if (attr == NULL) {
        (VOID)pthread_attr_init(outAttr);
    } else {
        (VOID)memcpy_s(outAttr, sizeof(pthread_attr_t), attr, sizeof(pthread_attr_t));
    }

    /*
     * If no stack size was set, use the default task stack size,
     * which we assume is at least PTHREAD_STACK_MIN bytes.
     */
    if (!outAttr->stacksize_set) {
        outAttr->stacksize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
    }
    if (outAttr->inheritsched == PTHREAD_INHERIT_SCHED) {
        if (self->task == NULL) {
            outAttr->schedparam.sched_priority = LOS_TaskPriGet(OsCurrTaskGet()->taskID);
        } else {
            outAttr->schedpolicy = self->attr.schedpolicy;
            outAttr->schedparam  = self->attr.schedparam;
            outAttr->scope       = self->attr.scope;
        }
    }
}

STATIC VOID SetPthreadDataAttr(const pthread_attr_t *userAttr, const pthread_t threadID,
                               LosTaskCB *taskCB, _pthread_data *created)
{
    created->attr         = *userAttr;
    created->id           = threadID;
    created->task         = taskCB;
    created->state        = (userAttr->detachstate == PTHREAD_CREATE_JOINABLE) ?
                            PTHREAD_STATE_RUNNING : PTHREAD_STATE_DETACHED;
    /* To be confirmed */
    created->cancelstate  = PTHREAD_CANCEL_ENABLE;
    created->canceltype   = PTHREAD_CANCEL_DEFERRED;
    created->cancelbuffer = NULL;
    created->canceled     = 0;
    created->freestack    = 0; /* unused; default 0 */
    created->stackmem     = taskCB->topOfStack;
    created->thread_data  = NULL;
}

STATIC UINT32 InitPthreadData(pthread_t threadID, pthread_attr_t *userAttr,
                              const CHAR name[], size_t len)
{
    errno_t err;
    UINT32 ret = LOS_OK;
    LosTaskCB *taskCB = OS_TCB_FROM_TID(threadID);
    _pthread_data *created = &g_pthreadData[threadID];

    err = strncpy_s(created->name, sizeof(created->name), name, len);
    if (err != EOK) {
        PRINT_ERR("%s: %d, err: %d\n", __FUNCTION__, __LINE__, err);
        return LOS_NOK;
    }
    userAttr->stacksize   = taskCB->stackSize;
    err = OsSetTaskName(taskCB, created->name, FALSE);
    if (err != LOS_OK) {
        PRINT_ERR("%s: %d, err: %d\n", __FUNCTION__, __LINE__, err);
        return LOS_NOK;
    }
#ifdef LOSCFG_KERNEL_SMP
    if (userAttr->cpuset.__bits[0] > 0) {
        taskCB->cpuAffiMask = (UINT16)userAttr->cpuset.__bits[0];
    }
#endif

    SetPthreadDataAttr(userAttr, threadID, taskCB, created);
    return ret;
}

int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
                   void *(*startRoutine)(void *), void *arg)
{
    pthread_attr_t userAttr;
    UINT32 ret;
    CHAR name[PTHREAD_DATA_NAME_MAX] = {0};
    STATIC UINT16 pthreadNumber = 1;
    TSK_INIT_PARAM_S taskInitParam = {0};
    UINT32 taskHandle;
    _pthread_data *self = pthread_get_self_data();

    if ((thread == NULL) || (startRoutine == NULL)) {
        return EINVAL;
    }

    SetPthreadAttr(self, attr, &userAttr);

    (VOID)snprintf_s(name, sizeof(name), sizeof(name) - 1, "pth%02d", pthreadNumber);
    pthreadNumber++;

    taskInitParam.pcName       = name;
    taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)startRoutine;
    taskInitParam.auwArgs[0]   = (UINTPTR)arg;
    taskInitParam.usTaskPrio   = (UINT16)userAttr.schedparam.sched_priority;
    taskInitParam.uwStackSize  = userAttr.stacksize;
    if (OsProcessIsUserMode(OsCurrProcessGet())) {
        taskInitParam.processID = (UINTPTR)OsGetKernelInitProcess();
    } else {
        taskInitParam.processID = (UINTPTR)OsCurrProcessGet();
    }
    if (userAttr.detachstate == PTHREAD_CREATE_DETACHED) {
        taskInitParam.uwResved = LOS_TASK_STATUS_DETACHED;
    } else {
        /* Default: the pthread is joinable. */
        taskInitParam.uwResved = LOS_TASK_ATTR_JOINABLE;
    }

    PthreadReap();
    ret = LOS_TaskCreateOnly(&taskHandle, &taskInitParam);
    if (ret == LOS_OK) {
        *thread = (pthread_t)taskHandle;
        ret = InitPthreadData(*thread, &userAttr, name, PTHREAD_DATA_NAME_MAX);
        if (ret != LOS_OK) {
            goto ERROR_OUT_WITH_TASK;
        }
        (VOID)LOS_SetTaskScheduler(taskHandle, SCHED_RR, taskInitParam.usTaskPrio);
    }

    if (ret == LOS_OK) {
        return ENOERR;
    } else {
        goto ERROR_OUT;
    }

ERROR_OUT_WITH_TASK:
    (VOID)LOS_TaskDelete(taskHandle);
ERROR_OUT:
    *thread = (pthread_t)-1;

    return map_errno(ret);
}
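
/*
 * A minimal usage sketch (illustration only, compiled out): creating a thread
 * with an explicit stack size and SCHED_RR priority through a pthread_attr_t.
 * DemoEntry, DemoCreate and the numeric values are hypothetical; only SCHED_RR
 * is accepted by this port, and the standard pthread_attr_* setters are assumed
 * to be available.
 */
#if 0
static void *DemoEntry(void *arg)
{
    (void)arg;
    return NULL;
}

static int DemoCreate(void)
{
    pthread_t tid;
    pthread_attr_t attr;
    struct sched_param sched = { 0 };

    (void)pthread_attr_init(&attr);
    (void)pthread_attr_setstacksize(&attr, 0x4000);          /* hypothetical stack size */
    sched.sched_priority = 20;                                /* hypothetical priority */
    (void)pthread_attr_setschedpolicy(&attr, SCHED_RR);      /* only SCHED_RR is supported here */
    (void)pthread_attr_setschedparam(&attr, &sched);
    (void)pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);

    return pthread_create(&tid, &attr, DemoEntry, NULL);     /* 0 (ENOERR) on success */
}
#endif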

void pthread_exit(void *retVal)
{
    _pthread_data *self = pthread_get_self_data();
    UINT32 intSave;

    if (pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, (int *)0) != ENOERR) {
        PRINT_ERR("%s: %d failed\n", __FUNCTION__, __LINE__);
    }

    if (pthread_mutex_lock(&g_pthreadsDataMutex) != ENOERR) {
        PRINT_ERR("%s: %d failed\n", __FUNCTION__, __LINE__);
    }

    self->task->joinRetval = retVal;
    /*
     * If we are already detached, go to EXITED state, otherwise
     * go into JOIN state.
     */
    if (self->state == PTHREAD_STATE_DETACHED) {
        self->state = PTHREAD_STATE_EXITED;
        g_pthreadsExited++;
    } else {
        self->state = PTHREAD_STATE_JOIN;
    }

    if (pthread_mutex_unlock(&g_pthreadsDataMutex) != ENOERR) {
        PRINT_ERR("%s: %d failed\n", __FUNCTION__, __LINE__);
    }
    SCHEDULER_LOCK(intSave);
    /* If this thread is the highest-priority thread, it cannot be rescheduled inside LOS_SemPost. */
    OsTaskJoinPostUnsafe(self->task);
    if (self->task->taskStatus & OS_TASK_STATUS_RUNNING) {
        OsSchedResched();
    }
    SCHEDULER_UNLOCK(intSave);
}

STATIC INT32 ProcessByJoinState(_pthread_data *joined)
{
    UINT32 intSave;
    INT32 err = 0;
    UINT32 ret;
    switch (joined->state) {
        case PTHREAD_STATE_RUNNING:
            /* The thread is still running, we must wait for it. */
            SCHEDULER_LOCK(intSave);
            ret = OsTaskJoinPendUnsafe(joined->task);
            SCHEDULER_UNLOCK(intSave);
            if (ret != LOS_OK) {
                err = (INT32)ret;
                break;
            }

            joined->state = PTHREAD_STATE_ALRDY_JOIN;
            break;
           /*
            * The thread has become unjoinable while we waited, so we
            * fall through to complain.
            */
        case PTHREAD_STATE_FREE:
        case PTHREAD_STATE_DETACHED:
        case PTHREAD_STATE_EXITED:
            /* None of these may be joined. */
            err = EINVAL;
            break;
        case PTHREAD_STATE_ALRDY_JOIN:
            err = EINVAL;
            break;
        case PTHREAD_STATE_JOIN:
            break;
        default:
            PRINT_ERR("state: %u is not supported\n", (UINT32)joined->state);
            break;
    }
    return err;
}

int pthread_join(pthread_t thread, void **retVal)
{
    INT32 err;
    UINT8 status;
    _pthread_data *self = NULL;
    _pthread_data *joined = NULL;

    /* Check for cancellation first. */
    pthread_testcancel();

    /* Dispose of any dead threads */
    (VOID)pthread_mutex_lock(&g_pthreadsDataMutex);
    PthreadReap();
    (VOID)pthread_mutex_unlock(&g_pthreadsDataMutex);

    self   = pthread_get_self_data();
    joined = pthread_get_data(thread);
    if (joined == NULL) {
        return ESRCH;
    }
    status = joined->state;

    if (joined == self) {
        return EDEADLK;
    }

    err = ProcessByJoinState(joined);
    (VOID)pthread_mutex_lock(&g_pthreadsDataMutex);

    if (!err) {
        /*
         * Here, we know that joinee is a thread that has exited and is
         * ready to be joined.
         */
        if (retVal != NULL) {
            /* Get the retVal */
            *retVal = joined->task->joinRetval;
        }

        /* Set state to exited. */
        joined->state = PTHREAD_STATE_EXITED;
        g_pthreadsExited++;

        /* Dispose of any dead threads */
        PthreadReap();
    } else {
        joined->state = status;
    }

    (VOID)pthread_mutex_unlock(&g_pthreadsDataMutex);
    /* Check for cancellation before returning */
    pthread_testcancel();

    return err;
}
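
/*
 * A minimal usage sketch (illustration only, compiled out): joining a thread
 * and collecting the value it passed to pthread_exit(). DemoWorker, DemoJoin
 * and the value 42 are hypothetical.
 */
#if 0
static void *DemoWorker(void *arg)
{
    (void)arg;
    pthread_exit((void *)42);   /* equivalent to returning (void *)42 */
}

static int DemoJoin(void)
{
    pthread_t tid;
    void *retVal = NULL;
    int err = pthread_create(&tid, NULL, DemoWorker, NULL);
    if (err != 0) {
        return err;
    }
    err = pthread_join(tid, &retVal);   /* blocks until DemoWorker exits */
    /* On success, retVal now holds (void *)42. */
    return err;
}
#endif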

/*
 * Set the detachstate of the thread to "detached". The thread then does not
 * need to be joined and its resources will be freed when it exits.
 */
int pthread_detach(pthread_t thread)
{
    int ret = 0;
    UINT32 intSave;

    _pthread_data *detached = NULL;

    if (pthread_mutex_lock(&g_pthreadsDataMutex) != ENOERR) {
        ret = ESRCH;
    }
    detached = pthread_get_data(thread);
    if (detached == NULL) {
        ret = ESRCH; /* No such thread */
    } else if (detached->state == PTHREAD_STATE_DETACHED) {
        ret = EINVAL; /* Already detached! */
    } else if (detached->state == PTHREAD_STATE_JOIN) {
        detached->state = PTHREAD_STATE_EXITED;
        g_pthreadsExited++;
    } else {
        /* Set state to detached and kick any joinees to make them return. */
        SCHEDULER_LOCK(intSave);
        if (!(detached->task->taskStatus & OS_TASK_STATUS_EXIT)) {
            ret = OsTaskSetDetachUnsafe(detached->task);
            if (ret == ESRCH) {
                ret = LOS_OK;
            } else if (ret == LOS_OK) {
                detached->state = PTHREAD_STATE_DETACHED;
            }
        } else {
            detached->state = PTHREAD_STATE_EXITED;
            g_pthreadsExited++;
        }
        SCHEDULER_UNLOCK(intSave);
    }

    /* Dispose of any dead threads */
    PthreadReap();
    if (pthread_mutex_unlock(&g_pthreadsDataMutex) != ENOERR) {
        ret = ESRCH;
    }

    return ret;
}
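
/*
 * A minimal usage sketch (illustration only, compiled out): detaching a thread
 * so that its resources are reclaimed by PthreadReap() without a join.
 * DemoDetachedEntry and DemoDetach are hypothetical.
 */
#if 0
static void *DemoDetachedEntry(void *arg)
{
    (void)arg;
    return NULL;
}

static int DemoDetach(void)
{
    pthread_t tid;
    int err = pthread_create(&tid, NULL, DemoDetachedEntry, NULL);
    if (err != 0) {
        return err;
    }
    /* After this call, pthread_join(tid, ...) is no longer allowed. */
    return pthread_detach(tid);
}
#endif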

int pthread_setschedparam(pthread_t thread, int policy, const struct sched_param *param)
{
    _pthread_data *data = NULL;
    int ret;

    if ((param == NULL) || (param->sched_priority > OS_TASK_PRIORITY_LOWEST)) {
        return EINVAL;
    }

    if (policy != SCHED_RR) {
        return EINVAL;
    }

    /* The parameters seem OK, change the thread. */
    ret = pthread_mutex_lock(&g_pthreadsDataMutex);
    if (ret != ENOERR) {
        return ret;
    }

    data = pthread_get_data(thread);
    if (data == NULL) {
        ret = pthread_mutex_unlock(&g_pthreadsDataMutex);
        if (ret != ENOERR) {
            return ret;
        }
        return ESRCH;
    }

    /* Only support one policy now */
    data->attr.schedpolicy = SCHED_RR;
    data->attr.schedparam  = *param;

    ret = pthread_mutex_unlock(&g_pthreadsDataMutex);
    if (ret != ENOERR) {
        return ret;
    }
    (VOID)LOS_TaskPriSet((UINT32)thread, (UINT16)param->sched_priority);

    return ENOERR;
}

int pthread_getschedparam(pthread_t thread, int *policy, struct sched_param *param)
{
    _pthread_data *data = NULL;
    int ret;

    if ((policy == NULL) || (param == NULL)) {
        return EINVAL;
    }

    ret = pthread_mutex_lock(&g_pthreadsDataMutex);
    if (ret != ENOERR) {
        return ret;
    }

    data = pthread_get_data(thread);
    if (data == NULL) {
        goto ERR_OUT;
    }

    *policy = data->attr.schedpolicy;
    *param = data->attr.schedparam;

    ret = pthread_mutex_unlock(&g_pthreadsDataMutex);
    return ret;
ERR_OUT:
    ret = pthread_mutex_unlock(&g_pthreadsDataMutex);
    if (ret != ENOERR) {
        return ret;
    }
    return ESRCH;
}
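
/*
 * A minimal usage sketch (illustration only, compiled out): changing a thread's
 * priority and reading the parameters back. DemoReprioritize and the priority
 * value are hypothetical; only SCHED_RR is accepted, and sched_priority must not
 * exceed OS_TASK_PRIORITY_LOWEST.
 */
#if 0
static int DemoReprioritize(pthread_t tid)
{
    struct sched_param sched = { 0 };
    int policy = 0;
    int err;

    sched.sched_priority = 10;                              /* hypothetical priority */
    err = pthread_setschedparam(tid, SCHED_RR, &sched);
    if (err != 0) {
        return err;
    }
    return pthread_getschedparam(tid, &policy, &sched);     /* policy reads back as SCHED_RR */
}
#endif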

/* Call initRoutine exactly once per control variable. */
int pthread_once(pthread_once_t *onceControl, void (*initRoutine)(void))
{
    pthread_once_t old;
    int ret;

    if ((onceControl == NULL) || (initRoutine == NULL)) {
        return EINVAL;
    }

    /* Do a test and set on the onceControl object. */
    ret = pthread_mutex_lock(&g_pthreadsDataMutex);
    if (ret != ENOERR) {
        return ret;
    }

    old = *onceControl;
    *onceControl = 1;

    ret = pthread_mutex_unlock(&g_pthreadsDataMutex);
    if (ret != ENOERR) {
        return ret;
    }
    /* If the onceControl was zero, call the initRoutine(). */
    if (!old) {
        initRoutine();
    }

    return ENOERR;
}
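
/*
 * A minimal usage sketch (illustration only, compiled out): running a one-time
 * initializer from any number of threads; only the first caller invokes
 * DemoInit(). The names are hypothetical, and PTHREAD_ONCE_INIT is assumed to
 * be provided by pthread.h as the zero-initialized control value.
 */
#if 0
static pthread_once_t g_demoOnce = PTHREAD_ONCE_INIT;

static void DemoInit(void)
{
    /* one-time initialization goes here */
}

static void DemoUseOnce(void)
{
    (void)pthread_once(&g_demoOnce, DemoInit);   /* safe to call from any thread */
}
#endif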

/* Thread-specific data */
int pthread_key_create(pthread_key_t *key, void (*destructor)(void *))
{
    (VOID)key;
    (VOID)destructor;
    PRINT_ERR("[%s] is not supported.\n", __FUNCTION__);
    return 0;
}

/* Store the pointer value in the thread-specific data slot addressed by the key. */
int pthread_setspecific(pthread_key_t key, const void *pointer)
{
    (VOID)key;
    (VOID)pointer;
    PRINT_ERR("[%s] is not supported.\n", __FUNCTION__);
    return 0;
}

/* Retrieve the pointer value from the thread-specific data slot addressed by the key. */
void *pthread_getspecific(pthread_key_t key)
{
    (VOID)key;
    PRINT_ERR("[%s] is not supported.\n", __FUNCTION__);
    return NULL;
}

/*
 * Set cancel state of current thread to ENABLE or DISABLE.
 * Returns old state in *oldState.
 */
int pthread_setcancelstate(int state, int *oldState)
{
    _pthread_data *self = NULL;
    int ret;

    if ((state != PTHREAD_CANCEL_ENABLE) && (state != PTHREAD_CANCEL_DISABLE)) {
        return EINVAL;
    }

    ret = pthread_mutex_lock(&g_pthreadsDataMutex);
    if (ret != ENOERR) {
        return ret;
    }

    self = pthread_get_self_data();

    if (oldState != NULL) {
        *oldState = self->cancelstate;
    }

    self->cancelstate = (UINT8)state;

    ret = pthread_mutex_unlock(&g_pthreadsDataMutex);
    if (ret != ENOERR) {
        return ret;
    }

    return ENOERR;
}

/*
 * Set cancel type of current thread to ASYNCHRONOUS or DEFERRED.
 * Returns old type in *oldType.
 */
int pthread_setcanceltype(int type, int *oldType)
{
    _pthread_data *self = NULL;
    int ret;

    if ((type != PTHREAD_CANCEL_ASYNCHRONOUS) && (type != PTHREAD_CANCEL_DEFERRED)) {
        return EINVAL;
    }

    ret = pthread_mutex_lock(&g_pthreadsDataMutex);
    if (ret != ENOERR) {
        return ret;
    }

    self = pthread_get_self_data();
    if (oldType != NULL) {
        *oldType = self->canceltype;
    }

    self->canceltype = (UINT8)type;

    ret = pthread_mutex_unlock(&g_pthreadsDataMutex);
    if (ret != ENOERR) {
        return ret;
    }

    return ENOERR;
}

STATIC UINT32 DoPthreadCancel(_pthread_data *data)
{
    UINT32 ret = LOS_OK;
    UINT32 intSave;
    LOS_TaskLock();
    data->canceled = 0;
    if ((data->task->taskStatus & OS_TASK_STATUS_EXIT) || (LOS_TaskSuspend(data->task->taskID) != ENOERR)) {
        ret = LOS_NOK;
        goto OUT;
    }

    if (data->task->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {
        SCHEDULER_LOCK(intSave);
        OsTaskJoinPostUnsafe(data->task);
        SCHEDULER_UNLOCK(intSave);
        g_pthreadCanceledDummyVar = (UINTPTR)PTHREAD_CANCELED;
        data->task->joinRetval = (VOID *)g_pthreadCanceledDummyVar;
    } else if (data->state && !(data->task->taskStatus & OS_TASK_STATUS_UNUSED)) {
        data->state = PTHREAD_STATE_EXITED;
        g_pthreadsExited++;
        PthreadReap();
    } else {
        ret = LOS_NOK;
    }
OUT:
    LOS_TaskUnlock();
    return ret;
}

int pthread_cancel(pthread_t thread)
{
    _pthread_data *data = NULL;

    if (pthread_mutex_lock(&g_pthreadsDataMutex) != ENOERR) {
        PRINT_ERR("%s: %d failed\n", __FUNCTION__, __LINE__);
    }

    data = pthread_get_data(thread);
    if (data == NULL) {
        if (pthread_mutex_unlock(&g_pthreadsDataMutex) != ENOERR) {
            PRINT_ERR("%s: %d failed\n", __FUNCTION__, __LINE__);
        }
        return ESRCH;
    }

    data->canceled = 1;

    if ((data->cancelstate == PTHREAD_CANCEL_ENABLE) &&
        (data->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS)) {
        /*
         * If the thread has cancellation enabled, and it is in
         * asynchronous mode, suspend it and set the thread's status accordingly.
         * We also release the thread from any current wait to make it wake up.
         */
        if (DoPthreadCancel(data) == LOS_NOK) {
            goto ERROR_OUT;
        }
    }

    /*
     * Otherwise the thread has cancellation disabled, in which case
     * it is up to the thread to enable cancellation.
     */
    if (pthread_mutex_unlock(&g_pthreadsDataMutex) != ENOERR) {
        PRINT_ERR("%s: %d failed\n", __FUNCTION__, __LINE__);
    }

    return ENOERR;
ERROR_OUT:
    if (pthread_mutex_unlock(&g_pthreadsDataMutex) != ENOERR) {
        PRINT_ERR("%s: %d failed\n", __FUNCTION__, __LINE__);
    }
    return ESRCH;
}

/*
 * Test for a pending cancellation for the current thread and terminate
 * the thread if there is one.
 */
void pthread_testcancel(void)
{
    if (CheckForCancel()) {
        /*
         * If we have cancellation enabled, and there is a cancellation
         * pending, then go ahead and do the deed.
         * Exit now with special retVal. pthread_exit() calls the
         * cancellation handlers implicitly.
         */
        pthread_exit((void *)PTHREAD_CANCELED);
    }
}
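
/*
 * A minimal usage sketch (illustration only, compiled out): a worker that
 * enables deferred cancellation and polls for it with pthread_testcancel(),
 * plus a caller that cancels and joins it. DemoCancellable, DemoCancel and
 * the loop body are hypothetical.
 */
#if 0
static void *DemoCancellable(void *arg)
{
    (void)arg;
    (void)pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
    (void)pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
    for (;;) {
        /* ... do a slice of work ... */
        pthread_testcancel();   /* does not return if a cancel is pending */
    }
}

static int DemoCancel(void)
{
    pthread_t tid;
    void *retVal = NULL;
    int err = pthread_create(&tid, NULL, DemoCancellable, NULL);
    if (err != 0) {
        return err;
    }
    (void)pthread_cancel(tid);          /* deferred: takes effect at the next testcancel */
    err = pthread_join(tid, &retVal);
    /* On success, retVal equals PTHREAD_CANCELED. */
    return err;
}
#endif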

/* Get current thread id. */
pthread_t pthread_self(void)
{
    _pthread_data *data = pthread_get_self_data();

    return data->id;
}

/* Compare two thread identifiers. */
int pthread_equal(pthread_t thread1, pthread_t thread2)
{
    return thread1 == thread2;
}

void pthread_cleanup_push_inner(struct pthread_cleanup_buffer *buffer,
                                void (*routine)(void *), void *arg)
{
    (VOID)buffer;
    (VOID)routine;
    (VOID)arg;
    PRINT_ERR("[%s] is not supported.\n", __FUNCTION__);
    return;
}

void pthread_cleanup_pop_inner(struct pthread_cleanup_buffer *buffer, int execute)
{
    (VOID)buffer;
    (VOID)execute;
    PRINT_ERR("[%s] is not supported.\n", __FUNCTION__);
    return;
}

/*
 * Set the cpu affinity mask for the thread
 */
int pthread_setaffinity_np(pthread_t thread, size_t cpusetsize, const cpu_set_t* cpuset)
{
    INT32 ret = sched_setaffinity(thread, cpusetsize, cpuset);
    if (ret == -1) {
        return errno;
    } else {
        return ENOERR;
    }
}

/*
 * Get the cpu affinity mask from the thread
 */
int pthread_getaffinity_np(pthread_t thread, size_t cpusetsize, cpu_set_t* cpuset)
{
    INT32 ret = sched_getaffinity(thread, cpusetsize, cpuset);
    if (ret == -1) {
        return errno;
    } else {
        return ENOERR;
    }
}
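
/*
 * A minimal usage sketch (illustration only, compiled out): pinning a thread
 * to CPU 0 and reading the mask back. DemoAffinity is hypothetical, and the
 * CPU_ZERO/CPU_SET macros for cpu_set_t are assumed to be provided by the libc.
 */
#if 0
static int DemoAffinity(pthread_t tid)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(0, &cpuset);                 /* run only on CPU 0 */
    err = pthread_setaffinity_np(tid, sizeof(cpuset), &cpuset);
    if (err != 0) {
        return err;
    }
    return pthread_getaffinity_np(tid, sizeof(cpuset), &cpuset);
}
#endif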