1 /*
2 * Copyright (c) 2001-2003 Swedish Institute of Computer Science.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without modification,
6 * are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
19 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
21 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
24 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
25 * OF SUCH DAMAGE.
26 *
27 * This file is part of the lwIP TCP/IP stack.
28 *
29 * Author: Adam Dunkels <adam@sics.se>
30 *
31 */
32
33 /*
34 * Wed Apr 17 16:05:29 EDT 2002 (James Roth)
35 *
36 * - Fixed an unlikely sys_thread_new() race condition.
37 *
38 * - Made current_thread() work with threads which where
39 * not created with sys_thread_new(). This includes
40 * the main thread and threads made with pthread_create().
41 *
42 * - Catch overflows where more than SYS_MBOX_SIZE messages
43 * are waiting to be read. The sys_mbox_post() routine
44 * will block until there is more room instead of just
45 * leaking messages.
46 */
47 #define _GNU_SOURCE /* pull in pthread_setname_np() on Linux */
48
49 #include "lwip/debug.h"
50
51 #include <string.h>
52 #include <sys/time.h>
53 #include <sys/types.h>
54 #include <stdlib.h>
55 #include <unistd.h>
56 #include <pthread.h>
57 #include <errno.h>
58
59 #include "lwip/def.h"
60
61 #ifdef LWIP_UNIX_MACH
62 #include <mach/mach.h>
63 #include <mach/mach_time.h>
64 #endif
65
66 #include "lwip/sys.h"
67 #include "lwip/opt.h"
68 #include "lwip/stats.h"
69 #include "lwip/tcpip.h"
70
71 #if LWIP_NETCONN_SEM_PER_THREAD
72 /* pthread key to *our* thread local storage entry */
73 static pthread_key_t sys_thread_sem_key;
74 #endif
75
76 /* Return code for an interrupted timed wait */
77 #define SYS_ARCH_INTR 0xfffffffeUL
78
79 u32_t
lwip_port_rand(void)80 lwip_port_rand(void)
81 {
82 return (u32_t)rand();
83 }
84
/* Fill ts with a monotonic timestamp, immune to wall-clock adjustments. */
static void
get_monotonic_time(struct timespec *ts)
{
#ifdef LWIP_UNIX_MACH
  /* Darwin/Mach has no CLOCK_MONOTONIC: convert mach_absolute_time()
     ticks to nanoseconds via the timebase ratio, then split into
     seconds + nanoseconds. */
  mach_timebase_info_data_t tb = {0, 0};
  u64_t ticks = mach_absolute_time();
  u64_t ns;
  u64_t secs;

  mach_timebase_info(&tb);
  ns = (ticks * tb.numer) / (tb.denom);
  secs = ns / 1000000000L;
  ts->tv_sec = secs;
  ts->tv_nsec = ns - secs * 1000000000L;
#else
  clock_gettime(CLOCK_MONOTONIC, ts);
#endif
}
102
#if SYS_LIGHTWEIGHT_PROT
/* State for sys_arch_protect()/sys_arch_unprotect(): one process-wide mutex
 * plus the owning thread and a recursion count that the mutex guards. */
static pthread_mutex_t lwprot_mutex = PTHREAD_MUTEX_INITIALIZER;
/* 0xDEAD is a sentinel meaning "no owner". NOTE(review): casting an integer
 * to pthread_t and comparing with ==/!= is not portable per POSIX; it works
 * on platforms where pthread_t is an arithmetic type. */
static pthread_t lwprot_thread = (pthread_t)0xDEAD;
static int lwprot_count = 0; /* recursion depth held by the current owner */
#endif /* SYS_LIGHTWEIGHT_PROT */
108
109 #if !NO_SYS
110
/* Singly-linked list of all threads created through sys_thread_new(),
 * protected by threads_mutex. */
static struct sys_thread *threads = NULL;
static pthread_mutex_t threads_mutex = PTHREAD_MUTEX_INITIALIZER;

/* NOTE(review): sys_mbox_msg is not referenced anywhere in this file;
 * presumably a leftover from an older linked-list mailbox implementation. */
struct sys_mbox_msg {
  struct sys_mbox_msg *next;
  void *msg;
};

/* Capacity of a mailbox ring buffer (number of pending messages). */
#define SYS_MBOX_SIZE 128

/* Fixed-capacity circular mailbox. first/last are free-running indices;
 * (index % SYS_MBOX_SIZE) selects the slot, and the box is full when
 * last - first reaches SYS_MBOX_SIZE - 1. */
struct sys_mbox {
  int first, last;
  void *msgs[SYS_MBOX_SIZE];
  struct sys_sem *not_empty; /* signalled on empty -> non-empty transition */
  struct sys_sem *not_full;  /* signalled when a slot frees and senders wait */
  struct sys_sem *mutex;     /* binary semaphore guarding all fields above */
  int wait_send;             /* senders currently blocked in sys_mbox_post() */
};

/* Semaphore built from a pthread mutex + condition variable. The count is
 * clamped to 1 in sys_sem_signal(), so it behaves as a binary semaphore. */
struct sys_sem {
  unsigned int c;              /* current count (0 or 1 in practice) */
  pthread_condattr_t condattr; /* holds the CLOCK_MONOTONIC setting */
  pthread_cond_t cond;
  pthread_mutex_t mutex;
};

/* Thin wrapper so sys_mutex_t is an opaque heap object. */
struct sys_mutex {
  pthread_mutex_t mutex;
};

/* Bookkeeping node for one thread known to lwIP (see threads list). */
struct sys_thread {
  struct sys_thread *next;
  pthread_t pthread;
};

static struct sys_sem *sys_sem_new_internal(u8_t count);
static void sys_sem_free_internal(struct sys_sem *sem);

/* Wait on cond/mutex for up to timeout ms (0 = forever); returns elapsed
 * ms, SYS_ARCH_TIMEOUT, or (Hurd) SYS_ARCH_INTR. */
static u32_t cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex,
                       u32_t timeout);
151
152 /*-----------------------------------------------------------------------------------*/
153 /* Threads */
154 static struct sys_thread *
introduce_thread(pthread_t id)155 introduce_thread(pthread_t id)
156 {
157 struct sys_thread *thread;
158
159 thread = (struct sys_thread *)malloc(sizeof(struct sys_thread));
160
161 if (thread != NULL) {
162 pthread_mutex_lock(&threads_mutex);
163 thread->next = threads;
164 thread->pthread = id;
165 threads = thread;
166 pthread_mutex_unlock(&threads_mutex);
167 }
168
169 return thread;
170 }
171
/* Carries the user's thread function and argument from sys_thread_new()
 * into the new pthread; freed by thread_wrapper() if the function returns. */
struct thread_wrapper_data
{
  lwip_thread_fn function;
  void *arg;
};
177
178 static void *
thread_wrapper(void * arg)179 thread_wrapper(void *arg)
180 {
181 struct thread_wrapper_data *thread_data = (struct thread_wrapper_data *)arg;
182
183 thread_data->function(thread_data->arg);
184
185 /* we should never get here */
186 free(arg);
187 return NULL;
188 }
189
190 sys_thread_t
sys_thread_new(const char * name,lwip_thread_fn function,void * arg,int stacksize,int prio)191 sys_thread_new(const char *name, lwip_thread_fn function, void *arg, int stacksize, int prio)
192 {
193 int code;
194 pthread_t tmp;
195 struct sys_thread *st = NULL;
196 struct thread_wrapper_data *thread_data;
197 LWIP_UNUSED_ARG(name);
198 LWIP_UNUSED_ARG(stacksize);
199 LWIP_UNUSED_ARG(prio);
200
201 thread_data = (struct thread_wrapper_data *)malloc(sizeof(struct thread_wrapper_data));
202 thread_data->arg = arg;
203 thread_data->function = function;
204 code = pthread_create(&tmp,
205 NULL,
206 thread_wrapper,
207 thread_data);
208
209 #ifdef LWIP_UNIX_LINUX
210 pthread_setname_np(tmp, name);
211 #endif
212
213 if (0 == code) {
214 st = introduce_thread(tmp);
215 }
216
217 if (NULL == st) {
218 LWIP_DEBUGF(SYS_DEBUG, ("sys_thread_new: pthread_create %d, st = 0x%lx\n",
219 code, (unsigned long)st));
220 abort();
221 }
222 return st;
223 }
224
#if LWIP_TCPIP_CORE_LOCKING
/* Thread currently holding lock_tcpip_core; 0 when unlocked.
 * NOTE(review): storing/comparing pthread_t as an integer is not portable
 * per POSIX, but matches this port's existing convention. */
static pthread_t lwip_core_lock_holder_thread_id;

/** Acquire the global lwIP core lock and record the owner so
 * sys_check_core_locking() can verify callers. */
void sys_lock_tcpip_core(void)
{
  sys_mutex_lock(&lock_tcpip_core);
  lwip_core_lock_holder_thread_id = pthread_self();
}

/** Clear the recorded owner, then release the global core lock.
 * (Cleared before unlocking so a racing checker never sees a stale owner
 * while the lock is free.) */
void sys_unlock_tcpip_core(void)
{
  lwip_core_lock_holder_thread_id = 0;
  sys_mutex_unlock(&lock_tcpip_core);
}
#endif /* LWIP_TCPIP_CORE_LOCKING */
239
/* Identity of the tcpip (core) thread; 0 until sys_mark_tcpip_thread()
 * runs, which disables the checks below during early startup. */
static pthread_t lwip_tcpip_thread_id;

/** Called once from the tcpip thread at startup to record its identity. */
void sys_mark_tcpip_thread(void)
{
  lwip_tcpip_thread_id = pthread_self();
}

/** Debug aid: assert that the caller may use lwIP core functionality right
 * now — i.e. it holds the core lock (when core locking is enabled), or it
 * is the tcpip thread itself (when it is not). */
void sys_check_core_locking(void)
{
  /* Embedded systems should check we are NOT in an interrupt context here */

  if (lwip_tcpip_thread_id != 0) {
    pthread_t current_thread_id = pthread_self();

#if LWIP_TCPIP_CORE_LOCKING
    LWIP_ASSERT("Function called without core lock", current_thread_id == lwip_core_lock_holder_thread_id);
#else /* LWIP_TCPIP_CORE_LOCKING */
    LWIP_ASSERT("Function called from wrong thread", current_thread_id == lwip_tcpip_thread_id);
#endif /* LWIP_TCPIP_CORE_LOCKING */
  }
}
260
261 /*-----------------------------------------------------------------------------------*/
262 /* Mailbox */
263 err_t
sys_mbox_new(struct sys_mbox ** mb,int size)264 sys_mbox_new(struct sys_mbox **mb, int size)
265 {
266 struct sys_mbox *mbox;
267 LWIP_UNUSED_ARG(size);
268
269 mbox = (struct sys_mbox *)malloc(sizeof(struct sys_mbox));
270 if (mbox == NULL) {
271 return ERR_MEM;
272 }
273 mbox->first = mbox->last = 0;
274 mbox->not_empty = sys_sem_new_internal(0);
275 mbox->not_full = sys_sem_new_internal(0);
276 mbox->mutex = sys_sem_new_internal(1);
277 mbox->wait_send = 0;
278
279 SYS_STATS_INC_USED(mbox);
280 *mb = mbox;
281 return ERR_OK;
282 }
283
284 void
sys_mbox_free(struct sys_mbox ** mb)285 sys_mbox_free(struct sys_mbox **mb)
286 {
287 if ((mb != NULL) && (*mb != SYS_MBOX_NULL)) {
288 struct sys_mbox *mbox = *mb;
289 SYS_STATS_DEC(mbox.used);
290 sys_arch_sem_wait(&mbox->mutex, 0);
291
292 sys_sem_free_internal(mbox->not_empty);
293 sys_sem_free_internal(mbox->not_full);
294 sys_sem_free_internal(mbox->mutex);
295 mbox->not_empty = mbox->not_full = mbox->mutex = NULL;
296 /* LWIP_DEBUGF("sys_mbox_free: mbox 0x%lx\n", mbox); */
297 free(mbox);
298 }
299 }
300
301 err_t
sys_mbox_trypost(struct sys_mbox ** mb,void * msg)302 sys_mbox_trypost(struct sys_mbox **mb, void *msg)
303 {
304 u8_t first;
305 struct sys_mbox *mbox;
306 LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL));
307 mbox = *mb;
308
309 sys_arch_sem_wait(&mbox->mutex, 0);
310
311 LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_trypost: mbox %p msg %p\n",
312 (void *)mbox, (void *)msg));
313
314 if ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE)) {
315 sys_sem_signal(&mbox->mutex);
316 return ERR_MEM;
317 }
318
319 mbox->msgs[mbox->last % SYS_MBOX_SIZE] = msg;
320
321 if (mbox->last == mbox->first) {
322 first = 1;
323 } else {
324 first = 0;
325 }
326
327 mbox->last++;
328
329 if (first) {
330 sys_sem_signal(&mbox->not_empty);
331 }
332
333 sys_sem_signal(&mbox->mutex);
334
335 return ERR_OK;
336 }
337
/* "From ISR" variant: this Unix port has no real interrupt context, so it
 * is simply an alias for sys_mbox_trypost(). */
err_t
sys_mbox_trypost_fromisr(sys_mbox_t *q, void *msg)
{
  return sys_mbox_trypost(q, msg);
}
343
344 void
sys_mbox_post(struct sys_mbox ** mb,void * msg)345 sys_mbox_post(struct sys_mbox **mb, void *msg)
346 {
347 u8_t first;
348 struct sys_mbox *mbox;
349 LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL));
350 mbox = *mb;
351
352 sys_arch_sem_wait(&mbox->mutex, 0);
353
354 LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_post: mbox %p msg %p\n", (void *)mbox, (void *)msg));
355
356 while ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE)) {
357 mbox->wait_send++;
358 sys_sem_signal(&mbox->mutex);
359 sys_arch_sem_wait(&mbox->not_full, 0);
360 sys_arch_sem_wait(&mbox->mutex, 0);
361 mbox->wait_send--;
362 }
363
364 mbox->msgs[mbox->last % SYS_MBOX_SIZE] = msg;
365
366 if (mbox->last == mbox->first) {
367 first = 1;
368 } else {
369 first = 0;
370 }
371
372 mbox->last++;
373
374 if (first) {
375 sys_sem_signal(&mbox->not_empty);
376 }
377
378 sys_sem_signal(&mbox->mutex);
379 }
380
381 u32_t
sys_arch_mbox_tryfetch(struct sys_mbox ** mb,void ** msg)382 sys_arch_mbox_tryfetch(struct sys_mbox **mb, void **msg)
383 {
384 struct sys_mbox *mbox;
385 LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL));
386 mbox = *mb;
387
388 sys_arch_sem_wait(&mbox->mutex, 0);
389
390 if (mbox->first == mbox->last) {
391 sys_sem_signal(&mbox->mutex);
392 return SYS_MBOX_EMPTY;
393 }
394
395 if (msg != NULL) {
396 LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_tryfetch: mbox %p msg %p\n", (void *)mbox, *msg));
397 *msg = mbox->msgs[mbox->first % SYS_MBOX_SIZE];
398 }
399 else{
400 LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_tryfetch: mbox %p, null msg\n", (void *)mbox));
401 }
402
403 mbox->first++;
404
405 if (mbox->wait_send) {
406 sys_sem_signal(&mbox->not_full);
407 }
408
409 sys_sem_signal(&mbox->mutex);
410
411 return 0;
412 }
413
/* Blocking fetch with an optional timeout in ms (0 = wait forever).
 * On success stores the message in *msg (when non-NULL) and returns the
 * time spent waiting; returns SYS_ARCH_TIMEOUT on timeout. */
u32_t
sys_arch_mbox_fetch(struct sys_mbox **mb, void **msg, u32_t timeout)
{
  u32_t time_needed = 0;
  struct sys_mbox *mbox;
  LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL));
  mbox = *mb;

  /* The mutex lock is quick so we don't bother with the timeout
     stuff here. */
  sys_arch_sem_wait(&mbox->mutex, 0);

  while (mbox->first == mbox->last) {
    sys_sem_signal(&mbox->mutex);

    /* We block while waiting for a mail to arrive in the mailbox. We
       must be prepared to timeout. */
    if (timeout != 0) {
      /* NOTE(review): timeout is not reduced by time already waited, so a
         wakeup without a message restarts the full timeout; time_needed
         only reflects the last wait. Matches upstream behavior. */
      time_needed = sys_arch_sem_wait(&mbox->not_empty, timeout);

      if (time_needed == SYS_ARCH_TIMEOUT) {
        return SYS_ARCH_TIMEOUT;
      }
    } else {
      sys_arch_sem_wait(&mbox->not_empty, 0);
    }

    /* Re-take the mailbox mutex before re-checking for a message. */
    sys_arch_sem_wait(&mbox->mutex, 0);
  }

  if (msg != NULL) {
    LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p msg %p\n", (void *)mbox, *msg));
    *msg = mbox->msgs[mbox->first % SYS_MBOX_SIZE];
  }
  else{
    LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p, null msg\n", (void *)mbox));
  }

  mbox->first++;

  /* A slot was freed: wake senders blocked in sys_mbox_post(). */
  if (mbox->wait_send) {
    sys_sem_signal(&mbox->not_full);
  }

  sys_sem_signal(&mbox->mutex);

  return time_needed;
}
462
463 /*-----------------------------------------------------------------------------------*/
464 /* Semaphore */
465 static struct sys_sem *
sys_sem_new_internal(u8_t count)466 sys_sem_new_internal(u8_t count)
467 {
468 struct sys_sem *sem;
469
470 sem = (struct sys_sem *)malloc(sizeof(struct sys_sem));
471 if (sem != NULL) {
472 sem->c = count;
473 pthread_condattr_init(&(sem->condattr));
474 #if !(defined(LWIP_UNIX_MACH) || (defined(LWIP_UNIX_ANDROID) && __ANDROID_API__ < 21))
475 pthread_condattr_setclock(&(sem->condattr), CLOCK_MONOTONIC);
476 #endif
477 pthread_cond_init(&(sem->cond), &(sem->condattr));
478 pthread_mutex_init(&(sem->mutex), NULL);
479 }
480 return sem;
481 }
482
483 err_t
sys_sem_new(struct sys_sem ** sem,u8_t count)484 sys_sem_new(struct sys_sem **sem, u8_t count)
485 {
486 SYS_STATS_INC_USED(sem);
487 *sem = sys_sem_new_internal(count);
488 if (*sem == NULL) {
489 return ERR_MEM;
490 }
491 return ERR_OK;
492 }
493
/* Wait on cond (with mutex held) for up to timeout ms; 0 = wait forever.
 * Returns the elapsed wait time in ms, SYS_ARCH_TIMEOUT on timeout, or
 * (Hurd only) SYS_ARCH_INTR when the wait was cancelled. */
static u32_t
cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, u32_t timeout)
{
  struct timespec rtime1, rtime2, ts;
  int ret;

#ifdef LWIP_UNIX_HURD
/* Redirect to the Hurd's cancellable wait variants for the rest of this
 * function. */
#define pthread_cond_wait pthread_hurd_cond_wait_np
#define pthread_cond_timedwait pthread_hurd_cond_timedwait_np
#endif

  if (timeout == 0) {
    /* Untimed wait: propagate the pthread result (0 on normal wakeup). */
    ret = pthread_cond_wait(cond, mutex);
    return
#ifdef LWIP_UNIX_HURD
    /* On the Hurd, ret == 1 means the RPC has been cancelled.
     * The thread is awakened (not terminated) and execution must continue */
    ret == 1 ? SYS_ARCH_INTR :
#endif
      (u32_t)ret;
  }

  /* Get a timestamp and add the timeout value. */
  get_monotonic_time(&rtime1);
#if defined(LWIP_UNIX_MACH) || (defined(LWIP_UNIX_ANDROID) && __ANDROID_API__ < 21)
  /* These platforms lack pthread_condattr_setclock(); use the
     relative-timeout wait instead of an absolute deadline. */
  ts.tv_sec = timeout / 1000L;
  ts.tv_nsec = (timeout % 1000L) * 1000000L;
  ret = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
  /* Absolute deadline on CLOCK_MONOTONIC (set in sys_sem_new_internal),
     with nanosecond carry normalisation. */
  ts.tv_sec = rtime1.tv_sec + timeout / 1000L;
  ts.tv_nsec = rtime1.tv_nsec + (timeout % 1000L) * 1000000L;
  if (ts.tv_nsec >= 1000000000L) {
    ts.tv_sec++;
    ts.tv_nsec -= 1000000000L;
  }

  ret = pthread_cond_timedwait(cond, mutex, &ts);
#endif
  if (ret == ETIMEDOUT) {
    return SYS_ARCH_TIMEOUT;
#ifdef LWIP_UNIX_HURD
  /* On the Hurd, a cancelled RPC surfaces as EINTR here.
   * The thread is awakened (not terminated) and execution must continue */
  } else if (ret == EINTR) {
    return SYS_ARCH_INTR;
#endif
  }

  /* Calculate for how long we waited for the cond. */
  get_monotonic_time(&rtime2);
  ts.tv_sec = rtime2.tv_sec - rtime1.tv_sec;
  ts.tv_nsec = rtime2.tv_nsec - rtime1.tv_nsec;
  if (ts.tv_nsec < 0) {
    ts.tv_sec--;
    ts.tv_nsec += 1000000000L;
  }
  return (u32_t)(ts.tv_sec * 1000L + ts.tv_nsec / 1000000L);
}
552
/* Decrement the semaphore, blocking while the count is zero.
 * timeout is in ms (0 = wait forever). Returns the time spent waiting,
 * SYS_ARCH_TIMEOUT on timeout, or 0 when awakened without lwIP signalling
 * (error / spurious / cancelled wakeups). */
u32_t
sys_arch_sem_wait(struct sys_sem **s, u32_t timeout)
{
  u32_t time_needed = 0;
  struct sys_sem *sem;
  LWIP_ASSERT("invalid sem", (s != NULL) && (*s != NULL));
  sem = *s;

  pthread_mutex_lock(&(sem->mutex));
  /* c is unsigned, so "<= 0" is effectively "== 0". */
  while (sem->c <= 0) {
    if (timeout > 0) {
      time_needed = cond_wait(&(sem->cond), &(sem->mutex), timeout);

      if (time_needed == SYS_ARCH_TIMEOUT) {
        pthread_mutex_unlock(&(sem->mutex));
        return SYS_ARCH_TIMEOUT;
#ifdef LWIP_UNIX_HURD
      /* Hurd: the wait was cancelled (RPC interrupted); report "no wait". */
      } else if(time_needed == SYS_ARCH_INTR) {
        pthread_mutex_unlock(&(sem->mutex));
        return 0;
#endif
      }
    } else if(cond_wait(&(sem->cond), &(sem->mutex), 0)) {
      /* Some error happened or the thread has been awakened but not by lwip */
      pthread_mutex_unlock(&(sem->mutex));
      return 0;
    }
  }
  sem->c--;
  pthread_mutex_unlock(&(sem->mutex));
  return (u32_t)time_needed;
}
587
588 void
sys_sem_signal(struct sys_sem ** s)589 sys_sem_signal(struct sys_sem **s)
590 {
591 struct sys_sem *sem;
592 LWIP_ASSERT("invalid sem", (s != NULL) && (*s != NULL));
593 sem = *s;
594
595 pthread_mutex_lock(&(sem->mutex));
596 sem->c++;
597
598 if (sem->c > 1) {
599 sem->c = 1;
600 }
601
602 pthread_cond_broadcast(&(sem->cond));
603 pthread_mutex_unlock(&(sem->mutex));
604 }
605
/* Destroy the pthread primitives and release the semaphore memory.
 * Must not be called while any thread is still waiting on it. */
static void
sys_sem_free_internal(struct sys_sem *sem)
{
  pthread_cond_destroy(&(sem->cond));
  pthread_condattr_destroy(&(sem->condattr));
  pthread_mutex_destroy(&(sem->mutex));
  free(sem);
}
614
615 void
sys_sem_free(struct sys_sem ** sem)616 sys_sem_free(struct sys_sem **sem)
617 {
618 if ((sem != NULL) && (*sem != SYS_SEM_NULL)) {
619 SYS_STATS_DEC(sem.used);
620 sys_sem_free_internal(*sem);
621 }
622 }
623
624 /*-----------------------------------------------------------------------------------*/
625 /* Mutex */
626 /** Create a new mutex
627 * @param mutex pointer to the mutex to create
628 * @return a new mutex */
629 err_t
sys_mutex_new(struct sys_mutex ** mutex)630 sys_mutex_new(struct sys_mutex **mutex)
631 {
632 struct sys_mutex *mtx;
633
634 mtx = (struct sys_mutex *)malloc(sizeof(struct sys_mutex));
635 if (mtx != NULL) {
636 pthread_mutex_init(&(mtx->mutex), NULL);
637 *mutex = mtx;
638 return ERR_OK;
639 }
640 else {
641 return ERR_MEM;
642 }
643 }
644
/** Lock a mutex (blocks until acquired)
 * @param mutex the mutex to lock */
void
sys_mutex_lock(struct sys_mutex **mutex)
{
  pthread_mutex_lock(&((*mutex)->mutex));
}
652
/** Unlock a mutex previously locked by the calling thread
 * @param mutex the mutex to unlock */
void
sys_mutex_unlock(struct sys_mutex **mutex)
{
  pthread_mutex_unlock(&((*mutex)->mutex));
}
660
/** Delete a mutex
 * @param mutex the mutex to delete; *mutex is left dangling afterwards and
 * must not be used again by the caller */
void
sys_mutex_free(struct sys_mutex **mutex)
{
  pthread_mutex_destroy(&((*mutex)->mutex));
  free(*mutex);
}
669
670 #endif /* !NO_SYS */
671
672 #if LWIP_NETCONN_SEM_PER_THREAD
673 /*-----------------------------------------------------------------------------------*/
674 /* Semaphore per thread located TLS */
675
676 static void
sys_thread_sem_free(void * data)677 sys_thread_sem_free(void* data)
678 {
679 sys_sem_t *sem = (sys_sem_t*)(data);
680
681 if (sem) {
682 sys_sem_free(sem);
683 free(sem);
684 }
685 }
686
687 static sys_sem_t*
sys_thread_sem_alloc(void)688 sys_thread_sem_alloc(void)
689 {
690 sys_sem_t *sem;
691 err_t err;
692 int ret;
693
694 sem = (sys_sem_t*)malloc(sizeof(sys_sem_t*));
695 LWIP_ASSERT("failed to allocate memory for TLS semaphore", sem != NULL);
696 err = sys_sem_new(sem, 0);
697 LWIP_ASSERT("failed to initialise TLS semaphore", err == ERR_OK);
698 ret = pthread_setspecific(sys_thread_sem_key, sem);
699 LWIP_ASSERT("failed to initialise TLS semaphore storage", ret == 0);
700 return sem;
701 }
702
703 sys_sem_t*
sys_arch_netconn_sem_get(void)704 sys_arch_netconn_sem_get(void)
705 {
706 sys_sem_t* sem = (sys_sem_t*)pthread_getspecific(sys_thread_sem_key);
707 if (!sem) {
708 sem = sys_thread_sem_alloc();
709 }
710 LWIP_DEBUGF(SYS_DEBUG, ("sys_thread_sem_get s=%p\n", (void*)sem));
711 return sem;
712 }
713
/* Eagerly create the calling thread's netconn semaphore (sem is only used
 * for the debug printout here; the handle lives in TLS). */
void
sys_arch_netconn_sem_alloc(void)
{
  sys_sem_t* sem = sys_thread_sem_alloc();
  LWIP_DEBUGF(SYS_DEBUG, ("sys_thread_sem created s=%p\n", (void*)sem));
}
720
721 void
sys_arch_netconn_sem_free(void)722 sys_arch_netconn_sem_free(void)
723 {
724 int ret;
725
726 sys_sem_t *sem = (sys_sem_t *)pthread_getspecific(sys_thread_sem_key);
727 sys_thread_sem_free(sem);
728 ret = pthread_setspecific(sys_thread_sem_key, NULL);
729 LWIP_ASSERT("failed to de-init TLS semaphore storage", ret == 0);
730 }
731 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
732
733 /*-----------------------------------------------------------------------------------*/
734 /* Time */
735 u32_t
sys_now(void)736 sys_now(void)
737 {
738 struct timespec ts;
739 u32_t now;
740
741 get_monotonic_time(&ts);
742 now = (u32_t)(ts.tv_sec * 1000L + ts.tv_nsec / 1000000L);
743 #ifdef LWIP_FUZZ_SYS_NOW
744 now += sys_now_offset;
745 #endif
746 return now;
747 }
748
/* Nanosecond-resolution monotonic tick value, truncated to u32_t
 * (wraps every ~4.3 seconds; callers only use it as a jiffy counter). */
u32_t
sys_jiffies(void)
{
  struct timespec ts;

  get_monotonic_time(&ts);
  return (u32_t)(ts.tv_sec * 1000000000L + ts.tv_nsec);
}
757
758 /*-----------------------------------------------------------------------------------*/
759 /* Init */
760
/* One-time system-layer initialisation for this port. */
void
sys_init(void)
{
#if LWIP_NETCONN_SEM_PER_THREAD
  /* The key's destructor frees each thread's semaphore at thread exit. */
  pthread_key_create(&sys_thread_sem_key, sys_thread_sem_free);
#endif
}
768
769 /*-----------------------------------------------------------------------------------*/
770 /* Critical section */
771 #if SYS_LIGHTWEIGHT_PROT
772 /** sys_prot_t sys_arch_protect(void)
773
774 This optional function does a "fast" critical region protection and returns
775 the previous protection level. This function is only called during very short
776 critical regions. An embedded system which supports ISR-based drivers might
777 want to implement this function by disabling interrupts. Task-based systems
778 might want to implement this by using a mutex or disabling tasking. This
779 function should support recursive calls from the same task or interrupt. In
780 other words, sys_arch_protect() could be called while already protected. In
781 that case the return value indicates that it is already protected.
782
783 sys_arch_protect() is only required if your port is supporting an operating
784 system.
785 */
786 sys_prot_t
sys_arch_protect(void)787 sys_arch_protect(void)
788 {
789 /* Note that for the UNIX port, we are using a lightweight mutex, and our
790 * own counter (which is locked by the mutex). The return code is not actually
791 * used. */
792 if (lwprot_thread != pthread_self())
793 {
794 /* We are locking the mutex where it has not been locked before *
795 * or is being locked by another thread */
796 pthread_mutex_lock(&lwprot_mutex);
797 lwprot_thread = pthread_self();
798 lwprot_count = 1;
799 }
800 else
801 /* It is already locked by THIS thread */
802 lwprot_count++;
803 return 0;
804 }
805
806 /** void sys_arch_unprotect(sys_prot_t pval)
807
808 This optional function does a "fast" set of critical region protection to the
809 value specified by pval. See the documentation for sys_arch_protect() for
810 more information. This function is only required if your port is supporting
811 an operating system.
812 */
813 void
sys_arch_unprotect(sys_prot_t pval)814 sys_arch_unprotect(sys_prot_t pval)
815 {
816 LWIP_UNUSED_ARG(pval);
817 if (lwprot_thread == pthread_self())
818 {
819 lwprot_count--;
820 if (lwprot_count == 0)
821 {
822 lwprot_thread = (pthread_t) 0xDEAD;
823 pthread_mutex_unlock(&lwprot_mutex);
824 }
825 }
826 }
827 #endif /* SYS_LIGHTWEIGHT_PROT */
828
829 #if !NO_SYS
830 /* get keyboard state to terminate the debug app by using select */
/* Debug helper: poll stdin (fd 0) without blocking via a zero-timeout
 * select(). Returns 1 when input is pending, 0 when not, -1 on error. */
int
lwip_unix_keypressed(void)
{
  struct timeval zero_wait;
  fd_set readable;

  zero_wait.tv_sec = 0L;
  zero_wait.tv_usec = 0L;
  FD_ZERO(&readable);
  FD_SET(0, &readable);
  return select(1, &readable, NULL, NULL, &zero_wait);
}
840 #endif /* !NO_SYS */
841