/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pthread_impl.h"

#define IS32BIT(x) !((x)+0x80000000ULL>>32)
#define CLAMP(x) (int)(IS32BIT(x) ? (x) : 0x7fffffffU+((0ULL+(x))>>63))

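/* Invoke the futex syscall with an optional absolute timeout, using
 * SYS_futex_time64 where it is the native call or the timeout does not
 * fit in 32 bits, and otherwise falling back to legacy SYS_futex with a
 * clamped 32-bit seconds value. */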
static int __futex4(volatile void *addr, int op, int val, const struct timespec *to)
{
#ifdef SYS_futex_time64
    time_t s = to ? to->tv_sec : 0;
    long ns = to ? to->tv_nsec : 0;
    int r = -ENOSYS;
    if (SYS_futex == SYS_futex_time64 || !IS32BIT(s))
        r = __syscall(SYS_futex_time64, addr, op, val,
            to ? ((long long[]){s, ns}) : 0);
    if (SYS_futex == SYS_futex_time64 || r!=-ENOSYS) return r;
    to = to ? (void *)(long[]){CLAMP(s), ns} : 0;
#endif
    return __syscall(SYS_futex, addr, op, val, to);
}

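/* Acquire a priority-inheritance mutex by asking the kernel to perform
 * the acquisition with FUTEX_LOCK_PI, then translate the result into
 * pthread semantics. */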
static int pthread_mutex_timedlock_pi(pthread_mutex_t *restrict m, const struct timespec *restrict at)
{
    int clock = m->_m_clock;
    int type = m->_m_type;
    int priv = (type & 128) ^ 128;
    pthread_t self = __pthread_self();
    int e;

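    /* For process-shared mutexes, record the lock we are about to
     * acquire in the robust-list pending slot. */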
    if (!priv) self->robust_list.pending = &m->_m_next;

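    /* Ask the kernel to take the lock on our behalf, retrying if the
     * wait is interrupted by a signal. */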
    do e = -__futex4(&m->_m_lock, FUTEX_LOCK_PI|priv, 0, at);
    while (e==EINTR);
    if (e) self->robust_list.pending = 0;

    switch (e) {
    case 0:
        /* Catch spurious success for non-robust mutexes. */
        if (!(type&4) && ((m->_m_lock & 0x40000000) || m->_m_waiters)) {
            a_store(&m->_m_waiters, -1);
            __syscall(SYS_futex, &m->_m_lock, FUTEX_UNLOCK_PI|priv);
            self->robust_list.pending = 0;
            break;
        }
        /* Signal to trylock that we already have the lock. */
        m->_m_count = -1;
        return __pthread_mutex_trylock_owner(m);
    case ETIMEDOUT:
        return e;
    case EDEADLK:
        if ((type&3) == PTHREAD_MUTEX_ERRORCHECK) return e;
    }
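    /* The remaining cases have no result that can be reported to the
     * caller (e.g. self-deadlock on a type that does not return
     * EDEADLK), so block on a dummy futex until the timeout expires. */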
    do e = __timedwait(&(int){0}, 0, clock, at, 1);
    while (e != ETIMEDOUT);
    return e;
}

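/* Contended-path lock: PI mutexes are handed to the kernel; everything
 * else spins briefly and then sleeps on the lock word until trylock
 * succeeds or the wait times out. */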
int __pthread_mutex_timedlock_inner(pthread_mutex_t *restrict m, const struct timespec *restrict at)
{
    int type = m->_m_type;
    int r;
    // PI: priority-inheritance mutexes take the kernel futex-PI path.
    if (type&8) {
        r = __pthread_mutex_trylock_owner(m);
        if (r != EBUSY) return r;
        return pthread_mutex_timedlock_pi(m, at);
    }
    r = __pthread_mutex_trylock(m);
    if (r != EBUSY) return r;
    int clock = (m->_m_clock == CLOCK_MONOTONIC) ? CLOCK_MONOTONIC : CLOCK_REALTIME;
    int t, priv = (type & 128) ^ 128;
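    /* Spin briefly in the hope that the holder releases the lock soon,
     * but only while no other thread is already waiting. */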
    int spins = 100;
    while (spins-- && m->_m_lock && !m->_m_waiters) a_spin();

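    /* Register as a waiter, set the contention bit in the lock word,
     * and sleep on it until trylock succeeds or the wait times out. */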
    while ((r=__pthread_mutex_trylock(m)) == EBUSY) {
        r = m->_m_lock;
        int own = r & 0x3fffffff;
        if (!own && (!r || (type&4)))
            continue;
        if ((type&3) == PTHREAD_MUTEX_ERRORCHECK
            && own == __pthread_self()->tid)
            return EDEADLK;

        a_inc(&m->_m_waiters);
        t = r | 0x80000000;
        a_cas(&m->_m_lock, r, t);
        r = __timedwait(&m->_m_lock, t, clock, at, priv);
        a_dec(&m->_m_waiters);
        if (r && r != EINTR) break;
    }
    return r;
}

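/* Fast path: an uncontended normal mutex is taken with a single atomic
 * compare-and-swap; anything else falls through to the slow path. */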
int __pthread_mutex_timedlock(pthread_mutex_t *restrict m, const struct timespec *restrict at)
{
    if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL
        && !a_cas(&m->_m_lock, 0, EBUSY))
        return 0;
    return __pthread_mutex_timedlock_inner(m, at);
}

weak_alias(__pthread_mutex_timedlock, pthread_mutex_timedlock);