/******************************************************************************
 *
 * Copyright (C) 2014 Google, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 ******************************************************************************/

#define LOG_TAG "bt_osi_thread"

#include <assert.h>
#include <errno.h>
#include <malloc.h>
#include <pthread.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/types.h>

#include "osi/include/allocator.h"
#include "osi/include/compat.h"
#include "osi/include/fixed_queue.h"
#include "osi/include/log.h"
#include "osi/include/reactor.h"
#include "osi/include/semaphore.h"
#include "osi/include/thread.h"

struct thread_t {
  bool is_joined;
  pthread_t pthread;
  pid_t tid;
  char name[THREAD_NAME_MAX + 1];
  reactor_t *reactor;
  fixed_queue_t *work_queue;
};

struct start_arg {
  thread_t *thread;
  semaphore_t *start_sem;
  int error;
};

typedef struct {
  thread_fn func;
  void *context;
} work_item_t;

static void *run_thread(void *start_arg);
static void work_queue_read_cb(void *context);

static const size_t DEFAULT_WORK_QUEUE_CAPACITY = 128;

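// Creates and starts a new thread named |name| whose work queue holds up to
// |work_queue_capacity| items. Blocks until the new thread is running (or
// has failed to start). Returns NULL on failure.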
thread_t *thread_new_sized(const char *name, size_t work_queue_capacity) {
  assert(name != NULL);
  assert(work_queue_capacity != 0);

  thread_t *ret = osi_calloc(sizeof(thread_t));
  if (!ret)
    goto error;

  ret->reactor = reactor_new();
  if (!ret->reactor)
    goto error;

  ret->work_queue = fixed_queue_new(work_queue_capacity);
  if (!ret->work_queue)
    goto error;

  // |start| is on the stack, but that's safe: run_thread() posts |start_sem|
  // once it has finished reading |start|, and we block on that semaphore
  // below before returning.
  struct start_arg start;
  start.start_sem = semaphore_new(0);
  if (!start.start_sem)
    goto error;

  // |ret->name| was zeroed by osi_calloc, so it stays NUL-terminated even
  // when |name| is exactly THREAD_NAME_MAX characters long.
  strncpy(ret->name, name, THREAD_NAME_MAX);
  start.thread = ret;
  start.error = 0;
  if (pthread_create(&ret->pthread, NULL, run_thread, &start) != 0) {
    semaphore_free(start.start_sem);
    goto error;
  }
  semaphore_wait(start.start_sem);
  semaphore_free(start.start_sem);

  if (start.error)
    goto error;

  return ret;

error:;
  if (ret) {
    fixed_queue_free(ret->work_queue, osi_free);
    reactor_free(ret->reactor);
  }
  osi_free(ret);
  return NULL;
}

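// As thread_new_sized(), but with the default work queue capacity.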
thread_t *thread_new(const char *name) {
  return thread_new_sized(name, DEFAULT_WORK_QUEUE_CAPACITY);
}

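// Stops |thread|, joins it, and releases all of its resources. Safe to call
// with NULL.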
void thread_free(thread_t *thread) {
  if (!thread)
    return;

  thread_stop(thread);
  thread_join(thread);

  fixed_queue_free(thread->work_queue, osi_free);
  reactor_free(thread->reactor);
  osi_free(thread);
}

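// Waits for |thread| to finish; subsequent calls are no-ops.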
void thread_join(thread_t *thread) {
  assert(thread != NULL);

  // TODO(zachoverflow): use a compare and swap when ready
  if (!thread->is_joined) {
    thread->is_joined = true;
    pthread_join(thread->pthread, NULL);
  }
}

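// Schedules |func| to run on |thread| with |context|. May block if the work
// queue is full. Returns true if the item was enqueued.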
bool thread_post(thread_t *thread, thread_fn func, void *context) {
  assert(thread != NULL);
  assert(func != NULL);

  // TODO(sharvil): if the current thread == |thread| and we've run out
  // of queue space, we should abort this operation, otherwise we'll
  // deadlock.

  // Queue item is freed either when the queue itself is destroyed
  // or when the item is removed from the queue for dispatch.
  work_item_t *item = (work_item_t *)osi_malloc(sizeof(work_item_t));
  if (!item) {
    LOG_ERROR("%s unable to allocate memory: %s", __func__, strerror(errno));
    return false;
  }
  item->func = func;
  item->context = context;
  fixed_queue_enqueue(thread->work_queue, item);
  return true;
}

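// Asks |thread|'s reactor to stop; already-queued work items are still
// dispatched before the thread exits (see run_thread()).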
void thread_stop(thread_t *thread) {
  assert(thread != NULL);
  reactor_stop(thread->reactor);
}

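// Sets the scheduling priority of |thread|'s kernel task. Only meaningful
// once the thread is running, since |tid| is captured in run_thread().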
bool thread_set_priority(thread_t *thread, int priority) {
  if (!thread)
    return false;

  // setpriority() returns -1 and sets errno on failure, so log the errno
  // string rather than the uninformative return code.
  const int rc = setpriority(PRIO_PROCESS, thread->tid, priority);
  if (rc < 0) {
    LOG_ERROR("%s unable to set thread priority %d for tid %d, error: %s",
      __func__, priority, thread->tid, strerror(errno));
    return false;
  }

  return true;
}

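// Returns true if the caller is currently running on |thread|.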
bool thread_is_self(const thread_t *thread) {
  assert(thread != NULL);
  return !!pthread_equal(pthread_self(), thread->pthread);
}

reactor_t *thread_get_reactor(const thread_t *thread) {
  assert(thread != NULL);
  return thread->reactor;
}

const char *thread_name(const thread_t *thread) {
  assert(thread != NULL);
  return thread->name;
}

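// Thread entry point: names the thread, publishes its tid, signals the
// creating thread via |start_sem|, then runs the reactor loop, dispatching
// work items as they arrive. On shutdown, drains any remaining items.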
static void *run_thread(void *start_arg) {
  assert(start_arg != NULL);

  struct start_arg *start = start_arg;
  thread_t *thread = start->thread;

  assert(thread != NULL);

  if (prctl(PR_SET_NAME, (unsigned long)thread->name) == -1) {
    LOG_ERROR("%s unable to set thread name: %s", __func__, strerror(errno));
    start->error = errno;
    semaphore_post(start->start_sem);
    return NULL;
  }
  thread->tid = gettid();

  semaphore_post(start->start_sem);

  int fd = fixed_queue_get_dequeue_fd(thread->work_queue);
  void *context = thread->work_queue;

  reactor_object_t *work_queue_object =
      reactor_register(thread->reactor, fd, context, work_queue_read_cb, NULL);
  reactor_start(thread->reactor);
  reactor_unregister(work_queue_object);

  // Make sure we dispatch all queued work items before exiting the thread.
  // This allows a caller to safely tear down by enqueuing a teardown
  // work item and then joining the thread (see the sketch after this
  // function).
  size_t count = 0;
  work_item_t *item = fixed_queue_try_dequeue(thread->work_queue);
  while (item && count <= fixed_queue_capacity(thread->work_queue)) {
    item->func(item->context);
    osi_free(item);
    item = fixed_queue_try_dequeue(thread->work_queue);
    ++count;
  }

  if (count > fixed_queue_capacity(thread->work_queue))
    LOG_DEBUG("%s growing event queue on shutdown.", __func__);

  return NULL;
}
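// A minimal sketch of the teardown pattern described above, assuming a
// hypothetical |cleanup| work function and |my_thread| handle (neither is
// part of this file):
//
//   static void cleanup(void *context) {
//     // Runs even after the reactor has stopped, because run_thread()
//     // drains the work queue before returning.
//   }
//
//   thread_post(my_thread, cleanup, NULL);
//   thread_free(my_thread);  // stops the reactor, joins, then frees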

// Callback invoked by the reactor whenever the work queue's dequeue fd is
// readable; dispatches and frees a single work item.
static void work_queue_read_cb(void *context) {
  assert(context != NULL);

  fixed_queue_t *queue = (fixed_queue_t *)context;
  work_item_t *item = fixed_queue_dequeue(queue);
  item->func(item->context);
  osi_free(item);
}