/*
 * Copyright (c) 2013-2018, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define LOCAL_TRACE 0

#include <assert.h>
#include <debug.h>
#include <err.h>
#include <list.h> // for containerof
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>

#include <kernel/event.h>
#include <kernel/wait.h>

#if WITH_TRUSTY_IPC

#include <lib/syscall.h>
#include <lib/trusty/handle.h>

void handle_init_etc(struct handle* handle,
                     struct handle_ops* ops,
                     uint32_t flags) {
    DEBUG_ASSERT(handle);
    DEBUG_ASSERT(ops);
    DEBUG_ASSERT(ops->destroy);

    refcount_init(&handle->refcnt);
    handle->flags = flags;
    handle->ops = ops;
    handle->wait_event = NULL;
    spin_lock_init(&handle->slock);
    handle->cookie = NULL;
    list_clear_node(&handle->hlist_node);
    list_initialize(&handle->waiter_list);
}

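/*
 * Illustrative sketch (hypothetical my_obj names): a handle implementation
 * embeds struct handle, provides a struct handle_ops with at least the
 * mandatory destroy callback, and initializes it with handle_init_etc():
 *
 *   struct my_obj {
 *       struct handle handle;
 *       int state;
 *   };
 *
 *   static void my_obj_destroy(struct handle* h) {
 *       struct my_obj* obj = containerof(h, struct my_obj, handle);
 *       free(obj);
 *   }
 *
 *   static struct handle_ops my_obj_ops = {
 *       .destroy = my_obj_destroy,
 *   };
 *
 *   // handle_init_etc(&obj->handle, &my_obj_ops, 0);
 */
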
static void __handle_destroy_ref(struct refcount* ref) {
    DEBUG_ASSERT(ref);

    struct handle* handle = containerof(ref, struct handle, refcnt);
    handle->ops->destroy(handle);
}

void handle_incref(struct handle* handle) {
    DEBUG_ASSERT(handle);
    refcount_inc(&handle->refcnt);
}

void handle_decref(struct handle* handle) {
    DEBUG_ASSERT(handle);
    refcount_dec(&handle->refcnt, __handle_destroy_ref);
}

void handle_close(struct handle* handle) {
    DEBUG_ASSERT(handle);
    if (handle->ops->shutdown)
        handle->ops->shutdown(handle);
    handle_decref(handle);
}

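/*
 * Reference-count lifecycle (as implemented above): the creator holds the
 * initial reference from refcount_init(); handle_incref()/handle_decref()
 * take and drop temporary references, and ops->destroy runs only when the
 * last reference is dropped. handle_close() additionally gives the
 * implementation its optional shutdown callback before dropping the
 * caller's reference. A typical temporary-reference pattern
 * (do_something() is a hypothetical helper):
 *
 *   handle_incref(handle);   // keep the handle alive across the operation
 *   do_something(handle);
 *   handle_decref(handle);   // may invoke ops->destroy if this was the last ref
 */
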
static int __do_wait(event_t* ev, lk_time_t timeout) {
    int ret;

    LTRACEF("waiting\n");
    ret = event_wait_timeout(ev, timeout);
    LTRACEF("waited\n");
    return ret;
}

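/*
 * Note: only a single event can be registered as a handle's wait_event at a
 * time; a second _prepare_wait_handle() on the same handle fails with
 * ERR_ALREADY_STARTED. handle_list_wait() relies on this when it attaches
 * one event to every handle in a list.
 */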
static int _prepare_wait_handle(event_t* ev, struct handle* handle) {
    int ret = 0;
    spin_lock_saved_state_t state;

    spin_lock_save(&handle->slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    if (unlikely(handle->wait_event)) {
        LTRACEF("someone is already waiting on handle %p?!\n", handle);
        ret = ERR_ALREADY_STARTED;
    } else {
        handle->wait_event = ev;
    }
    spin_unlock_restore(&handle->slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
    return ret;
}

static void _finish_wait_handle(struct handle* handle) {
    spin_lock_saved_state_t state;

    /* clear out our event ptr */
    spin_lock_save(&handle->slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    handle->wait_event = NULL;
    spin_unlock_restore(&handle->slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
}

void handle_add_waiter(struct handle* h, struct handle_waiter* w) {
    spin_lock_saved_state_t state;

    spin_lock_save(&h->slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    list_add_tail(&h->waiter_list, &w->node);
    spin_unlock_restore(&h->slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
}

void handle_del_waiter(struct handle* h, struct handle_waiter* w) {
    spin_lock_saved_state_t state;

    spin_lock_save(&h->slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    list_delete(&w->node);
    spin_unlock_restore(&h->slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
}

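/*
 * Illustrative sketch of a custom waiter (hypothetical my_waiter names;
 * struct handle_waiter is assumed, from its use above, to hold a list node
 * plus the notify_proc callback). notify_proc runs from
 * handle_notify_waiters_locked() under the handle's spinlock with
 * interrupts disabled, so it must not block; signaling an event is typical:
 *
 *   struct my_waiter {
 *       struct handle_waiter waiter;
 *       event_t ev;
 *   };
 *
 *   static void my_notify(struct handle_waiter* w) {
 *       struct my_waiter* mw = containerof(w, struct my_waiter, waiter);
 *       event_signal(&mw->ev, false);  // no reschedule in irq-off context
 *   }
 *
 *   // mw.waiter.notify_proc = my_notify;
 *   // handle_add_waiter(h, &mw.waiter);
 *   // ... event_wait_timeout(&mw.ev, timeout); ...
 *   // handle_del_waiter(h, &mw.waiter);
 */
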
int handle_wait(struct handle* handle,
                uint32_t* handle_event,
                lk_time_t timeout) {
    uint32_t event;
    int ret = 0;
    struct handle_event_waiter ew = HANDLE_EVENT_WAITER_INITIAL_VALUE(ew);

    if (!handle || !handle_event)
        return ERR_INVALID_ARGS;

    if (!handle->ops->poll)
        return ERR_NOT_SUPPORTED;

    handle_add_waiter(handle, &ew.waiter);

    while (true) {
        event = handle->ops->poll(handle, ~0U, true);
        if (event)
            break;
        ret = __do_wait(&ew.event, timeout);
        if (ret < 0)
            goto finish_wait;
    }

    *handle_event = event;
    ret = NO_ERROR;

finish_wait:
    handle_del_waiter(handle, &ew.waiter);
    event_destroy(&ew.event);
    return ret;
}

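/*
 * Illustrative use of handle_wait() from a hypothetical caller: block until
 * the handle reports any event. A negative return is an error propagated
 * from __do_wait() (e.g. ERR_TIMED_OUT when the timeout expires); on
 * NO_ERROR, handle_event holds the nonzero event bits reported by
 * ops->poll:
 *
 *   uint32_t event;
 *   int rc = handle_wait(handle, &event, 1000);
 *   if (rc == NO_ERROR) {
 *       // dispatch on the poll bits in 'event' here
 *   }
 */
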
void handle_notify_waiters_locked(struct handle* handle) {
    struct handle_waiter* w;

    list_for_every_entry(&handle->waiter_list, w, struct handle_waiter, node) {
        w->notify_proc(w);
    }
    if (handle->wait_event) {
        LTRACEF("notifying handle %p wait_event %p\n", handle,
                handle->wait_event);
        event_signal(handle->wait_event, false);
    }
}

void handle_notify(struct handle* handle) {
    DEBUG_ASSERT(handle);

    spin_lock_saved_state_t state;
    spin_lock_save(&handle->slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    handle_notify_waiters_locked(handle);
    spin_unlock_restore(&handle->slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
}

bool handle_ref_is_attached(const struct handle_ref* const ref) {
    return list_in_list(&ref->set_node);
}

void handle_list_init(struct handle_list* hlist) {
    DEBUG_ASSERT(hlist);

    *hlist = (struct handle_list)HANDLE_LIST_INITIAL_VALUE(*hlist);
}

void handle_list_add(struct handle_list* hlist, struct handle* handle) {
    DEBUG_ASSERT(hlist);
    DEBUG_ASSERT(handle);
    DEBUG_ASSERT(!list_in_list(&handle->hlist_node));

    handle_incref(handle);
    mutex_acquire(&hlist->lock);
    list_add_tail(&hlist->handles, &handle->hlist_node);
    if (hlist->wait_event) {
        /* somebody is waiting on list */
        _prepare_wait_handle(hlist->wait_event, handle);

        /* call poll to check if it is already signaled */
        uint32_t event = handle->ops->poll(handle, ~0U, false);
        if (event) {
            handle_notify(handle);
        }
    }
    mutex_release(&hlist->lock);
}

static void _handle_list_del_locked(struct handle_list* hlist,
                                    struct handle* handle) {
    DEBUG_ASSERT(hlist);
    DEBUG_ASSERT(handle);
    DEBUG_ASSERT(list_in_list(&handle->hlist_node));

    /* remove item from list */
    list_delete(&handle->hlist_node);

    /* check if somebody is waiting on this handle list */
    if (hlist->wait_event) {
        /* finish waiting */
        _finish_wait_handle(handle);
        if (list_is_empty(&hlist->handles)) {
            /* wakeup waiter if list is now empty */
            event_signal(hlist->wait_event, true);
        }
    }
    handle_decref(handle);
}

void handle_list_del(struct handle_list* hlist, struct handle* handle) {
    DEBUG_ASSERT(hlist);
    DEBUG_ASSERT(handle);

    mutex_acquire(&hlist->lock);
    _handle_list_del_locked(hlist, handle);
    mutex_release(&hlist->lock);
}

void handle_list_delete_all(struct handle_list* hlist) {
    DEBUG_ASSERT(hlist);

    mutex_acquire(&hlist->lock);
    while (!list_is_empty(&hlist->handles)) {
        struct handle* handle;

        handle =
                list_peek_head_type(&hlist->handles, struct handle, hlist_node);
        _handle_list_del_locked(hlist, handle);
    }
    mutex_release(&hlist->lock);
}

/*
 * Iterate the handle list and call _finish_wait_handle() on each item up to
 * and including the one specified by the 'last' parameter. If 'last' is
 * NULL, iterate the whole list.
 */
static void _hlist_finish_wait_locked(struct handle_list* hlist,
                                      struct handle* last) {
    struct handle* handle;
    list_for_every_entry(&hlist->handles, handle, struct handle, hlist_node) {
        _finish_wait_handle(handle);
        if (handle == last)
            break;
    }
}

/*
 * Iterate the handle list, calling _prepare_wait_handle() (if requested) and
 * poll on each handle until a ready one is found, and return that handle to
 * the caller. Undo the prepare op on all visited handles if a ready handle
 * is found or an error occurs. Returns 1 if a ready handle was found, 0 if
 * none is ready, and a negative error code on failure.
 */
static int _hlist_do_poll_locked(struct handle_list* hlist,
                                 struct handle** handle_ptr,
                                 uint32_t* event_ptr,
                                 bool prepare) {
    int ret = 0;

    DEBUG_ASSERT(hlist->wait_event);

    if (list_is_empty(&hlist->handles))
        return ERR_NOT_FOUND; /* no handles in the list */

    struct handle* next;
    struct handle* last_prep = NULL;
    list_for_every_entry(&hlist->handles, next, struct handle, hlist_node) {
        if (prepare) {
            ret = _prepare_wait_handle(hlist->wait_event, next);
            if (ret)
                break;
            last_prep = next;
        }

        uint32_t event = next->ops->poll(next, ~0U, true);
        if (event) {
            *event_ptr = event;
            *handle_ptr = next;
            ret = 1;
            break;
        }
    }

    if (ret && prepare && last_prep) {
        /* need to undo prepare */
        _hlist_finish_wait_locked(hlist, last_prep);
    }
    return ret;
}

/*
 * Fills in the handle that has a pending event. An extra reference is taken
 * on that handle and is not dropped until the caller has had a chance to
 * process it, so the caller must handle_decref() it when done.
 */
int handle_list_wait(struct handle_list* hlist,
                     struct handle** handle_ptr,
                     uint32_t* event_ptr,
                     lk_time_t timeout) {
    int ret;
    event_t ev;

    DEBUG_ASSERT(hlist);
    DEBUG_ASSERT(handle_ptr);
    DEBUG_ASSERT(event_ptr);

    event_init(&ev, false, EVENT_FLAG_AUTOUNSIGNAL);

    *event_ptr = 0;
    *handle_ptr = 0;

    mutex_acquire(&hlist->lock);

    DEBUG_ASSERT(hlist->wait_event == NULL);

    hlist->wait_event = &ev;
    ret = _hlist_do_poll_locked(hlist, handle_ptr, event_ptr, true);
    if (ret < 0)
        goto err_do_poll;

    if (ret == 0) {
        /* no handles ready */
        do {
            mutex_release(&hlist->lock);
            ret = __do_wait(&ev, timeout);
            mutex_acquire(&hlist->lock);

            if (ret < 0)
                break;

            /* poll again */
            ret = _hlist_do_poll_locked(hlist, handle_ptr, event_ptr, false);
        } while (!ret);

        _hlist_finish_wait_locked(hlist, NULL);
    }

    if (ret == 1) {
        struct handle* handle = *handle_ptr;

        handle_incref(handle);

        /* move list head after item we just found */
        list_delete(&hlist->handles);
        list_add_head(&handle->hlist_node, &hlist->handles);

        ret = NO_ERROR;
    }

err_do_poll:
    hlist->wait_event = NULL;
    mutex_release(&hlist->lock);
    event_destroy(&ev);
    return ret;
}

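/*
 * Illustrative handle_list usage from a hypothetical caller (handle_a and
 * handle_b are assumed to be already-initialized handles; INFINITE_TIME is
 * LK's wait-forever timeout): initialize a list, attach handles, then block
 * until any of them reports an event. On NO_ERROR the extra reference taken
 * above on *handle_ptr must be dropped by the caller:
 *
 *   struct handle_list hlist;
 *   handle_list_init(&hlist);
 *   handle_list_add(&hlist, handle_a);
 *   handle_list_add(&hlist, handle_b);
 *
 *   struct handle* ready;
 *   uint32_t event;
 *   int rc = handle_list_wait(&hlist, &ready, &event, INFINITE_TIME);
 *   if (rc == NO_ERROR) {
 *       // process 'ready' / 'event', then drop the wait reference
 *       handle_decref(ready);
 *   }
 */
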
status_t handle_mmap(struct handle* handle,
                     size_t offset,
                     user_size_t size,
                     uint32_t mmap_prot,
                     user_addr_t* addr) {
    LTRACEF("mmap_prot 0x%x\n", mmap_prot);
    if (handle->ops->mmap) {
        return handle->ops->mmap(handle, offset, size, mmap_prot, addr);
    } else {
        return ERR_INVALID_ARGS;
    }
}

#endif /* WITH_TRUSTY_IPC */