/*
 * Copyright (c) 2013-2018, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define LOCAL_TRACE 0

#include <assert.h>
#include <debug.h>
#include <err.h>
#include <list.h>  // for containerof
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>

#include <kernel/event.h>
#include <kernel/wait.h>

#if WITH_TRUSTY_IPC

#include <lib/syscall.h>
#include <lib/trusty/handle.h>

void handle_init_etc(struct handle* handle,
                     struct handle_ops* ops,
                     uint32_t flags) {
    DEBUG_ASSERT(handle);
    DEBUG_ASSERT(ops);
    DEBUG_ASSERT(ops->destroy);

    refcount_init(&handle->refcnt);
    handle->flags = flags;
    handle->ops = ops;
    handle->wait_event = NULL;
    spin_lock_init(&handle->slock);
    handle->cookie = NULL;
    list_clear_node(&handle->hlist_node);
    list_initialize(&handle->waiter_list);
}

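/*
 * Usage sketch (illustrative, not part of this file; 'struct foo' and its
 * helpers are hypothetical names): an owner embeds struct handle in its own
 * object and supplies a handle_ops table. ops->destroy is mandatory and must
 * free the containing object once the last reference is dropped:
 *
 *     struct foo {
 *         struct handle handle;
 *         int state;
 *     };
 *
 *     static void foo_destroy(struct handle* h) {
 *         struct foo* foo = containerof(h, struct foo, handle);
 *         free(foo);
 *     }
 *
 *     static struct handle_ops foo_ops = {
 *         .destroy = foo_destroy,
 *         // .poll, .shutdown, and .mmap are optional
 *     };
 *
 *     struct foo* foo = calloc(1, sizeof(*foo));
 *     handle_init_etc(&foo->handle, &foo_ops, 0);
 */
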
static void __handle_destroy_ref(struct refcount* ref) {
    DEBUG_ASSERT(ref);

    struct handle* handle = containerof(ref, struct handle, refcnt);
    handle->ops->destroy(handle);
}

void handle_incref(struct handle* handle) {
    DEBUG_ASSERT(handle);
    refcount_inc(&handle->refcnt);
}

void handle_decref(struct handle* handle) {
    DEBUG_ASSERT(handle);
    refcount_dec(&handle->refcnt, __handle_destroy_ref);
}

void handle_close(struct handle* handle) {
    DEBUG_ASSERT(handle);
    if (handle->ops->shutdown)
        handle->ops->shutdown(handle);
    handle_decref(handle);
}

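/*
 * Lifecycle sketch (illustrative, assuming refcount_init starts the count at
 * one, as the close path implies): handle_close runs the optional shutdown op
 * and drops the creator's reference, but the object is only destroyed when
 * the last reference goes away, so a handle may outlive its close:
 *
 *     handle_incref(h);   // e.g. while handing h to another thread
 *     handle_close(h);    // shutdown + drop creator's reference
 *     ...                 // h is still valid here, refcount > 0
 *     handle_decref(h);   // last reference: ops->destroy(h) runs
 */
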
static int __do_wait(event_t* ev, lk_time_t timeout) {
    int ret;

    LTRACEF("waiting\n");
    ret = event_wait_timeout(ev, timeout);
    LTRACEF("waited\n");
    return ret;
}

static int _prepare_wait_handle(event_t* ev, struct handle* handle) {
    int ret = 0;
    spin_lock_saved_state_t state;

    spin_lock_save(&handle->slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    if (unlikely(handle->wait_event)) {
        LTRACEF("someone is already waiting on handle %p?!\n", handle);
        ret = ERR_ALREADY_STARTED;
    } else {
        handle->wait_event = ev;
    }
    spin_unlock_restore(&handle->slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
    return ret;
}

static void _finish_wait_handle(struct handle* handle) {
    spin_lock_saved_state_t state;

    /* clear out our event ptr */
    spin_lock_save(&handle->slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    handle->wait_event = NULL;
    spin_unlock_restore(&handle->slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
}

void handle_add_waiter(struct handle* h, struct handle_waiter* w) {
    spin_lock_saved_state_t state;

    spin_lock_save(&h->slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    list_add_tail(&h->waiter_list, &w->node);
    spin_unlock_restore(&h->slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
}

void handle_del_waiter(struct handle* h, struct handle_waiter* w) {
    spin_lock_saved_state_t state;

    spin_lock_save(&h->slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    list_delete(&w->node);
    spin_unlock_restore(&h->slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
}

int handle_wait(struct handle* handle,
                uint32_t* handle_event,
                lk_time_t timeout) {
    uint32_t event;
    int ret = 0;
    struct handle_event_waiter ew = HANDLE_EVENT_WAITER_INITIAL_VALUE(ew);

    if (!handle || !handle_event)
        return ERR_INVALID_ARGS;

    if (!handle->ops->poll)
        return ERR_NOT_SUPPORTED;

    handle_add_waiter(handle, &ew.waiter);

    while (true) {
        event = handle->ops->poll(handle, ~0U, true);
        if (event)
            break;
        ret = __do_wait(&ew.event, timeout);
        if (ret < 0)
            goto finish_wait;
    }

    *handle_event = event;
    ret = NO_ERROR;

finish_wait:
    handle_del_waiter(handle, &ew.waiter);
    event_destroy(&ew.event);
    return ret;
}

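/*
 * Usage sketch (illustrative): a caller blocks until any event is pending on
 * a single handle, then inspects the returned event mask:
 *
 *     uint32_t event;
 *     int rc = handle_wait(h, &event, INFINITE_TIME);
 *     if (rc == NO_ERROR) {
 *         // 'event' holds the bits reported by h->ops->poll()
 *     } else if (rc == ERR_TIMED_OUT) {
 *         // the timeout expired with no pending events
 *     }
 */
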
void handle_notify_waiters_locked(struct handle* handle) {
    struct handle_waiter* w;

    list_for_every_entry(&handle->waiter_list, w, struct handle_waiter, node) {
        w->notify_proc(w);
    }
    if (handle->wait_event) {
        LTRACEF("notifying handle %p wait_event %p\n", handle,
                handle->wait_event);
        event_signal(handle->wait_event, false);
    }
}

void handle_notify(struct handle* handle) {
    DEBUG_ASSERT(handle);

    spin_lock_saved_state_t state;
    spin_lock_save(&handle->slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    handle_notify_waiters_locked(handle);
    spin_unlock_restore(&handle->slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
}

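/*
 * Waiter sketch (illustrative; 'my_waiter' and 'my_notify' are hypothetical):
 * a custom waiter embeds struct handle_waiter and supplies a notify_proc
 * callback. notify_proc runs with the handle's spinlock held, so it must not
 * block; signaling an event without rescheduling, as
 * handle_notify_waiters_locked itself does, is safe:
 *
 *     struct my_waiter {
 *         struct handle_waiter waiter;
 *         event_t ev;
 *     };
 *
 *     static void my_notify(struct handle_waiter* w) {
 *         struct my_waiter* mw = containerof(w, struct my_waiter, waiter);
 *         event_signal(&mw->ev, false);
 *     }
 *
 *     // register: mw->waiter.notify_proc = my_notify;
 *     //           handle_add_waiter(h, &mw->waiter);
 *     // later:    handle_del_waiter(h, &mw->waiter);
 */
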
void handle_list_init(struct handle_list* hlist) {
    DEBUG_ASSERT(hlist);

    *hlist = (struct handle_list)HANDLE_LIST_INITIAL_VALUE(*hlist);
}

void handle_list_add(struct handle_list* hlist, struct handle* handle) {
    DEBUG_ASSERT(hlist);
    DEBUG_ASSERT(handle);
    DEBUG_ASSERT(!list_in_list(&handle->hlist_node));

    handle_incref(handle);
    mutex_acquire(&hlist->lock);
    list_add_tail(&hlist->handles, &handle->hlist_node);
    if (hlist->wait_event) {
        /* somebody is waiting on list */
        _prepare_wait_handle(hlist->wait_event, handle);

        /* call poll to check if it is already signaled */
        uint32_t event = handle->ops->poll(handle, ~0U, false);
        if (event) {
            handle_notify(handle);
        }
    }
    mutex_release(&hlist->lock);
}

static void _handle_list_del_locked(struct handle_list* hlist,
                                    struct handle* handle) {
    DEBUG_ASSERT(hlist);
    DEBUG_ASSERT(handle);
    DEBUG_ASSERT(list_in_list(&handle->hlist_node));

    /* remove item from list */
    list_delete(&handle->hlist_node);

    /* check if somebody is waiting on this handle list */
    if (hlist->wait_event) {
        /* finish waiting */
        _finish_wait_handle(handle);
        if (list_is_empty(&hlist->handles)) {
            /* wakeup waiter if list is now empty */
            event_signal(hlist->wait_event, true);
        }
    }
    handle_decref(handle);
}

void handle_list_del(struct handle_list* hlist, struct handle* handle) {
    DEBUG_ASSERT(hlist);
    DEBUG_ASSERT(handle);

    mutex_acquire(&hlist->lock);
    _handle_list_del_locked(hlist, handle);
    mutex_release(&hlist->lock);
}

void handle_list_delete_all(struct handle_list* hlist) {
    DEBUG_ASSERT(hlist);

    mutex_acquire(&hlist->lock);
    while (!list_is_empty(&hlist->handles)) {
        struct handle* handle;

        handle =
                list_peek_head_type(&hlist->handles, struct handle, hlist_node);
        _handle_list_del_locked(hlist, handle);
    }
    mutex_release(&hlist->lock);
}

/*
 *  Iterate the handle list and call _finish_wait_handle on each item up to
 *  and including the one specified by the 'last' parameter. If 'last' is
 *  NULL, iterate the whole list.
 */
static void _hlist_finish_wait_locked(struct handle_list* hlist,
                                      struct handle* last) {
    struct handle* handle;
    list_for_every_entry(&hlist->handles, handle, struct handle, hlist_node) {
        _finish_wait_handle(handle);
        if (handle == last)
            break;
    }
}

/*
 *  Iterate the handle list, calling _prepare_wait_handle (if requested) and
 *  poll on each handle until a ready one is found, and return that handle to
 *  the caller. Undo the prepare op if a ready handle is found or an error
 *  occurred.
 */
static int _hlist_do_poll_locked(struct handle_list* hlist,
                                 struct handle** handle_ptr,
                                 uint32_t* event_ptr,
                                 bool prepare) {
    int ret = 0;

    DEBUG_ASSERT(hlist->wait_event);

    if (list_is_empty(&hlist->handles))
        return ERR_NOT_FOUND; /* no handles in the list */

    struct handle* next;
    struct handle* last_prep = NULL;
    list_for_every_entry(&hlist->handles, next, struct handle, hlist_node) {
        if (prepare) {
            ret = _prepare_wait_handle(hlist->wait_event, next);
            if (ret)
                break;
            last_prep = next;
        }

        uint32_t event = next->ops->poll(next, ~0U, true);
        if (event) {
            *event_ptr = event;
            *handle_ptr = next;
            ret = 1;
            break;
        }
    }

    if (ret && prepare && last_prep) {
        /* need to undo prepare */
        _hlist_finish_wait_locked(hlist, last_prep);
    }
    return ret;
}

/* Fills in the handle that has a pending event. The reference taken by the
 * list is not dropped until the caller has had a chance to process the
 * handle.
 */
int handle_list_wait(struct handle_list* hlist,
                     struct handle** handle_ptr,
                     uint32_t* event_ptr,
                     lk_time_t timeout) {
    int ret;
    event_t ev;

    DEBUG_ASSERT(hlist);
    DEBUG_ASSERT(handle_ptr);
    DEBUG_ASSERT(event_ptr);

    event_init(&ev, false, EVENT_FLAG_AUTOUNSIGNAL);

    *event_ptr = 0;
    *handle_ptr = 0;

    mutex_acquire(&hlist->lock);

    DEBUG_ASSERT(hlist->wait_event == NULL);

    hlist->wait_event = &ev;
    ret = _hlist_do_poll_locked(hlist, handle_ptr, event_ptr, true);
    if (ret < 0)
        goto err_do_poll;

    if (ret == 0) {
        /* no handles ready */
        do {
            mutex_release(&hlist->lock);
            ret = __do_wait(&ev, timeout);
            mutex_acquire(&hlist->lock);

            if (ret < 0)
                break;

            /* poll again */
            ret = _hlist_do_poll_locked(hlist, handle_ptr, event_ptr, false);
        } while (!ret);

        _hlist_finish_wait_locked(hlist, NULL);
    }

    if (ret == 1) {
        struct handle* handle = *handle_ptr;

        handle_incref(handle);

        /* move list head after item we just found, so the next scan starts
         * with the handle that follows it (round-robin across ready handles)
         */
        list_delete(&hlist->handles);
        list_add_head(&handle->hlist_node, &hlist->handles);

        ret = NO_ERROR;
    }

err_do_poll:
    hlist->wait_event = NULL;
    mutex_release(&hlist->lock);
    event_destroy(&ev);
    return ret;
}

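/*
 * Usage sketch (illustrative): per the comment above, the extra reference
 * taken on the returned handle must be dropped by the caller once the event
 * has been processed:
 *
 *     struct handle* h;
 *     uint32_t event;
 *     int rc = handle_list_wait(&hlist, &h, &event, INFINITE_TIME);
 *     if (rc == NO_ERROR) {
 *         // ... process 'event' on 'h' ...
 *         handle_decref(h);  // drop the reference handle_list_wait took
 *     }
 */
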
status_t handle_mmap(struct handle* handle,
                     size_t offset,
                     user_size_t size,
                     uint32_t mmap_prot,
                     user_addr_t* addr) {
    LTRACEF("mmap_prot 0x%x\n", mmap_prot);
    if (handle->ops->mmap) {
        return handle->ops->mmap(handle, offset, size, mmap_prot, addr);
    } else {
        return ERR_INVALID_ARGS;
    }
}

#endif /* WITH_TRUSTY_IPC */