/*
 * Copyright (c) 2018, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <bits.h>
#include <debug.h>
#include <err.h>
#include <list.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>

#include <kernel/event.h>
#include <kernel/mutex.h>
#include <kernel/wait.h>

#include <lib/syscall.h>
#include <lib/trusty/handle.h>
#include <lib/trusty/handle_set.h>
#include <lib/trusty/uctx.h>

#define LOCAL_TRACE 0
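
/*
 * A handle set is itself a handle that aggregates other handles, so a
 * client can wait for events on any member with a single call (similar in
 * spirit to epoll). Members are tracked as struct handle_ref entries, and
 * refs whose handles have pending events sit on a per-set ready list.
 * Handle sets may also contain other handle sets, as long as no reference
 * cycle is created.
 *
 * Illustrative usage (a sketch only: error handling is omitted, and the
 * caller is assumed to have zero-initialized the struct handle_ref and
 * filled in its handle/id/cookie/emask fields before attaching):
 *
 *   struct handle* hset = handle_set_create();
 *   handle_set_attach(hset, &ref);
 *
 *   struct handle_ref evt;
 *   if (handle_set_wait(hset, &evt, INFINITE_TIME) == NO_ERROR) {
 *       // evt.handle, evt.cookie and evt.emask describe the ready member;
 *       // handle_set_wait took a reference on evt.handle, so drop it
 *       handle_decref(evt.handle);
 *   }
 */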

#ifdef SPIN_LOCK_FLAG_IRQ_FIQ
#define SLOCK_FLAGS SPIN_LOCK_FLAG_IRQ_FIQ
#else
#define SLOCK_FLAGS SPIN_LOCK_FLAG_INTERRUPTS
#endif

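/*
 * mlock protects ref_list and per-ref bookkeeping; ready_list is protected
 * by the embedded handle's spinlock (handle.slock), since it is also
 * manipulated from waiter-notification context.
 */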
struct handle_set {
    struct mutex mlock;
    struct handle handle;
    struct list_node ref_list;
    struct list_node ready_list;
};

static uint32_t hset_poll(struct handle* handle, uint32_t emask, bool finalize);
static void hset_destroy(struct handle* handle);

static struct handle_ops hset_ops = {
        .poll = hset_poll,
        .destroy = hset_destroy,
};

static struct mutex g_hset_lock = MUTEX_INITIAL_VALUE(g_hset_lock);

static inline bool is_handle_set(struct handle* h) {
    ASSERT(h);
    return h->ops == &hset_ops;
}

static inline struct handle_set* handle_to_handle_set(struct handle* h) {
    ASSERT(is_handle_set(h));
    return containerof(h, struct handle_set, handle);
}

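/*
 * Poll callback for a handle set: the set reports IPC_HANDLE_POLL_READY
 * (filtered through emask) while at least one member ref is queued on
 * its ready list.
 */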
static uint32_t hset_poll(struct handle* h, uint32_t emask, bool finalize) {
    uint32_t event = 0;
    struct handle_set* hset = handle_to_handle_set(h);

    if (!list_is_empty(&hset->ready_list))
        event = IPC_HANDLE_POLL_READY;

    return event & emask;
}

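/*
 * Detach a ref from its parent set: unregister the waiter, pull the ref
 * off ready_list, unlink it from ref_list, and drop the set reference it
 * held. The caller must hold hset->mlock.
 */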
static void hset_detach_ref_locked(struct handle_set* hset,
                                   struct handle_ref* ref) {
    spin_lock_saved_state_t state;

    DEBUG_ASSERT(ref->parent == &hset->handle);

    /* remove from waiter list */
    handle_del_waiter(ref->handle, &ref->waiter);

    /* remove from ready_list */
    spin_lock_save(&hset->handle.slock, &state, SLOCK_FLAGS);
    if (list_in_list(&ref->ready_node))
        list_delete(&ref->ready_node);
    spin_unlock_restore(&hset->handle.slock, state, SLOCK_FLAGS);

    /* remove from handle set list */
    list_delete(&ref->set_node);
    ref->parent = NULL;
    handle_decref(&hset->handle);
}

static void hset_destroy(struct handle* h) {
    struct handle_set* hset = handle_to_handle_set(h);

    LTRACEF("%p\n", h);

    free(hset);
}

static void hset_init(struct handle_set* hset) {
    mutex_init(&hset->mlock);
    list_initialize(&hset->ref_list);
    list_initialize(&hset->ready_list);
    handle_init_etc(&hset->handle, &hset_ops, HANDLE_FLAG_NO_SEND);
}

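/*
 * Allocate and initialize a new handle set. Returns a pointer to the
 * embedded handle (created with HANDLE_FLAG_NO_SEND), or NULL if the
 * allocation fails. The set is freed through the normal handle refcount
 * mechanism via hset_destroy.
 */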
struct handle* handle_set_create(void) {
    struct handle_set* hset;

    hset = malloc(sizeof(*hset));
    if (!hset)
        return NULL;

    hset_init(hset);

    LTRACEF("%p\n", &hset->handle);

    return &hset->handle;
}

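/*
 * Notify callback invoked when a member handle signals an event: queue the
 * ref on the parent set's ready list (if not already queued) and wake up
 * anyone waiting on the set itself.
 */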
static void hset_waiter_notify(struct handle_waiter* w) {
    ASSERT(w);
    struct handle_ref* ref = containerof(w, struct handle_ref, waiter);

    ASSERT(ref->parent);
    spin_lock(&ref->parent->slock);
    if (!list_in_list(&ref->ready_node)) {
        struct handle_set* hset = handle_to_handle_set(ref->parent);
        list_add_tail(&hset->ready_list, &ref->ready_node);
    }
    handle_notify_waiters_locked(ref->parent);
    spin_unlock(&ref->parent->slock);
}

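/*
 * Link a ref into a handle set: take a reference on the set, register the
 * ref as a waiter on the member handle, and, if the member already has
 * pending events, notify it so the ref lands on the ready list.
 */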
static int hset_attach_ref(struct handle_set* hset, struct handle_ref* ref) {
    DEBUG_ASSERT(ref->parent == NULL);
    DEBUG_ASSERT(!list_in_list(&ref->set_node));
    DEBUG_ASSERT(!list_in_list(&ref->ready_node));
    DEBUG_ASSERT(!list_in_list(&ref->waiter.node));

    LTRACEF("%p: %p\n", &hset->handle, ref->handle);

    mutex_acquire(&hset->mlock);
    handle_incref(&hset->handle);
    ref->parent = &hset->handle;
    ref->waiter.notify_proc = hset_waiter_notify;
    list_add_tail(&hset->ref_list, &ref->set_node);
    handle_add_waiter(ref->handle, &ref->waiter);
    mutex_release(&hset->mlock);

    if (ref->handle->ops->poll(ref->handle, ~0U, false)) {
        /*
         * TODO: this could be optimized a bit: instead of waking up
         * all clients of this handle, we could wake up only the path
         * we are attaching to.
         */
        handle_notify(ref->handle);
    }

    return NO_ERROR;
}

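/*
 * Recursively search a handle-set tree for target, taking each set's mlock
 * while walking its ref_list. Used to reject attachments that would create
 * a reference cycle between handle sets.
 */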
static bool hset_find_target(struct handle_set* hset,
                             struct handle_set* target) {
    struct handle_set* child_hset;
    struct handle_ref* ref;

    if (hset == target)
        return true;

    mutex_acquire(&hset->mlock);
    list_for_every_entry(&hset->ref_list, ref, struct handle_ref, set_node) {
        if (!ref->handle)
            continue;

        if (!is_handle_set(ref->handle))
            continue;

        child_hset = handle_to_handle_set(ref->handle);
        if (hset_find_target(child_hset, target))
            goto found;
    }
    mutex_release(&hset->mlock);
    return false;

found:
    mutex_release(&hset->mlock);
    return true;
}

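/*
 * Attach one handle set to another, refusing combinations where the new
 * child set (or any set nested within it) already contains the parent.
 */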
static int hset_attach_hset(struct handle_set* hset, struct handle_ref* ref) {
    ASSERT(ref);
    struct handle_set* new_hset = handle_to_handle_set(ref->handle);

    /* check if it would create a circular reference */
    if (hset_find_target(new_hset, hset)) {
        LTRACEF("Would create circular refs\n");
        return ERR_INVALID_ARGS;
    }

    return hset_attach_ref(hset, ref);
}

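/*
 * Attach a handle (described by ref) to the handle set h. Attaching a
 * handle set to another set is serialized by the global g_hset_lock so
 * that the cycle check in hset_attach_hset sees a stable topology.
 */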
int handle_set_attach(struct handle* h, struct handle_ref* ref) {
    int ret;
    struct handle_set* hset;

    ASSERT(ref);
    ASSERT(ref->handle);

    hset = handle_to_handle_set(h);
    if (is_handle_set(ref->handle)) {
        mutex_acquire(&g_hset_lock);
        ret = hset_attach_hset(hset, ref);
        mutex_release(&g_hset_lock);
    } else {
        ret = hset_attach_ref(hset, ref);
    }

    return ret;
}

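/*
 * Detach a previously attached ref from its parent set. Temporarily takes
 * an extra reference on the set so it cannot be destroyed while the detach
 * is in progress.
 */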
void handle_set_detach_ref(struct handle_ref* ref) {
    ASSERT(ref);

    if (ref->parent) {
        struct handle_set* hset = handle_to_handle_set(ref->parent);
        handle_incref(&hset->handle);
        mutex_acquire(&hset->mlock);
        hset_detach_ref_locked(hset, ref);
        mutex_release(&hset->mlock);
        handle_decref(&hset->handle);
    }
}

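/*
 * Update the event mask and cookie of an attached ref, then re-notify the
 * member handle so pending events are re-evaluated against the new mask.
 */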
void handle_set_update_ref(struct handle_ref* ref,
                           uint32_t emask,
                           void* cookie) {
    ASSERT(ref);

    if (ref->parent) {
        struct handle_set* hset = handle_to_handle_set(ref->parent);
        mutex_acquire(&hset->mlock);
        ref->emask = emask;
        ref->cookie = cookie;
        mutex_release(&hset->mlock);
        handle_notify(ref->handle);
    }
}

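/*
 * Scan the ready list for a member with a pending event. Each candidate is
 * re-polled with finalize set; a member that still has events is reported
 * through *out (with a reference taken on its handle) and re-queued at the
 * tail, which round-robins readiness among members. Returns 1 if *out is
 * valid, 0 if nothing is ready, or ERR_NOT_FOUND if the set is empty.
 */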
static int _hset_do_poll(struct handle_set* hset, struct handle_ref* out) {
    int ret = 0;
    uint32_t event;
    struct handle_ref* ref;
    spin_lock_saved_state_t state;

    mutex_acquire(&hset->mlock);

    if (list_is_empty(&hset->ref_list)) {
        ret = ERR_NOT_FOUND;
        goto err_empty;
    }

    for (;;) {
        spin_lock_save(&hset->handle.slock, &state, SLOCK_FLAGS);
        ref = list_remove_head_type(&hset->ready_list, struct handle_ref,
                                    ready_node);
        spin_unlock_restore(&hset->handle.slock, state, SLOCK_FLAGS);

        if (!ref)
            break;

        event = ref->handle->ops->poll(ref->handle, ref->emask, true);
        if (event) {
            handle_incref(ref->handle);
            out->handle = ref->handle;
            out->id = ref->id;
            out->cookie = ref->cookie;
            out->emask = event;

            /* move it to the end of the queue */
            spin_lock_save(&hset->handle.slock, &state, SLOCK_FLAGS);
            if (!list_in_list(&ref->ready_node))
                list_add_tail(&hset->ready_list, &ref->ready_node);
            spin_unlock_restore(&hset->handle.slock, state, SLOCK_FLAGS);
            ret = 1;
            break;
        }
    }

err_empty:
    mutex_release(&hset->mlock);

    return ret;
}

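/*
 * Block until a member of the set has a pending event or the timeout
 * expires. On success, *out describes the ready member and holds a
 * reference on its handle that the caller must release.
 */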
static int hset_wait(struct handle_set* hset,
                     struct handle_ref* out,
                     lk_time_t timeout) {
    int ret;
    struct handle_event_waiter ew = HANDLE_EVENT_WAITER_INITIAL_VALUE(ew);

    DEBUG_ASSERT(hset);
    DEBUG_ASSERT(out);

    handle_add_waiter(&hset->handle, &ew.waiter);

    do {
        /* poll */
        ret = _hset_do_poll(hset, out);
        if (!ret) {
            /*
             * nothing is ready yet: block until a member handle
             * notifies us or the timeout expires; any non-zero ret
             * is either an error or a valid event
             */
            ret = event_wait_timeout(&ew.event, timeout);
        }
    } while (!ret);

    if (ret > 0)
        ret = 0;

    handle_del_waiter(&hset->handle, &ew.waiter);
    event_destroy(&ew.event);
    return ret;
}

int handle_set_wait(struct handle* h,
                    struct handle_ref* out,
                    lk_time_t timeout) {
    struct handle_set* hset = handle_to_handle_set(h);
    return hset_wait(hset, out, timeout);
}

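/*
 * Returns true if the set currently has at least one member queued on its
 * ready list (checked under the set's spinlock).
 */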
bool handle_set_ready(struct handle* h) {
    bool ret;
    spin_lock_saved_state_t state;

    struct handle_set* hset = handle_to_handle_set(h);

    spin_lock_save(&hset->handle.slock, &state, SLOCK_FLAGS);
    ret = !list_is_empty(&hset->ready_list);
    spin_unlock_restore(&hset->handle.slock, state, SLOCK_FLAGS);

    return ret;
}

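/*
 * Wait on a single ref: if it refers to a handle set, wait on the set;
 * otherwise wait directly on the handle and, on success, fill *out with
 * the ref's id and cookie, the observed event mask, and a reference on
 * the handle.
 */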
int handle_ref_wait(const struct handle_ref* in,
                    struct handle_ref* out,
                    lk_time_t timeout) {
    int ret = 0;

    if (!in || !in->handle || !out)
        return ERR_INVALID_ARGS;

    if (is_handle_set(in->handle)) {
        ret = handle_set_wait(in->handle, out, timeout);
    } else {
        uint32_t event;
        ret = handle_wait(in->handle, &event, timeout);
        if (ret == NO_ERROR) {
            handle_incref(in->handle);
            out->handle = in->handle;
            out->cookie = in->cookie;
            out->id = in->id;
            out->emask = event;
        }
    }
    return ret;
}