/*
 * Copyright (c) 2019, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <err.h>
#include <kernel/mutex.h>
#include <kernel/usercopy.h>
#include <lib/binary_search_tree.h>
#include <lib/syscall.h>
#include <list.h>
#include <platform/interrupts.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>

#define LOCAL_TRACE 0

#include <lib/trusty/event.h>
#include <lib/trusty/handle.h>
#include <lib/trusty/trusty_app.h>
#include <lib/trusty/uctx.h>
#include <lib/trusty/uio.h>

/**
 * enum event_state - event states
 * @EVENT_STATE_UNSIGNALED:
 *  the initial state of any event object. An event object leaves the
 *  @EVENT_STATE_UNSIGNALED state when it becomes signaled and may return to
 *  it under certain conditions (see below).
 * @EVENT_STATE_SIGNALED:
 *  entered by an event source object when the event_source_signal() routine
 *  is invoked. An event client object enters the @EVENT_STATE_SIGNALED state
 *  either from the @EVENT_STATE_UNSIGNALED state, when event_source_signal()
 *  is invoked on its event source, or from the @EVENT_STATE_NOTIFIED_SIGNALED
 *  state, when the client acknowledges that a previously delivered event has
 *  been handled. Waiting on an event client object in the
 *  @EVENT_STATE_SIGNALED state generates the %IPC_HANDLE_POLL_MSG event for
 *  its waiters.
 * @EVENT_STATE_NOTIFIED:
 *  entered by an event client object from the @EVENT_STATE_SIGNALED state
 *  after the %IPC_HANDLE_POLL_MSG event has been delivered to the client. In
 *  this state, the event client object stops generating the
 *  %IPC_HANDLE_POLL_MSG event for its waiters. The client should handle the
 *  received event and acknowledge it by invoking event_client_notify_handled()
 *  on the client event object. Upon receiving such an acknowledgment, the
 *  client event object transitions back to the @EVENT_STATE_UNSIGNALED state.
 * @EVENT_STATE_NOTIFIED_SIGNALED:
 *  entered by an event client object from the @EVENT_STATE_NOTIFIED state
 *  when another signal is received, which is possible for event sources that
 *  support edge-triggered semantics. Receiving an acknowledgment for an event
 *  client in this state transitions the object into the @EVENT_STATE_SIGNALED
 *  state, which generates a new %IPC_HANDLE_POLL_MSG event for its waiters.
 * @EVENT_STATE_HANDLED:
 *  entered by an event source object from the @EVENT_STATE_SIGNALED state
 *  when all registered clients have finished handling an event and
 *  acknowledged that by invoking the event_client_notify_handled() routine.
 *  Waiting on an event source object in the @EVENT_STATE_HANDLED state
 *  generates the %IPC_HANDLE_POLL_MSG event for its waiters and transitions
 *  the object back to the @EVENT_STATE_UNSIGNALED state.
 * @EVENT_STATE_CLOSED:
 *  entered when the last reference to the event source object handle goes
 *  away. This state is applicable to both event source and event client
 *  objects. In this state, the %IPC_HANDLE_POLL_HUP event is delivered to
 *  the handle's waiters.
 */
enum event_state {
    EVENT_STATE_UNSIGNALED = 0,
    EVENT_STATE_SIGNALED,
    EVENT_STATE_NOTIFIED,
    EVENT_STATE_NOTIFIED_SIGNALED,
    EVENT_STATE_HANDLED,
    EVENT_STATE_CLOSED,
};
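
/*
 * Illustrative lifecycle sketch (not part of the build; names such as
 * my_timer_ops and my_timer_state are hypothetical). A kernel-side driver
 * would typically create and publish an event source, then signal it from
 * its interrupt or work path:
 *
 *   struct handle* evt;
 *   int rc = event_source_create("com.example.timer", &my_timer_ops,
 *                                &my_timer_state, NULL, 0, 0, &evt);
 *   if (rc == NO_ERROR)
 *       rc = event_source_publish(evt);
 *   ...
 *   event_source_signal(evt);  // attached clients become signaled
 *
 * Each client that received %IPC_HANDLE_POLL_MSG acknowledges with
 * event_client_notify_handled() (or by writing EVENT_NOTIFY_CMD_HANDLED from
 * user space); once all clients have acknowledged, the source enters
 * EVENT_STATE_HANDLED and its unmask callback, if any, is invoked.
 */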

/**
 * struct event_source - represents an event source object
 * @name: event name
 * @ops: pointer to &struct event_source_ops
 * @ops_arg: pointer passed as the &priv parameter of all ops callbacks
 * @uuids: pointer to an array of &struct uuid items that are allowed to open
 *         this event source object
 * @uuids_num: number of items in the @uuids array
 * @refobj: ref object
 * @handle_ref: self reference from @handle
 * @handle: embedded &struct handle
 * @tree_node: &struct bst_node used to track this object in the global tree
 * @client_list: list of attached clients
 * @client_cnt: number of attached clients
 * @slock: spinlock protecting internal state
 * @ack_cnt: required ack count
 * @state: event source state
 *
 * Note: the event object internal state and state transitions are protected by
 * two locks: the global mutex (&es_lock) and a spin lock (@slock) private to
 * the event source object. The global mutex is held to protect operations on
 * the global event object list (insert, remove and lookup) and on the ref
 * object. In addition, it is held to synchronize invocation of the &open and
 * &close callbacks, which happens in the context of creating and destroying
 * event objects. All other state transitions are protected by the spin lock.
 */
struct event_source {
    const char* name;
    const struct event_source_ops* ops;
    const void* ops_arg;
    const uuid_t* uuids;
    unsigned int uuids_num;

    struct obj refobj;

    /* handle_ref is a self reference that is held while there are
     * outstanding handles out there. It is removed when the last
     * handle ref goes away.
     */
    struct obj_ref handle_ref;
    struct handle handle;

    struct bst_node tree_node;
    struct list_node client_list;
    unsigned int client_cnt;

    spin_lock_t slock;

    unsigned int ack_cnt;
    volatile int state;
};

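/**
 * struct event_client - represents a client attached to an event source
 * @handle: embedded &struct handle
 * @node:   list node linking the client into the owning source's @client_list
 * @es:     pointer to the &struct event_source this client is attached to
 * @es_ref: reference held on @es->refobj for the lifetime of the client
 * @state:  event client state (&enum event_state)
 */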
struct event_client {
    struct handle handle;
    struct list_node node;
    struct event_source* es;
    struct obj_ref es_ref;
    volatile int state;
};

#define SLOCK_FLAGS SPIN_LOCK_FLAG_INTERRUPTS

static uint32_t event_source_poll(struct handle* handle,
                                  uint32_t emask,
                                  bool finalize);
static void event_source_destroy(struct handle* handle);

static uint32_t event_client_poll(struct handle* handle,
                                  uint32_t emask,
                                  bool finalize);
static void event_client_destroy(struct handle* handle);

static ssize_t event_client_user_readv(struct handle* h,
                                       user_addr_t iov_uaddr,
                                       uint32_t iov_cnt);
static ssize_t event_client_user_writev(struct handle* handle,
                                        user_addr_t iov_uaddr,
                                        uint32_t iov_cnt);

static mutex_t es_lock = MUTEX_INITIAL_VALUE(es_lock);
static struct bst_root es_tree_root = BST_ROOT_INITIAL_VALUE;

static struct handle_ops event_source_handle_ops = {
        .poll = event_source_poll,
        .destroy = event_source_destroy,
};

static struct handle_ops event_client_handle_ops = {
        .poll = event_client_poll,
        .destroy = event_client_destroy,
        .user_readv = event_client_user_readv,
        .user_writev = event_client_user_writev,
};

/******************************************************************************/

static struct event_source* handle_to_event_source(struct handle* h) {
    ASSERT(h);
    ASSERT(h->ops == &event_source_handle_ops);
    return containerof(h, struct event_source, handle);
}

static int event_source_bst_compare(struct bst_node* a, struct bst_node* b) {
    struct event_source* es_a = containerof(a, struct event_source, tree_node);
    struct event_source* es_b = containerof(b, struct event_source, tree_node);
    return strcmp(es_a->name, es_b->name);
}

static uint32_t event_source_poll(struct handle* h,
                                  uint32_t emask,
                                  bool finalize) {
    int oldstate;
    spin_lock_saved_state_t state;

    struct event_source* es = handle_to_event_source(h);

    spin_lock_save(&es->slock, &state, SLOCK_FLAGS);
    oldstate = es->state;
    if (finalize && (oldstate == EVENT_STATE_HANDLED)) {
        es->state = EVENT_STATE_UNSIGNALED;
    }
    spin_unlock_restore(&es->slock, state, SLOCK_FLAGS);

    if (oldstate == EVENT_STATE_HANDLED) {
        return IPC_HANDLE_POLL_MSG;
    }

    return 0;
}

static void event_source_obj_destroy(struct obj* obj) {
    struct event_source* es = containerof(obj, struct event_source, refobj);
    free(es);
}

static void event_source_destroy(struct handle* h) {
    struct event_client* ec;
    struct event_source* es;
    spin_lock_saved_state_t state;

    /* called when the last reference to handle goes away */

    es = handle_to_event_source(h);

    mutex_acquire(&es_lock);

    /* if the event source is in the global list: remove it */
    if (es->tree_node.rank) {
        bst_delete(&es_tree_root, &es->tree_node);

        /* notify observers that event source is closed */
        if (es->ops)
            es->ops->close(es->ops_arg);
    }

    /* mark all clients still connected as closed */
    spin_lock_save(&es->slock, &state, SLOCK_FLAGS);
    es->state = EVENT_STATE_CLOSED;
    list_for_every_entry(&es->client_list, ec, struct event_client, node) {
        ec->state = EVENT_STATE_CLOSED;
        handle_notify(&ec->handle);
    }
    spin_unlock_restore(&es->slock, state, SLOCK_FLAGS);

    /* clear pointers that should not be accessed past this point */
    es->ops = NULL;
    es->ops_arg = NULL;
    es->uuids = NULL;
    es->name = NULL;

    /* remove self reference */
    obj_del_ref(&es->refobj, &es->handle_ref, event_source_obj_destroy);
    mutex_release(&es_lock);
}

static struct event_source* event_source_lookup_locked(const char* name,
                                                       const uuid_t* uuid,
                                                       struct obj_ref* ref) {
    struct bst_node* tn;
    struct event_source* es;
    struct event_source unused;

    /* only init .name */
    unused.name = name;

    DEBUG_ASSERT(is_mutex_held(&es_lock));

    tn = bst_search(&es_tree_root, &unused.tree_node, event_source_bst_compare);
    if (!tn) {
        /* Object not found */
        return NULL;
    }

    /* Object found: check if we are allowed to connect */
    es = containerof(tn, struct event_source, tree_node);

    if (!es->uuids_num) {
        /* No uuids are configured: allow anybody */
        obj_add_ref(&es->refobj, ref);
        return es;
    }

    /* check client */
    for (uint32_t i = 0; i < es->uuids_num; i++) {
        if (memcmp(uuid, &es->uuids[i], sizeof(*uuid)) == 0) {
            obj_add_ref(&es->refobj, ref);
            return es;
        }
    }

    return NULL;
}

static void event_source_attach_client_locked(struct event_source* es,
                                              struct event_client* ec) {
    spin_lock_saved_state_t state;

    DEBUG_ASSERT(is_mutex_held(&es_lock));
    DEBUG_ASSERT(!spin_lock_held(&es->slock));

    spin_lock_save(&es->slock, &state, SLOCK_FLAGS);

    /* add ref to es and attach client to tracking list */
    ec->es = es;
    obj_add_ref(&es->refobj, &ec->es_ref);
    list_add_tail(&es->client_list, &ec->node);

    /* client starts in EVENT_STATE_UNSIGNALED state */
    ec->state = EVENT_STATE_UNSIGNALED;

    es->client_cnt++;

    spin_unlock_restore(&es->slock, state, SLOCK_FLAGS);

    if (es->client_cnt == 1) {
        /* if first client (invokes open) */
        if (es->ops && es->ops->open) {
            es->ops->open(es->ops_arg);
        }
    }
}

static void event_source_notify_done_slocked(struct event_source* es) {
    DEBUG_ASSERT(spin_lock_held(&es->slock));

    ASSERT(es->ack_cnt > 0);

    /* decrement ack count of event source */
    if (--es->ack_cnt == 0) {
        /* All clients notified */
        es->state = EVENT_STATE_HANDLED;
        handle_notify(&es->handle);

        if (es->ops && es->ops->unmask) {
            es->ops->unmask(es->ops_arg);
        }
    }
}

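/*
 * event_source_signal() - signal the event source behind handle @h. Clients
 * in the unsignaled state are moved to the signaled state and their handles
 * are notified; clients that have already been notified are marked
 * signaled-while-notified. If no clients are attached, the source goes
 * straight to the handled state. For level-triggered sources (those with a
 * mask callback), the source stays masked until all clients acknowledge.
 */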
int event_source_signal(struct handle* h) {
    struct event_client* ec;
    struct event_source* es;
    spin_lock_saved_state_t state;

    es = handle_to_event_source(h);

    spin_lock_save(&es->slock, &state, SLOCK_FLAGS);

    if (es->ops && es->ops->mask) {
        /*
         * If we have a mask method we are in "level triggered" mode. It is
         * expected that the event is signaled only if the event source is
         * in EVENT_STATE_UNSIGNALED or EVENT_STATE_HANDLED state.
         */
        ASSERT(es->state == EVENT_STATE_UNSIGNALED ||
               es->state == EVENT_STATE_HANDLED);

        /* mask source */
        es->ops->mask(es->ops_arg);
    }

    if (es->client_cnt) {
        /* we have clients */
        es->ack_cnt = es->client_cnt;
        es->state = EVENT_STATE_SIGNALED;
        list_for_every_entry(&es->client_list, ec, struct event_client, node) {
            if (ec->state == EVENT_STATE_UNSIGNALED) {
                /* enter signaled state and pet handle */
                ec->state = EVENT_STATE_SIGNALED;
                handle_notify(&ec->handle);
            } else if (ec->state == EVENT_STATE_NOTIFIED) {
                /* enter signaled notify state */
                ec->state = EVENT_STATE_NOTIFIED_SIGNALED;
            }
        }
    } else {
        /* no clients: mark source as handled and notify source handle */
        es->state = EVENT_STATE_HANDLED;
        handle_notify(&es->handle);
    }

    spin_unlock_restore(&es->slock, state, SLOCK_FLAGS);

    return NO_ERROR;
}

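/*
 * event_source_publish() - insert an event source into the global tree so
 * that clients can find it by name via event_source_open(). Returns
 * ERR_ALREADY_EXISTS if a source with the same name is already published.
 */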
int event_source_publish(struct handle* h) {
    bool inserted;
    struct event_source* es = handle_to_event_source(h);

    mutex_acquire(&es_lock);
    inserted =
            bst_insert(&es_tree_root, &es->tree_node, event_source_bst_compare);
    mutex_release(&es_lock);

    return inserted ? NO_ERROR : ERR_ALREADY_EXISTS;
}

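/*
 * event_source_create() - allocate and initialize an event source object.
 * The @name, @ops, @ops_arg and @uuids pointers are stored as-is, so the
 * caller must keep them alive for the lifetime of the object. The new
 * source is not visible to clients until event_source_publish() is called.
 */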
int event_source_create(const char* name,
                        const struct event_source_ops* ops,
                        const void* ops_arg,
                        const struct uuid* uuids,
                        unsigned int uuids_num,
                        unsigned int flags,
                        struct handle** ph) {
    struct event_source* es;

    if (!name || *name == 0)
        return ERR_INVALID_ARGS;

    es = calloc(1, sizeof(*es));
    if (!es) {
        return ERR_NO_MEMORY;
    }

    es->name = name;

    if (ops) {
        ASSERT(ops->open);
        ASSERT(ops->close);

        /* mask and unmask must be set together */
        ASSERT(!ops->mask == !ops->unmask);
    }

    es->ops = ops;
    es->ops_arg = ops_arg;
    es->uuids = uuids;
    es->uuids_num = uuids_num;

    spin_lock_init(&es->slock);
    list_initialize(&es->client_list);
    bst_node_initialize(&es->tree_node);
    obj_init(&es->refobj, &es->handle_ref);
    handle_init(&es->handle, &event_source_handle_ops);

    *ph = &es->handle;
    return NO_ERROR;
}

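/*
 * event_source_open() - look up a published event source by @name and, if
 * the caller's uuid (@cid) is allowed to connect, attach a new event client
 * to it. On success a reference to the new client handle is returned through
 * @ph. @max_name bounds the lookup; the name must be NUL-terminated within
 * that limit.
 */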
int event_source_open(const uuid_t* cid,
                      const char* name,
                      size_t max_name,
                      uint flags,
                      struct handle** ph) {
    int ret;
    struct event_source* es;
    struct event_client* ec = NULL;
    struct obj_ref es_tmp_ref = OBJ_REF_INITIAL_VALUE(es_tmp_ref);

    if (!name) {
        return ERR_INVALID_ARGS;
    }

    size_t len = strnlen(name, max_name);
    if (len == 0 || len >= max_name) {
        /* empty or unterminated string */
        LTRACEF("invalid path specified\n");
        return ERR_INVALID_ARGS;
    }
    /* After this point name is zero terminated */

    mutex_acquire(&es_lock);

    /* lookup event source */
    es = event_source_lookup_locked(name, cid, &es_tmp_ref);
    if (!es) {
        ret = ERR_NOT_FOUND;
        goto err_not_found;
    }

    /* allocate handle and tracking structure */
    ec = calloc(1, sizeof(*ec));
    if (!ec) {
        ret = ERR_NO_MEMORY;
        goto err_alloc;
    }

    obj_ref_init(&ec->es_ref);
    handle_init(&ec->handle, &event_client_handle_ops);

    /* attach it to event source */
    event_source_attach_client_locked(es, ec);

    /* Looks OK */
    handle_incref(&ec->handle);
    *ph = &ec->handle;
    ret = NO_ERROR;

err_attach:
err_alloc:
    obj_del_ref(&es->refobj, &es_tmp_ref, event_source_obj_destroy);
err_not_found:
    mutex_release(&es_lock);

    if (ec) {
        handle_decref(&ec->handle);
    }
    return ret;
}

/******************************************************************************/

static bool handle_is_client(struct handle* handle) {
    ASSERT(handle);
    return likely(handle->ops == &event_client_handle_ops);
}

static uint32_t event_client_poll(struct handle* h,
                                  uint32_t emask,
                                  bool finalize) {
    int oldstate;
    spin_lock_saved_state_t state;

    ASSERT(handle_is_client(h));

    struct event_client* ec = containerof(h, struct event_client, handle);

    spin_lock_save(&ec->es->slock, &state, SLOCK_FLAGS);
    oldstate = ec->state;
    if (finalize && (oldstate == EVENT_STATE_SIGNALED)) {
        ec->state = EVENT_STATE_NOTIFIED;
    }
    spin_unlock_restore(&ec->es->slock, state, SLOCK_FLAGS);

    if (oldstate == EVENT_STATE_CLOSED) {
        return IPC_HANDLE_POLL_HUP;
    }

    if (oldstate == EVENT_STATE_SIGNALED) {
        return IPC_HANDLE_POLL_MSG;
    }

    return 0;
}

static void event_client_notify_done_slocked(struct event_client* ec) {
    struct event_source* es = ec->es;

    /* event source spinlock must be held. Global es_lock is not required */
    DEBUG_ASSERT(spin_lock_held(&es->slock));

    if (ec->state == EVENT_STATE_NOTIFIED_SIGNALED) {
        /* back to signaled state and pet handle */
        ec->state = EVENT_STATE_SIGNALED;
        handle_notify(&ec->handle);
    } else if (ec->state == EVENT_STATE_NOTIFIED) {
        /* back to unsignaled state and update source */
        ec->state = EVENT_STATE_UNSIGNALED;
        event_source_notify_done_slocked(es);
    }
}

static void event_client_destroy(struct handle* h) {
    int oldstate;
    struct event_client* ec;
    struct event_source* es;
    spin_lock_saved_state_t state;

    ASSERT(handle_is_client(h));

    ec = containerof(h, struct event_client, handle);

    mutex_acquire(&es_lock);

    es = ec->es;
    ASSERT(es);

    /* detach client */
    spin_lock_save(&es->slock, &state, SLOCK_FLAGS);

    oldstate = ec->state;
    if (oldstate != EVENT_STATE_CLOSED) {
        /* if source is not closed */
        if (oldstate == EVENT_STATE_SIGNALED ||
            oldstate == EVENT_STATE_NOTIFIED ||
            oldstate == EVENT_STATE_NOTIFIED_SIGNALED) {
            /* then invoke notify done */
            event_source_notify_done_slocked(es);
        }
        ec->state = EVENT_STATE_CLOSED;
    }

    ASSERT(list_in_list(&ec->node));
    list_delete(&ec->node);
    es->client_cnt--;

    spin_unlock_restore(&es->slock, state, SLOCK_FLAGS);

    if (oldstate != EVENT_STATE_CLOSED) {
        if (es->client_cnt == 0) {
            /* last client: invoke close */
            if (es->ops && es->ops->close) {
                es->ops->close(es->ops_arg);
            }
        }
    }

    /* Remove reference to source object */
    obj_del_ref(&es->refobj, &ec->es_ref, event_source_obj_destroy);

    /* free client */
    free(ec);

    mutex_release(&es_lock);
}

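/*
 * event_client_notify_handled() - acknowledge a previously delivered event on
 * the event client handle @h. Returns ERR_INVALID_ARGS if @h is not an event
 * client handle, ERR_CHANNEL_CLOSED if the event source has gone away, and
 * ERR_BAD_STATE if the client is not in a notified state.
 */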
int event_client_notify_handled(struct handle* h) {
    int ret = NO_ERROR;
    struct event_client* ec;
    struct event_source* es;
    spin_lock_saved_state_t state;

    if (!handle_is_client(h)) {
        return ERR_INVALID_ARGS;
    }

    ec = containerof(h, struct event_client, handle);
    es = ec->es;

    ASSERT(es);

    spin_lock_save(&es->slock, &state, SLOCK_FLAGS);
    switch (ec->state) {
    case EVENT_STATE_NOTIFIED:
    case EVENT_STATE_NOTIFIED_SIGNALED:
        event_client_notify_done_slocked(ec);
        break;

    case EVENT_STATE_CLOSED:
        ret = ERR_CHANNEL_CLOSED;
        break;

    default:
        ret = ERR_BAD_STATE;
    }
    spin_unlock_restore(&es->slock, state, SLOCK_FLAGS);

    return ret;
}

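/*
 * User-space interface: a client acknowledges an event by writing a single
 * 32-bit command word to its handle. Only EVENT_NOTIFY_CMD_HANDLED is
 * currently understood; reads carry no payload and return 0 bytes.
 */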
static ssize_t event_client_user_writev(struct handle* h,
                                        user_addr_t iov_uaddr,
                                        uint32_t iov_cnt) {
    int ret;
    ssize_t len;
    uint32_t cmd;

    DEBUG_ASSERT(h);

    if (iov_cnt != 1) {
        /* we expect exactly one iov here */
        return ERR_INVALID_ARGS;
    }

    len = user_iovec_to_membuf((uint8_t*)&cmd, sizeof(cmd), iov_uaddr, iov_cnt);
    if (len < 0) {
        /* most likely FAULT */
        return (int32_t)len;
    }

    if (len != sizeof(cmd)) {
        /* partial write */
        return ERR_INVALID_ARGS;
    }

    switch (cmd) {
    case EVENT_NOTIFY_CMD_HANDLED:
        ret = event_client_notify_handled(h);
        break;

    default:
        ret = ERR_INVALID_ARGS;
    }

    return ret;
}

static ssize_t event_client_user_readv(struct handle* h,
                                       user_addr_t iov_uaddr,
                                       uint32_t iov_cnt) {
    return 0;
}