1 /*
2 * f_fs.c -- user mode file system API for USB composite function controllers
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 * Author: Michal Nazarewicz <mina86@mina86.com>
6 *
7 * Based on inode.c (GadgetFS) which was:
8 * Copyright (C) 2003-2004 David Brownell
9 * Copyright (C) 2003 Agilent Technologies
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17
18 /* #define DEBUG */
19 /* #define VERBOSE_DEBUG */
20
21 #include <linux/blkdev.h>
22 #include <linux/pagemap.h>
23 #include <linux/export.h>
24 #include <linux/hid.h>
25 #include <linux/module.h>
26 #include <asm/unaligned.h>
27
28 #include <linux/usb/composite.h>
29 #include <linux/usb/functionfs.h>
30
31 #include <linux/aio.h>
32 #include <linux/mmu_context.h>
33 #include <linux/poll.h>
34
35 #include "u_fs.h"
36 #include "u_f.h"
37 #include "u_os_desc.h"
38 #include "configfs.h"
39
40 #define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by an honest dice roll ;) */
41
42 /* Reference counter handling */
43 static void ffs_data_get(struct ffs_data *ffs);
44 static void ffs_data_put(struct ffs_data *ffs);
45 /* Creates new ffs_data object. */
46 static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
47
48 /* Opened counter handling. */
49 static void ffs_data_opened(struct ffs_data *ffs);
50 static void ffs_data_closed(struct ffs_data *ffs);
51
52 /* Called with ffs->mutex held; take over ownership of data. */
53 static int __must_check
54 __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
55 static int __must_check
56 __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
57
58
59 /* The function structure ***************************************************/
60
61 struct ffs_ep;
62
63 struct ffs_function {
64 struct usb_configuration *conf;
65 struct usb_gadget *gadget;
66 struct ffs_data *ffs;
67
68 struct ffs_ep *eps;
69 u8 eps_revmap[16];
70 short *interfaces_nums;
71
72 struct usb_function function;
73 };
74
75
76 static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
77 {
78 return container_of(f, struct ffs_function, function);
79 }
80
81
82 static inline enum ffs_setup_state
83 ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
84 {
85 return (enum ffs_setup_state)
86 cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
87 }
88
89
90 static void ffs_func_eps_disable(struct ffs_function *func);
91 static int __must_check ffs_func_eps_enable(struct ffs_function *func);
92
93 static int ffs_func_bind(struct usb_configuration *,
94 struct usb_function *);
95 static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
96 static void ffs_func_disable(struct usb_function *);
97 static int ffs_func_setup(struct usb_function *,
98 const struct usb_ctrlrequest *);
99 static void ffs_func_suspend(struct usb_function *);
100 static void ffs_func_resume(struct usb_function *);
101
102
103 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
104 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
105
106
107 /* The endpoints structures *************************************************/
108
109 struct ffs_ep {
110 struct usb_ep *ep; /* P: ffs->eps_lock */
111 struct usb_request *req; /* P: epfile->mutex */
112
113 /* [0]: full speed, [1]: high speed, [2]: super speed */
114 struct usb_endpoint_descriptor *descs[3];
115
116 u8 num;
117
118 int status; /* P: epfile->mutex */
119 };
120
121 struct ffs_epfile {
122 /* Protects ep->ep and ep->req. */
123 struct mutex mutex;
124 wait_queue_head_t wait;
125
126 struct ffs_data *ffs;
127 struct ffs_ep *ep; /* P: ffs->eps_lock */
128
129 struct dentry *dentry;
130
131 char name[5];
132
133 unsigned char in; /* P: ffs->eps_lock */
134 unsigned char isoc; /* P: ffs->eps_lock */
135
136 unsigned char _pad;
137 };
138
139 /* ffs_io_data structure ***************************************************/
140
141 struct ffs_io_data {
142 bool aio;
143 bool read;
144
145 struct kiocb *kiocb;
146 const struct iovec *iovec;
147 unsigned long nr_segs;
148 char __user *buf;
149 size_t len;
150
151 struct mm_struct *mm;
152 struct work_struct work;
153
154 struct usb_ep *ep;
155 struct usb_request *req;
156 };
157
158 struct ffs_desc_helper {
159 struct ffs_data *ffs;
160 unsigned interfaces_count;
161 unsigned eps_count;
162 };
163
164 static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
165 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
166
167 static struct dentry *
168 ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
169 const struct file_operations *fops);
170
171 /* Devices management *******************************************************/
172
173 DEFINE_MUTEX(ffs_lock);
174 EXPORT_SYMBOL_GPL(ffs_lock);
175
176 static struct ffs_dev *_ffs_find_dev(const char *name);
177 static struct ffs_dev *_ffs_alloc_dev(void);
178 static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
179 static void _ffs_free_dev(struct ffs_dev *dev);
180 static void *ffs_acquire_dev(const char *dev_name);
181 static void ffs_release_dev(struct ffs_data *ffs_data);
182 static int ffs_ready(struct ffs_data *ffs);
183 static void ffs_closed(struct ffs_data *ffs);
184
185 /* Misc helper functions ****************************************************/
186
187 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
188 __attribute__((warn_unused_result, nonnull));
189 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
190 __attribute__((warn_unused_result, nonnull));
191
192
193 /* Control file aka ep0 *****************************************************/
194
195 static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
196 {
197 struct ffs_data *ffs = req->context;
198
199 complete_all(&ffs->ep0req_completion);
200 }
201
202 static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
203 {
204 struct usb_request *req = ffs->ep0req;
205 int ret;
206
207 req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
208
209 spin_unlock_irq(&ffs->ev.waitq.lock);
210
211 req->buf = data;
212 req->length = len;
213
214 /*
215 * The UDC layer requires a buffer to be provided even for a ZLP, but
216 * it should not use it at all. Provide a poisoned pointer to catch a
217 * possible bug in the driver.
218 */
219 if (req->buf == NULL)
220 req->buf = (void *)0xDEADBABE;
221
222 reinit_completion(&ffs->ep0req_completion);
223
224 ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
225 if (unlikely(ret < 0))
226 return ret;
227
228 ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
229 if (unlikely(ret)) {
230 usb_ep_dequeue(ffs->gadget->ep0, req);
231 return -EINTR;
232 }
233
234 ffs->setup_state = FFS_NO_SETUP;
235 return req->status ? req->status : req->actual;
236 }
237
238 static int __ffs_ep0_stall(struct ffs_data *ffs)
239 {
240 if (ffs->ev.can_stall) {
241 pr_vdebug("ep0 stall\n");
242 usb_ep_set_halt(ffs->gadget->ep0);
243 ffs->setup_state = FFS_NO_SETUP;
244 return -EL2HLT;
245 } else {
246 pr_debug("bogus ep0 stall!\n");
247 return -ESRCH;
248 }
249 }
250
251 static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
252 size_t len, loff_t *ptr)
253 {
254 struct ffs_data *ffs = file->private_data;
255 ssize_t ret;
256 char *data;
257
258 ENTER();
259
260 /* Fast check if setup was canceled */
261 if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
262 return -EIDRM;
263
264 /* Acquire mutex */
265 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
266 if (unlikely(ret < 0))
267 return ret;
268
269 /* Check state */
270 switch (ffs->state) {
271 case FFS_READ_DESCRIPTORS:
272 case FFS_READ_STRINGS:
273 /* Copy data */
274 if (unlikely(len < 16)) {
275 ret = -EINVAL;
276 break;
277 }
278
279 data = ffs_prepare_buffer(buf, len);
280 if (IS_ERR(data)) {
281 ret = PTR_ERR(data);
282 break;
283 }
284
285 /* Handle data */
286 if (ffs->state == FFS_READ_DESCRIPTORS) {
287 pr_info("read descriptors\n");
288 ret = __ffs_data_got_descs(ffs, data, len);
289 if (unlikely(ret < 0))
290 break;
291
292 ffs->state = FFS_READ_STRINGS;
293 ret = len;
294 } else {
295 pr_info("read strings\n");
296 ret = __ffs_data_got_strings(ffs, data, len);
297 if (unlikely(ret < 0))
298 break;
299
300 ret = ffs_epfiles_create(ffs);
301 if (unlikely(ret)) {
302 ffs->state = FFS_CLOSING;
303 break;
304 }
305
306 ffs->state = FFS_ACTIVE;
307 mutex_unlock(&ffs->mutex);
308
309 ret = ffs_ready(ffs);
310 if (unlikely(ret < 0)) {
311 ffs->state = FFS_CLOSING;
312 return ret;
313 }
314
315 set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
316 return len;
317 }
318 break;
319
320 case FFS_ACTIVE:
321 data = NULL;
322 /*
323 * We're called from user space, we can use _irq
324 * rather than _irqsave
325 */
326 spin_lock_irq(&ffs->ev.waitq.lock);
327 switch (ffs_setup_state_clear_cancelled(ffs)) {
328 case FFS_SETUP_CANCELLED:
329 ret = -EIDRM;
330 goto done_spin;
331
332 case FFS_NO_SETUP:
333 ret = -ESRCH;
334 goto done_spin;
335
336 case FFS_SETUP_PENDING:
337 break;
338 }
339
340 /* FFS_SETUP_PENDING */
341 if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
342 spin_unlock_irq(&ffs->ev.waitq.lock);
343 ret = __ffs_ep0_stall(ffs);
344 break;
345 }
346
347 /* FFS_SETUP_PENDING and not stall */
348 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
349
350 spin_unlock_irq(&ffs->ev.waitq.lock);
351
352 data = ffs_prepare_buffer(buf, len);
353 if (IS_ERR(data)) {
354 ret = PTR_ERR(data);
355 break;
356 }
357
358 spin_lock_irq(&ffs->ev.waitq.lock);
359
360 /*
361 * We are guaranteed to be still in FFS_ACTIVE state
362 * but the state of setup could have changed from
363 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
364 * to check for that. If that happened we copied data
365 * from user space in vain but it's unlikely.
366 *
367 * For sure we are not in FFS_NO_SETUP since this is
368 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
369 * transition can be performed and it's protected by
370 * mutex.
371 */
372 if (ffs_setup_state_clear_cancelled(ffs) ==
373 FFS_SETUP_CANCELLED) {
374 ret = -EIDRM;
375 done_spin:
376 spin_unlock_irq(&ffs->ev.waitq.lock);
377 } else {
378 /* unlocks spinlock */
379 ret = __ffs_ep0_queue_wait(ffs, data, len);
380 }
381 kfree(data);
382 break;
383
384 default:
385 ret = -EBADFD;
386 break;
387 }
388
389 mutex_unlock(&ffs->mutex);
390 return ret;
391 }
392
393 static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
394 size_t n)
395 {
396 /*
397 * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
398 * to release them.
399 */
400 struct usb_functionfs_event events[n];
401 unsigned i = 0;
402
403 memset(events, 0, sizeof events);
404
405 do {
406 events[i].type = ffs->ev.types[i];
407 if (events[i].type == FUNCTIONFS_SETUP) {
408 events[i].u.setup = ffs->ev.setup;
409 ffs->setup_state = FFS_SETUP_PENDING;
410 }
411 } while (++i < n);
412
413 if (n < ffs->ev.count) {
414 ffs->ev.count -= n;
415 memmove(ffs->ev.types, ffs->ev.types + n,
416 ffs->ev.count * sizeof *ffs->ev.types);
417 } else {
418 ffs->ev.count = 0;
419 }
420
421 spin_unlock_irq(&ffs->ev.waitq.lock);
422 mutex_unlock(&ffs->mutex);
423
424 return unlikely(__copy_to_user(buf, events, sizeof events))
425 ? -EFAULT : sizeof events;
426 }
427
428 static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
429 size_t len, loff_t *ptr)
430 {
431 struct ffs_data *ffs = file->private_data;
432 char *data = NULL;
433 size_t n;
434 int ret;
435
436 ENTER();
437
438 /* Fast check if setup was canceled */
439 if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
440 return -EIDRM;
441
442 /* Acquire mutex */
443 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
444 if (unlikely(ret < 0))
445 return ret;
446
447 /* Check state */
448 if (ffs->state != FFS_ACTIVE) {
449 ret = -EBADFD;
450 goto done_mutex;
451 }
452
453 /*
454 * We're called from user space, we can use _irq rather than
455 * _irqsave
456 */
457 spin_lock_irq(&ffs->ev.waitq.lock);
458
459 switch (ffs_setup_state_clear_cancelled(ffs)) {
460 case FFS_SETUP_CANCELLED:
461 ret = -EIDRM;
462 break;
463
464 case FFS_NO_SETUP:
465 n = len / sizeof(struct usb_functionfs_event);
466 if (unlikely(!n)) {
467 ret = -EINVAL;
468 break;
469 }
470
471 if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
472 ret = -EAGAIN;
473 break;
474 }
475
476 if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
477 ffs->ev.count)) {
478 ret = -EINTR;
479 break;
480 }
481
482 return __ffs_ep0_read_events(ffs, buf,
483 min(n, (size_t)ffs->ev.count));
484
485 case FFS_SETUP_PENDING:
486 if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
487 spin_unlock_irq(&ffs->ev.waitq.lock);
488 ret = __ffs_ep0_stall(ffs);
489 goto done_mutex;
490 }
491
492 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
493
494 spin_unlock_irq(&ffs->ev.waitq.lock);
495
496 if (likely(len)) {
497 data = kmalloc(len, GFP_KERNEL);
498 if (unlikely(!data)) {
499 ret = -ENOMEM;
500 goto done_mutex;
501 }
502 }
503
504 spin_lock_irq(&ffs->ev.waitq.lock);
505
506 /* See ffs_ep0_write() */
507 if (ffs_setup_state_clear_cancelled(ffs) ==
508 FFS_SETUP_CANCELLED) {
509 ret = -EIDRM;
510 break;
511 }
512
513 /* unlocks spinlock */
514 ret = __ffs_ep0_queue_wait(ffs, data, len);
515 if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
516 ret = -EFAULT;
517 goto done_mutex;
518
519 default:
520 ret = -EBADFD;
521 break;
522 }
523
524 spin_unlock_irq(&ffs->ev.waitq.lock);
525 done_mutex:
526 mutex_unlock(&ffs->mutex);
527 kfree(data);
528 return ret;
529 }
530
531 static int ffs_ep0_open(struct inode *inode, struct file *file)
532 {
533 struct ffs_data *ffs = inode->i_private;
534
535 ENTER();
536
537 if (unlikely(ffs->state == FFS_CLOSING))
538 return -EBUSY;
539
540 file->private_data = ffs;
541 ffs_data_opened(ffs);
542
543 return 0;
544 }
545
546 static int ffs_ep0_release(struct inode *inode, struct file *file)
547 {
548 struct ffs_data *ffs = file->private_data;
549
550 ENTER();
551
552 ffs_data_closed(ffs);
553
554 return 0;
555 }
556
557 static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
558 {
559 struct ffs_data *ffs = file->private_data;
560 struct usb_gadget *gadget = ffs->gadget;
561 long ret;
562
563 ENTER();
564
565 if (code == FUNCTIONFS_INTERFACE_REVMAP) {
566 struct ffs_function *func = ffs->func;
567 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
568 } else if (gadget && gadget->ops->ioctl) {
569 ret = gadget->ops->ioctl(gadget, code, value);
570 } else {
571 ret = -ENOTTY;
572 }
573
574 return ret;
575 }
576
577 static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
578 {
579 struct ffs_data *ffs = file->private_data;
580 unsigned int mask = POLLWRNORM;
581 int ret;
582
583 poll_wait(file, &ffs->ev.waitq, wait);
584
585 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
586 if (unlikely(ret < 0))
587 return mask;
588
589 switch (ffs->state) {
590 case FFS_READ_DESCRIPTORS:
591 case FFS_READ_STRINGS:
592 mask |= POLLOUT;
593 break;
594
595 case FFS_ACTIVE:
596 switch (ffs->setup_state) {
597 case FFS_NO_SETUP:
598 if (ffs->ev.count)
599 mask |= POLLIN;
600 break;
601
602 case FFS_SETUP_PENDING:
603 case FFS_SETUP_CANCELLED:
604 mask |= (POLLIN | POLLOUT);
605 break;
606 }
607 case FFS_CLOSING:
608 break;
609 }
610
611 mutex_unlock(&ffs->mutex);
612
613 return mask;
614 }
615
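/*
 * An illustrative user-space sketch (assumptions: ep0_fd is an open "ep0"
 * file, reply/reply_len are a caller-provided buffer, error handling is
 * omitted) of how ep0 is typically driven once the function is active,
 * matching ffs_ep0_poll() and ffs_ep0_read() above:
 *
 *	struct usb_functionfs_event event;
 *	struct pollfd pfd = { .fd = ep0_fd, .events = POLLIN };
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		if (read(ep0_fd, &event, sizeof(event)) < (ssize_t)sizeof(event))
 *			break;
 *		if (event.type != FUNCTIONFS_SETUP)
 *			continue;
 *		if (event.u.setup.bRequestType & USB_DIR_IN)
 *			write(ep0_fd, reply, reply_len);
 *		else
 *			read(ep0_fd, reply, event.u.setup.wLength);
 *	}
 */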
616 static const struct file_operations ffs_ep0_operations = {
617 .llseek = no_llseek,
618
619 .open = ffs_ep0_open,
620 .write = ffs_ep0_write,
621 .read = ffs_ep0_read,
622 .release = ffs_ep0_release,
623 .unlocked_ioctl = ffs_ep0_ioctl,
624 .poll = ffs_ep0_poll,
625 };
626
627
628 /* "Normal" endpoints operations ********************************************/
629
630 static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
631 {
632 ENTER();
633 if (likely(req->context)) {
634 struct ffs_ep *ep = _ep->driver_data;
635 ep->status = req->status ? req->status : req->actual;
636 complete(req->context);
637 }
638 }
639
640 static void ffs_user_copy_worker(struct work_struct *work)
641 {
642 struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
643 work);
644 int ret = io_data->req->status ? io_data->req->status :
645 io_data->req->actual;
646
647 if (io_data->read && ret > 0) {
648 int i;
649 size_t pos = 0;
650
651 /*
652 * Since req->length may be bigger than io_data->len (after
653 * being rounded up to maxpacketsize), we may end up with more
654 * data than user space has space for.
655 */
656 ret = min_t(int, ret, io_data->len);
657
658 use_mm(io_data->mm);
659 for (i = 0; i < io_data->nr_segs; i++) {
660 size_t len = min_t(size_t, ret - pos,
661 io_data->iovec[i].iov_len);
662 if (!len)
663 break;
664 if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
665 &io_data->buf[pos], len))) {
666 ret = -EFAULT;
667 break;
668 }
669 pos += len;
670 }
671 unuse_mm(io_data->mm);
672 }
673
674 aio_complete(io_data->kiocb, ret, ret);
675
676 usb_ep_free_request(io_data->ep, io_data->req);
677
678 if (io_data->read)
679 kfree(io_data->iovec);
680 kfree(io_data->buf);
681 kfree(io_data);
682 }
683
684 static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
685 struct usb_request *req)
686 {
687 struct ffs_io_data *io_data = req->context;
688
689 ENTER();
690
691 INIT_WORK(&io_data->work, ffs_user_copy_worker);
692 schedule_work(&io_data->work);
693 }
694
695 static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
696 {
697 struct ffs_epfile *epfile = file->private_data;
698 struct ffs_ep *ep;
699 char *data = NULL;
700 ssize_t ret, data_len = -EINVAL;
701 int halt;
702
703 /* Are we still active? */
704 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
705 ret = -ENODEV;
706 goto error;
707 }
708
709 /* Wait for endpoint to be enabled */
710 ep = epfile->ep;
711 if (!ep) {
712 if (file->f_flags & O_NONBLOCK) {
713 ret = -EAGAIN;
714 goto error;
715 }
716
717 ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
718 if (ret) {
719 ret = -EINTR;
720 goto error;
721 }
722 }
723
724 /* Do we halt? */
725 halt = (!io_data->read == !epfile->in);
726 if (halt && epfile->isoc) {
727 ret = -EINVAL;
728 goto error;
729 }
730
731 /* Allocate & copy */
732 if (!halt) {
733 /*
734 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
735 * before the waiting completes, so do not assign to 'gadget' earlier
736 */
737 struct usb_gadget *gadget = epfile->ffs->gadget;
738
739 spin_lock_irq(&epfile->ffs->eps_lock);
740 /* In the meantime, endpoint got disabled or changed. */
741 if (epfile->ep != ep) {
742 spin_unlock_irq(&epfile->ffs->eps_lock);
743 return -ESHUTDOWN;
744 }
745 /*
746 * Controller may require buffer size to be aligned to
747 * maxpacketsize of an out endpoint.
748 */
749 data_len = io_data->read ?
750 usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
751 io_data->len;
752 spin_unlock_irq(&epfile->ffs->eps_lock);
753
754 data = kmalloc(data_len, GFP_KERNEL);
755 if (unlikely(!data))
756 return -ENOMEM;
757 if (io_data->aio && !io_data->read) {
758 int i;
759 size_t pos = 0;
760 for (i = 0; i < io_data->nr_segs; i++) {
761 if (unlikely(copy_from_user(&data[pos],
762 io_data->iovec[i].iov_base,
763 io_data->iovec[i].iov_len))) {
764 ret = -EFAULT;
765 goto error;
766 }
767 pos += io_data->iovec[i].iov_len;
768 }
769 } else {
770 if (!io_data->read &&
771 unlikely(__copy_from_user(data, io_data->buf,
772 io_data->len))) {
773 ret = -EFAULT;
774 goto error;
775 }
776 }
777 }
778
779 /* We will be using request */
780 ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
781 if (unlikely(ret))
782 goto error;
783
784 spin_lock_irq(&epfile->ffs->eps_lock);
785
786 if (epfile->ep != ep) {
787 /* In the meantime, endpoint got disabled or changed. */
788 ret = -ESHUTDOWN;
789 spin_unlock_irq(&epfile->ffs->eps_lock);
790 } else if (halt) {
791 /* Halt */
792 if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
793 usb_ep_set_halt(ep->ep);
794 spin_unlock_irq(&epfile->ffs->eps_lock);
795 ret = -EBADMSG;
796 } else {
797 /* Fire the request */
798 struct usb_request *req;
799
800 /*
801 * Sanity Check: even though data_len can't be used
802 * uninitialized at the time I write this comment, some
803 * compilers complain about this situation.
804 * In order to keep the code free of warnings, data_len is
805 * initialized to -EINVAL at its declaration, which means we can
806 * no longer rely on the compiler to warn us if a future change
807 * results in data_len being used uninitialized.
808 * For that reason, we add this redundant sanity check
809 * here.
810 */
811 if (unlikely(data_len == -EINVAL)) {
812 WARN(1, "%s: data_len == -EINVAL\n", __func__);
813 ret = -EINVAL;
814 goto error_lock;
815 }
816
817 if (io_data->aio) {
818 req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
819 if (unlikely(!req)) {
820 ret = -ENOMEM;
goto error_lock;
}
821
822 req->buf = data;
823 req->length = data_len;
824
825 io_data->buf = data;
826 io_data->ep = ep->ep;
827 io_data->req = req;
828
829 req->context = io_data;
830 req->complete = ffs_epfile_async_io_complete;
831
832 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
833 if (unlikely(ret)) {
834 usb_ep_free_request(ep->ep, req);
835 goto error_lock;
836 }
837 ret = -EIOCBQUEUED;
838
839 spin_unlock_irq(&epfile->ffs->eps_lock);
840 } else {
841 DECLARE_COMPLETION_ONSTACK(done);
842
843 req = ep->req;
844 req->buf = data;
845 req->length = data_len;
846
847 req->context = &done;
848 req->complete = ffs_epfile_io_complete;
849
850 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
851
852 spin_unlock_irq(&epfile->ffs->eps_lock);
853
854 if (unlikely(ret < 0)) {
855 /* nop */
856 } else if (unlikely(
857 wait_for_completion_interruptible(&done))) {
858 ret = -EINTR;
859 usb_ep_dequeue(ep->ep, req);
860 } else {
861 /*
862 * XXX We may end up silently dropping data
863 * here. Since data_len (i.e. req->length) may
864 * be bigger than len (after being rounded up
865 * to maxpacketsize), we may end up with more
866 * data than user space has space for.
867 */
868 ret = ep->status;
869 if (io_data->read && ret > 0) {
870 ret = min_t(size_t, ret, io_data->len);
871
872 if (unlikely(copy_to_user(io_data->buf,
873 data, ret)))
874 ret = -EFAULT;
875 }
876 }
877 kfree(data);
878 }
879 }
880
881 mutex_unlock(&epfile->mutex);
882 return ret;
883
884 error_lock:
885 spin_unlock_irq(&epfile->ffs->eps_lock);
886 mutex_unlock(&epfile->mutex);
887 error:
888 kfree(data);
889 return ret;
890 }
891
892 static ssize_t
893 ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
894 loff_t *ptr)
895 {
896 struct ffs_io_data io_data;
897
898 ENTER();
899
900 io_data.aio = false;
901 io_data.read = false;
902 io_data.buf = (char * __user)buf;
903 io_data.len = len;
904
905 return ffs_epfile_io(file, &io_data);
906 }
907
908 static ssize_t
909 ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
910 {
911 struct ffs_io_data io_data;
912
913 ENTER();
914
915 io_data.aio = false;
916 io_data.read = true;
917 io_data.buf = buf;
918 io_data.len = len;
919
920 return ffs_epfile_io(file, &io_data);
921 }
922
923 static int
924 ffs_epfile_open(struct inode *inode, struct file *file)
925 {
926 struct ffs_epfile *epfile = inode->i_private;
927
928 ENTER();
929
930 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
931 return -ENODEV;
932
933 file->private_data = epfile;
934 ffs_data_opened(epfile->ffs);
935
936 return 0;
937 }
938
939 static int ffs_aio_cancel(struct kiocb *kiocb)
940 {
941 struct ffs_io_data *io_data = kiocb->private;
942 struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
943 int value;
944
945 ENTER();
946
947 spin_lock_irq(&epfile->ffs->eps_lock);
948
949 if (likely(io_data && io_data->ep && io_data->req))
950 value = usb_ep_dequeue(io_data->ep, io_data->req);
951 else
952 value = -EINVAL;
953
954 spin_unlock_irq(&epfile->ffs->eps_lock);
955
956 return value;
957 }
958
959 static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
960 const struct iovec *iovec,
961 unsigned long nr_segs, loff_t loff)
962 {
963 struct ffs_io_data *io_data;
964
965 ENTER();
966
967 io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
968 if (unlikely(!io_data))
969 return -ENOMEM;
970
971 io_data->aio = true;
972 io_data->read = false;
973 io_data->kiocb = kiocb;
974 io_data->iovec = iovec;
975 io_data->nr_segs = nr_segs;
976 io_data->len = kiocb->ki_nbytes;
977 io_data->mm = current->mm;
978
979 kiocb->private = io_data;
980
981 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
982
983 return ffs_epfile_io(kiocb->ki_filp, io_data);
984 }
985
986 static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
987 const struct iovec *iovec,
988 unsigned long nr_segs, loff_t loff)
989 {
990 struct ffs_io_data *io_data;
991 struct iovec *iovec_copy;
992
993 ENTER();
994
995 iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
996 if (unlikely(!iovec_copy))
997 return -ENOMEM;
998
999 memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
1000
1001 io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
1002 if (unlikely(!io_data)) {
1003 kfree(iovec_copy);
1004 return -ENOMEM;
1005 }
1006
1007 io_data->aio = true;
1008 io_data->read = true;
1009 io_data->kiocb = kiocb;
1010 io_data->iovec = iovec_copy;
1011 io_data->nr_segs = nr_segs;
1012 io_data->len = kiocb->ki_nbytes;
1013 io_data->mm = current->mm;
1014
1015 kiocb->private = io_data;
1016
1017 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
1018
1019 return ffs_epfile_io(kiocb->ki_filp, io_data);
1020 }
1021
1022 static int
1023 ffs_epfile_release(struct inode *inode, struct file *file)
1024 {
1025 struct ffs_epfile *epfile = inode->i_private;
1026
1027 ENTER();
1028
1029 ffs_data_closed(epfile->ffs);
1030
1031 return 0;
1032 }
1033
1034 static long ffs_epfile_ioctl(struct file *file, unsigned code,
1035 unsigned long value)
1036 {
1037 struct ffs_epfile *epfile = file->private_data;
1038 int ret;
1039
1040 ENTER();
1041
1042 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1043 return -ENODEV;
1044
1045 spin_lock_irq(&epfile->ffs->eps_lock);
1046 if (likely(epfile->ep)) {
1047 switch (code) {
1048 case FUNCTIONFS_FIFO_STATUS:
1049 ret = usb_ep_fifo_status(epfile->ep->ep);
1050 break;
1051 case FUNCTIONFS_FIFO_FLUSH:
1052 usb_ep_fifo_flush(epfile->ep->ep);
1053 ret = 0;
1054 break;
1055 case FUNCTIONFS_CLEAR_HALT:
1056 ret = usb_ep_clear_halt(epfile->ep->ep);
1057 break;
1058 case FUNCTIONFS_ENDPOINT_REVMAP:
1059 ret = epfile->ep->num;
1060 break;
1061 case FUNCTIONFS_ENDPOINT_DESC:
1062 {
1063 int desc_idx;
1064 struct usb_endpoint_descriptor *desc;
1065
1066 switch (epfile->ffs->gadget->speed) {
1067 case USB_SPEED_SUPER:
1068 desc_idx = 2;
1069 break;
1070 case USB_SPEED_HIGH:
1071 desc_idx = 1;
1072 break;
1073 default:
1074 desc_idx = 0;
1075 }
1076 desc = epfile->ep->descs[desc_idx];
1077
1078 spin_unlock_irq(&epfile->ffs->eps_lock);
1079 ret = copy_to_user((void *)value, desc, sizeof(*desc));
1080 if (ret)
1081 ret = -EFAULT;
1082 return ret;
1083 }
1084 default:
1085 ret = -ENOTTY;
1086 }
1087 } else {
1088 ret = -ENODEV;
1089 }
1090 spin_unlock_irq(&epfile->ffs->eps_lock);
1091
1092 return ret;
1093 }
1094
1095 static const struct file_operations ffs_epfile_operations = {
1096 .llseek = no_llseek,
1097
1098 .open = ffs_epfile_open,
1099 .write = ffs_epfile_write,
1100 .read = ffs_epfile_read,
1101 .aio_write = ffs_epfile_aio_write,
1102 .aio_read = ffs_epfile_aio_read,
1103 .release = ffs_epfile_release,
1104 .unlocked_ioctl = ffs_epfile_ioctl,
1105 };
1106
1107
1108 /* File system and super block operations ***********************************/
1109
1110 /*
1111 * Mounting the file system creates a controller file, used first for
1112 * function configuration then later for event monitoring.
1113 */
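/*
 * Roughly, the user-space sequence (mirroring the FFS_READ_DESCRIPTORS ->
 * FFS_READ_STRINGS -> FFS_ACTIVE transitions handled in ffs_ep0_write())
 * is:
 *
 *   1. mount the file system; an "ep0" file appears in the mount point;
 *   2. open "ep0" and write the descriptors blob, then the strings blob;
 *   3. once both writes succeed the function is registered with the
 *      gadget layer (ffs_ready()) and the "ep1", "ep2", ... files are
 *      created by ffs_epfiles_create();
 *   4. keep "ep0" open and read struct usb_functionfs_event records from
 *      it to track bind/enable/setup/disable events.
 */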
1114
1115 static struct inode *__must_check
1116 ffs_sb_make_inode(struct super_block *sb, void *data,
1117 const struct file_operations *fops,
1118 const struct inode_operations *iops,
1119 struct ffs_file_perms *perms)
1120 {
1121 struct inode *inode;
1122
1123 ENTER();
1124
1125 inode = new_inode(sb);
1126
1127 if (likely(inode)) {
1128 struct timespec current_time = CURRENT_TIME;
1129
1130 inode->i_ino = get_next_ino();
1131 inode->i_mode = perms->mode;
1132 inode->i_uid = perms->uid;
1133 inode->i_gid = perms->gid;
1134 inode->i_atime = current_time;
1135 inode->i_mtime = current_time;
1136 inode->i_ctime = current_time;
1137 inode->i_private = data;
1138 if (fops)
1139 inode->i_fop = fops;
1140 if (iops)
1141 inode->i_op = iops;
1142 }
1143
1144 return inode;
1145 }
1146
1147 /* Create "regular" file */
1148 static struct dentry *ffs_sb_create_file(struct super_block *sb,
1149 const char *name, void *data,
1150 const struct file_operations *fops)
1151 {
1152 struct ffs_data *ffs = sb->s_fs_info;
1153 struct dentry *dentry;
1154 struct inode *inode;
1155
1156 ENTER();
1157
1158 dentry = d_alloc_name(sb->s_root, name);
1159 if (unlikely(!dentry))
1160 return NULL;
1161
1162 inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
1163 if (unlikely(!inode)) {
1164 dput(dentry);
1165 return NULL;
1166 }
1167
1168 d_add(dentry, inode);
1169 return dentry;
1170 }
1171
1172 /* Super block */
1173 static const struct super_operations ffs_sb_operations = {
1174 .statfs = simple_statfs,
1175 .drop_inode = generic_delete_inode,
1176 };
1177
1178 struct ffs_sb_fill_data {
1179 struct ffs_file_perms perms;
1180 umode_t root_mode;
1181 const char *dev_name;
1182 struct ffs_data *ffs_data;
1183 };
1184
1185 static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1186 {
1187 struct ffs_sb_fill_data *data = _data;
1188 struct inode *inode;
1189 struct ffs_data *ffs = data->ffs_data;
1190
1191 ENTER();
1192
1193 ffs->sb = sb;
1194 data->ffs_data = NULL;
1195 sb->s_fs_info = ffs;
1196 sb->s_blocksize = PAGE_CACHE_SIZE;
1197 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
1198 sb->s_magic = FUNCTIONFS_MAGIC;
1199 sb->s_op = &ffs_sb_operations;
1200 sb->s_time_gran = 1;
1201
1202 /* Root inode */
1203 data->perms.mode = data->root_mode;
1204 inode = ffs_sb_make_inode(sb, NULL,
1205 &simple_dir_operations,
1206 &simple_dir_inode_operations,
1207 &data->perms);
1208 sb->s_root = d_make_root(inode);
1209 if (unlikely(!sb->s_root))
1210 return -ENOMEM;
1211
1212 /* EP0 file */
1213 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
1214 &ffs_ep0_operations)))
1215 return -ENOMEM;
1216
1217 return 0;
1218 }
1219
1220 static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1221 {
1222 ENTER();
1223
1224 if (!opts || !*opts)
1225 return 0;
1226
1227 for (;;) {
1228 unsigned long value;
1229 char *eq, *comma;
1230
1231 /* Option limit */
1232 comma = strchr(opts, ',');
1233 if (comma)
1234 *comma = 0;
1235
1236 /* Value limit */
1237 eq = strchr(opts, '=');
1238 if (unlikely(!eq)) {
1239 pr_err("'=' missing in %s\n", opts);
1240 return -EINVAL;
1241 }
1242 *eq = 0;
1243
1244 /* Parse value */
1245 if (kstrtoul(eq + 1, 0, &value)) {
1246 pr_err("%s: invalid value: %s\n", opts, eq + 1);
1247 return -EINVAL;
1248 }
1249
1250 /* Interpret option */
1251 switch (eq - opts) {
1252 case 5:
1253 if (!memcmp(opts, "rmode", 5))
1254 data->root_mode = (value & 0555) | S_IFDIR;
1255 else if (!memcmp(opts, "fmode", 5))
1256 data->perms.mode = (value & 0666) | S_IFREG;
1257 else
1258 goto invalid;
1259 break;
1260
1261 case 4:
1262 if (!memcmp(opts, "mode", 4)) {
1263 data->root_mode = (value & 0555) | S_IFDIR;
1264 data->perms.mode = (value & 0666) | S_IFREG;
1265 } else {
1266 goto invalid;
1267 }
1268 break;
1269
1270 case 3:
1271 if (!memcmp(opts, "uid", 3)) {
1272 data->perms.uid = make_kuid(current_user_ns(), value);
1273 if (!uid_valid(data->perms.uid)) {
1274 pr_err("%s: unmapped value: %lu\n", opts, value);
1275 return -EINVAL;
1276 }
1277 } else if (!memcmp(opts, "gid", 3)) {
1278 data->perms.gid = make_kgid(current_user_ns(), value);
1279 if (!gid_valid(data->perms.gid)) {
1280 pr_err("%s: unmapped value: %lu\n", opts, value);
1281 return -EINVAL;
1282 }
1283 } else {
1284 goto invalid;
1285 }
1286 break;
1287
1288 default:
1289 invalid:
1290 pr_err("%s: invalid option\n", opts);
1291 return -EINVAL;
1292 }
1293
1294 /* Next iteration */
1295 if (!comma)
1296 break;
1297 opts = comma + 1;
1298 }
1299
1300 return 0;
1301 }
1302
1303 /* "mount -t functionfs dev_name /dev/function" ends up here */
1304
1305 static struct dentry *
1306 ffs_fs_mount(struct file_system_type *t, int flags,
1307 const char *dev_name, void *opts)
1308 {
1309 struct ffs_sb_fill_data data = {
1310 .perms = {
1311 .mode = S_IFREG | 0600,
1312 .uid = GLOBAL_ROOT_UID,
1313 .gid = GLOBAL_ROOT_GID,
1314 },
1315 .root_mode = S_IFDIR | 0500,
1316 };
1317 struct dentry *rv;
1318 int ret;
1319 void *ffs_dev;
1320 struct ffs_data *ffs;
1321
1322 ENTER();
1323
1324 ret = ffs_fs_parse_opts(&data, opts);
1325 if (unlikely(ret < 0))
1326 return ERR_PTR(ret);
1327
1328 ffs = ffs_data_new();
1329 if (unlikely(!ffs))
1330 return ERR_PTR(-ENOMEM);
1331 ffs->file_perms = data.perms;
1332
1333 ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
1334 if (unlikely(!ffs->dev_name)) {
1335 ffs_data_put(ffs);
1336 return ERR_PTR(-ENOMEM);
1337 }
1338
1339 ffs_dev = ffs_acquire_dev(dev_name);
1340 if (IS_ERR(ffs_dev)) {
1341 ffs_data_put(ffs);
1342 return ERR_CAST(ffs_dev);
1343 }
1344 ffs->private_data = ffs_dev;
1345 data.ffs_data = ffs;
1346
1347 rv = mount_nodev(t, flags, &data, ffs_sb_fill);
1348 if (IS_ERR(rv) && data.ffs_data) {
1349 ffs_release_dev(data.ffs_data);
1350 ffs_data_put(data.ffs_data);
1351 }
1352 return rv;
1353 }
1354
1355 static void
1356 ffs_fs_kill_sb(struct super_block *sb)
1357 {
1358 ENTER();
1359
1360 kill_litter_super(sb);
1361 if (sb->s_fs_info) {
1362 ffs_release_dev(sb->s_fs_info);
1363 ffs_data_put(sb->s_fs_info);
1364 }
1365 }
1366
1367 static struct file_system_type ffs_fs_type = {
1368 .owner = THIS_MODULE,
1369 .name = "functionfs",
1370 .mount = ffs_fs_mount,
1371 .kill_sb = ffs_fs_kill_sb,
1372 };
1373 MODULE_ALIAS_FS("functionfs");
1374
1375
1376 /* Driver's main init/cleanup functions *************************************/
1377
1378 static int functionfs_init(void)
1379 {
1380 int ret;
1381
1382 ENTER();
1383
1384 ret = register_filesystem(&ffs_fs_type);
1385 if (likely(!ret))
1386 pr_info("file system registered\n");
1387 else
1388 pr_err("failed registering file system (%d)\n", ret);
1389
1390 return ret;
1391 }
1392
1393 static void functionfs_cleanup(void)
1394 {
1395 ENTER();
1396
1397 pr_info("unloading\n");
1398 unregister_filesystem(&ffs_fs_type);
1399 }
1400
1401
1402 /* ffs_data and ffs_function construction and destruction code **************/
1403
1404 static void ffs_data_clear(struct ffs_data *ffs);
1405 static void ffs_data_reset(struct ffs_data *ffs);
1406
1407 static void ffs_data_get(struct ffs_data *ffs)
1408 {
1409 ENTER();
1410
1411 atomic_inc(&ffs->ref);
1412 }
1413
1414 static void ffs_data_opened(struct ffs_data *ffs)
1415 {
1416 ENTER();
1417
1418 atomic_inc(&ffs->ref);
1419 atomic_inc(&ffs->opened);
1420 }
1421
1422 static void ffs_data_put(struct ffs_data *ffs)
1423 {
1424 ENTER();
1425
1426 if (unlikely(atomic_dec_and_test(&ffs->ref))) {
1427 pr_info("%s(): freeing\n", __func__);
1428 ffs_data_clear(ffs);
1429 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
1430 waitqueue_active(&ffs->ep0req_completion.wait));
1431 kfree(ffs->dev_name);
1432 kfree(ffs);
1433 }
1434 }
1435
1436 static void ffs_data_closed(struct ffs_data *ffs)
1437 {
1438 ENTER();
1439
1440 if (atomic_dec_and_test(&ffs->opened)) {
1441 ffs->state = FFS_CLOSING;
1442 ffs_data_reset(ffs);
1443 }
1444
1445 ffs_data_put(ffs);
1446 }
1447
1448 static struct ffs_data *ffs_data_new(void)
1449 {
1450 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1451 if (unlikely(!ffs))
1452 return NULL;
1453
1454 ENTER();
1455
1456 atomic_set(&ffs->ref, 1);
1457 atomic_set(&ffs->opened, 0);
1458 ffs->state = FFS_READ_DESCRIPTORS;
1459 mutex_init(&ffs->mutex);
1460 spin_lock_init(&ffs->eps_lock);
1461 init_waitqueue_head(&ffs->ev.waitq);
1462 init_completion(&ffs->ep0req_completion);
1463
1464 /* XXX REVISIT need to update it in some places, or do we? */
1465 ffs->ev.can_stall = 1;
1466
1467 return ffs;
1468 }
1469
1470 static void ffs_data_clear(struct ffs_data *ffs)
1471 {
1472 ENTER();
1473
1474 if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
1475 ffs_closed(ffs);
1476
1477 BUG_ON(ffs->gadget);
1478
1479 if (ffs->epfiles)
1480 ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
1481
1482 kfree(ffs->raw_descs_data);
1483 kfree(ffs->raw_strings);
1484 kfree(ffs->stringtabs);
1485 }
1486
1487 static void ffs_data_reset(struct ffs_data *ffs)
1488 {
1489 ENTER();
1490
1491 ffs_data_clear(ffs);
1492
1493 ffs->epfiles = NULL;
1494 ffs->raw_descs_data = NULL;
1495 ffs->raw_descs = NULL;
1496 ffs->raw_strings = NULL;
1497 ffs->stringtabs = NULL;
1498
1499 ffs->raw_descs_length = 0;
1500 ffs->fs_descs_count = 0;
1501 ffs->hs_descs_count = 0;
1502 ffs->ss_descs_count = 0;
1503
1504 ffs->strings_count = 0;
1505 ffs->interfaces_count = 0;
1506 ffs->eps_count = 0;
1507
1508 ffs->ev.count = 0;
1509
1510 ffs->state = FFS_READ_DESCRIPTORS;
1511 ffs->setup_state = FFS_NO_SETUP;
1512 ffs->flags = 0;
1513 }
1514
1515
1516 static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
1517 {
1518 struct usb_gadget_strings **lang;
1519 int first_id;
1520
1521 ENTER();
1522
1523 if (WARN_ON(ffs->state != FFS_ACTIVE
1524 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
1525 return -EBADFD;
1526
1527 first_id = usb_string_ids_n(cdev, ffs->strings_count);
1528 if (unlikely(first_id < 0))
1529 return first_id;
1530
1531 ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
1532 if (unlikely(!ffs->ep0req))
1533 return -ENOMEM;
1534 ffs->ep0req->complete = ffs_ep0_complete;
1535 ffs->ep0req->context = ffs;
1536
1537 lang = ffs->stringtabs;
1538 if (lang) {
1539 for (; *lang; ++lang) {
1540 struct usb_string *str = (*lang)->strings;
1541 int id = first_id;
1542 for (; str->s; ++id, ++str)
1543 str->id = id;
1544 }
1545 }
1546
1547 ffs->gadget = cdev->gadget;
1548 ffs_data_get(ffs);
1549 return 0;
1550 }
1551
1552 static void functionfs_unbind(struct ffs_data *ffs)
1553 {
1554 ENTER();
1555
1556 if (!WARN_ON(!ffs->gadget)) {
1557 usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
1558 ffs->ep0req = NULL;
1559 ffs->gadget = NULL;
1560 clear_bit(FFS_FL_BOUND, &ffs->flags);
1561 ffs_data_put(ffs);
1562 }
1563 }
1564
1565 static int ffs_epfiles_create(struct ffs_data *ffs)
1566 {
1567 struct ffs_epfile *epfile, *epfiles;
1568 unsigned i, count;
1569
1570 ENTER();
1571
1572 count = ffs->eps_count;
1573 epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
1574 if (!epfiles)
1575 return -ENOMEM;
1576
1577 epfile = epfiles;
1578 for (i = 1; i <= count; ++i, ++epfile) {
1579 epfile->ffs = ffs;
1580 mutex_init(&epfile->mutex);
1581 init_waitqueue_head(&epfile->wait);
1582 if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
1583 sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
1584 else
1585 sprintf(epfile->name, "ep%u", i);
1586 epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
1587 epfile,
1588 &ffs_epfile_operations);
1589 if (unlikely(!epfile->dentry)) {
1590 ffs_epfiles_destroy(epfiles, i - 1);
1591 return -ENOMEM;
1592 }
1593 }
1594
1595 ffs->epfiles = epfiles;
1596 return 0;
1597 }
1598
1599 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1600 {
1601 struct ffs_epfile *epfile = epfiles;
1602
1603 ENTER();
1604
1605 for (; count; --count, ++epfile) {
1606 BUG_ON(mutex_is_locked(&epfile->mutex) ||
1607 waitqueue_active(&epfile->wait));
1608 if (epfile->dentry) {
1609 d_delete(epfile->dentry);
1610 dput(epfile->dentry);
1611 epfile->dentry = NULL;
1612 }
1613 }
1614
1615 kfree(epfiles);
1616 }
1617
1618
1619 static void ffs_func_eps_disable(struct ffs_function *func)
1620 {
1621 struct ffs_ep *ep = func->eps;
1622 struct ffs_epfile *epfile = func->ffs->epfiles;
1623 unsigned count = func->ffs->eps_count;
1624 unsigned long flags;
1625
1626 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1627 do {
1628 /* pending requests get nuked */
1629 if (likely(ep->ep))
1630 usb_ep_disable(ep->ep);
1631 epfile->ep = NULL;
1632
1633 ++ep;
1634 ++epfile;
1635 } while (--count);
1636 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1637 }
1638
1639 static int ffs_func_eps_enable(struct ffs_function *func)
1640 {
1641 struct ffs_data *ffs = func->ffs;
1642 struct ffs_ep *ep = func->eps;
1643 struct ffs_epfile *epfile = ffs->epfiles;
1644 unsigned count = ffs->eps_count;
1645 unsigned long flags;
1646 int ret = 0;
1647
1648 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1649 do {
1650 struct usb_endpoint_descriptor *ds;
1651 struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
1652 int needs_comp_desc = false;
1653 int desc_idx;
1654
1655 if (ffs->gadget->speed == USB_SPEED_SUPER) {
1656 desc_idx = 2;
1657 needs_comp_desc = true;
1658 } else if (ffs->gadget->speed == USB_SPEED_HIGH)
1659 desc_idx = 1;
1660 else
1661 desc_idx = 0;
1662
1663 /* fall-back to lower speed if desc missing for current speed */
1664 do {
1665 ds = ep->descs[desc_idx];
1666 } while (!ds && --desc_idx >= 0);
1667
1668 if (!ds) {
1669 ret = -EINVAL;
1670 break;
1671 }
1672
1673 ep->ep->driver_data = ep;
1674 ep->ep->desc = ds;
1675
1676 if (needs_comp_desc) {
1677 comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
1678 USB_DT_ENDPOINT_SIZE);
1679 ep->ep->maxburst = comp_desc->bMaxBurst + 1;
1680 ep->ep->comp_desc = comp_desc;
1681 }
1682
1683 ret = usb_ep_enable(ep->ep);
1684 if (likely(!ret)) {
1685 epfile->ep = ep;
1686 epfile->in = usb_endpoint_dir_in(ds);
1687 epfile->isoc = usb_endpoint_xfer_isoc(ds);
1688 } else {
1689 break;
1690 }
1691
1692 wake_up(&epfile->wait);
1693
1694 ++ep;
1695 ++epfile;
1696 } while (--count);
1697 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1698
1699 return ret;
1700 }
1701
1702
1703 /* Parsing and building descriptors and strings *****************************/
1704
1705 /*
1706 * This validates whether the data pointed to by data is a valid USB
1707 * descriptor and records how many interfaces, endpoints and strings
1708 * are required by the given configuration. Returns the descriptor's
1709 * length on success or a negative error code if the data is invalid.
1710 */
1711
1712 enum ffs_entity_type {
1713 FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
1714 };
1715
1716 enum ffs_os_desc_type {
1717 FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
1718 };
1719
1720 typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
1721 u8 *valuep,
1722 struct usb_descriptor_header *desc,
1723 void *priv);
1724
1725 typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
1726 struct usb_os_desc_header *h, void *data,
1727 unsigned len, void *priv);
1728
1729 static int __must_check ffs_do_single_desc(char *data, unsigned len,
1730 ffs_entity_callback entity,
1731 void *priv)
1732 {
1733 struct usb_descriptor_header *_ds = (void *)data;
1734 u8 length;
1735 int ret;
1736
1737 ENTER();
1738
1739 /* At least two bytes are required: length and type */
1740 if (len < 2) {
1741 pr_vdebug("descriptor too short\n");
1742 return -EINVAL;
1743 }
1744
1745 /* Do we have at least as many bytes as the descriptor takes? */
1746 length = _ds->bLength;
1747 if (len < length) {
1748 pr_vdebug("descriptor longer than available data\n");
1749 return -EINVAL;
1750 }
1751
1752 #define __entity_check_INTERFACE(val) 1
1753 #define __entity_check_STRING(val) (val)
1754 #define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
1755 #define __entity(type, val) do { \
1756 pr_vdebug("entity " #type "(%02x)\n", (val)); \
1757 if (unlikely(!__entity_check_ ##type(val))) { \
1758 pr_vdebug("invalid entity's value\n"); \
1759 return -EINVAL; \
1760 } \
1761 ret = entity(FFS_ ##type, &val, _ds, priv); \
1762 if (unlikely(ret < 0)) { \
1763 pr_debug("entity " #type "(%02x); ret = %d\n", \
1764 (val), ret); \
1765 return ret; \
1766 } \
1767 } while (0)
1768
1769 /* Parse descriptor depending on type. */
1770 switch (_ds->bDescriptorType) {
1771 case USB_DT_DEVICE:
1772 case USB_DT_CONFIG:
1773 case USB_DT_STRING:
1774 case USB_DT_DEVICE_QUALIFIER:
1775 /* function can't have any of those */
1776 pr_vdebug("descriptor reserved for gadget: %d\n",
1777 _ds->bDescriptorType);
1778 return -EINVAL;
1779
1780 case USB_DT_INTERFACE: {
1781 struct usb_interface_descriptor *ds = (void *)_ds;
1782 pr_vdebug("interface descriptor\n");
1783 if (length != sizeof *ds)
1784 goto inv_length;
1785
1786 __entity(INTERFACE, ds->bInterfaceNumber);
1787 if (ds->iInterface)
1788 __entity(STRING, ds->iInterface);
1789 }
1790 break;
1791
1792 case USB_DT_ENDPOINT: {
1793 struct usb_endpoint_descriptor *ds = (void *)_ds;
1794 pr_vdebug("endpoint descriptor\n");
1795 if (length != USB_DT_ENDPOINT_SIZE &&
1796 length != USB_DT_ENDPOINT_AUDIO_SIZE)
1797 goto inv_length;
1798 __entity(ENDPOINT, ds->bEndpointAddress);
1799 }
1800 break;
1801
1802 case HID_DT_HID:
1803 pr_vdebug("hid descriptor\n");
1804 if (length != sizeof(struct hid_descriptor))
1805 goto inv_length;
1806 break;
1807
1808 case USB_DT_OTG:
1809 if (length != sizeof(struct usb_otg_descriptor))
1810 goto inv_length;
1811 break;
1812
1813 case USB_DT_INTERFACE_ASSOCIATION: {
1814 struct usb_interface_assoc_descriptor *ds = (void *)_ds;
1815 pr_vdebug("interface association descriptor\n");
1816 if (length != sizeof *ds)
1817 goto inv_length;
1818 if (ds->iFunction)
1819 __entity(STRING, ds->iFunction);
1820 }
1821 break;
1822
1823 case USB_DT_SS_ENDPOINT_COMP:
1824 pr_vdebug("EP SS companion descriptor\n");
1825 if (length != sizeof(struct usb_ss_ep_comp_descriptor))
1826 goto inv_length;
1827 break;
1828
1829 case USB_DT_OTHER_SPEED_CONFIG:
1830 case USB_DT_INTERFACE_POWER:
1831 case USB_DT_DEBUG:
1832 case USB_DT_SECURITY:
1833 case USB_DT_CS_RADIO_CONTROL:
1834 /* TODO */
1835 pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
1836 return -EINVAL;
1837
1838 default:
1839 /* We should never be here */
1840 pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
1841 return -EINVAL;
1842
1843 inv_length:
1844 pr_vdebug("invalid length: %d (descriptor %d)\n",
1845 _ds->bLength, _ds->bDescriptorType);
1846 return -EINVAL;
1847 }
1848
1849 #undef __entity
1850 #undef __entity_check_DESCRIPTOR
1851 #undef __entity_check_INTERFACE
1852 #undef __entity_check_STRING
1853 #undef __entity_check_ENDPOINT
1854
1855 return length;
1856 }
1857
1858 static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
1859 ffs_entity_callback entity, void *priv)
1860 {
1861 const unsigned _len = len;
1862 unsigned long num = 0;
1863
1864 ENTER();
1865
1866 for (;;) {
1867 int ret;
1868
1869 if (num == count)
1870 data = NULL;
1871
1872 /* Record "descriptor" entity */
1873 ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
1874 if (unlikely(ret < 0)) {
1875 pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
1876 num, ret);
1877 return ret;
1878 }
1879
1880 if (!data)
1881 return _len - len;
1882
1883 ret = ffs_do_single_desc(data, len, entity, priv);
1884 if (unlikely(ret < 0)) {
1885 pr_debug("%s returns %d\n", __func__, ret);
1886 return ret;
1887 }
1888
1889 len -= ret;
1890 data += ret;
1891 ++num;
1892 }
1893 }
1894
1895 static int __ffs_data_do_entity(enum ffs_entity_type type,
1896 u8 *valuep, struct usb_descriptor_header *desc,
1897 void *priv)
1898 {
1899 struct ffs_desc_helper *helper = priv;
1900 struct usb_endpoint_descriptor *d;
1901
1902 ENTER();
1903
1904 switch (type) {
1905 case FFS_DESCRIPTOR:
1906 break;
1907
1908 case FFS_INTERFACE:
1909 /*
1910 * Interfaces are indexed from zero so if we
1911 * encountered interface "n" then there are at least
1912 * "n+1" interfaces.
1913 */
1914 if (*valuep >= helper->interfaces_count)
1915 helper->interfaces_count = *valuep + 1;
1916 break;
1917
1918 case FFS_STRING:
1919 /*
1920 * Strings are indexed from 1 (0 is magic ;) reserved
1921 * for languages list or some such)
1922 */
1923 if (*valuep > helper->ffs->strings_count)
1924 helper->ffs->strings_count = *valuep;
1925 break;
1926
1927 case FFS_ENDPOINT:
1928 d = (void *)desc;
1929 helper->eps_count++;
1930 if (helper->eps_count >= 15)
1931 return -EINVAL;
1932 /* Check if descriptors for any speed were already parsed */
1933 if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
1934 helper->ffs->eps_addrmap[helper->eps_count] =
1935 d->bEndpointAddress;
1936 else if (helper->ffs->eps_addrmap[helper->eps_count] !=
1937 d->bEndpointAddress)
1938 return -EINVAL;
1939 break;
1940 }
1941
1942 return 0;
1943 }
1944
1945 static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
1946 struct usb_os_desc_header *desc)
1947 {
1948 u16 bcd_version = le16_to_cpu(desc->bcdVersion);
1949 u16 w_index = le16_to_cpu(desc->wIndex);
1950
1951 if (bcd_version != 1) {
1952 pr_vdebug("unsupported os descriptors version: %d",
1953 bcd_version);
1954 return -EINVAL;
1955 }
1956 switch (w_index) {
1957 case 0x4:
1958 *next_type = FFS_OS_DESC_EXT_COMPAT;
1959 break;
1960 case 0x5:
1961 *next_type = FFS_OS_DESC_EXT_PROP;
1962 break;
1963 default:
1964 pr_vdebug("unsupported os descriptor type: %d", w_index);
1965 return -EINVAL;
1966 }
1967
1968 return sizeof(*desc);
1969 }
1970
1971 /*
1972 * Process all extended compatibility/extended property descriptors
1973 * of a feature descriptor
1974 */
1975 static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
1976 enum ffs_os_desc_type type,
1977 u16 feature_count,
1978 ffs_os_desc_callback entity,
1979 void *priv,
1980 struct usb_os_desc_header *h)
1981 {
1982 int ret;
1983 const unsigned _len = len;
1984
1985 ENTER();
1986
1987 /* loop over all ext compat/ext prop descriptors */
1988 while (feature_count--) {
1989 ret = entity(type, h, data, len, priv);
1990 if (unlikely(ret < 0)) {
1991 pr_debug("bad OS descriptor, type: %d\n", type);
1992 return ret;
1993 }
1994 data += ret;
1995 len -= ret;
1996 }
1997 return _len - len;
1998 }
1999
2000 /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
2001 static int __must_check ffs_do_os_descs(unsigned count,
2002 char *data, unsigned len,
2003 ffs_os_desc_callback entity, void *priv)
2004 {
2005 const unsigned _len = len;
2006 unsigned long num = 0;
2007
2008 ENTER();
2009
2010 for (num = 0; num < count; ++num) {
2011 int ret;
2012 enum ffs_os_desc_type type;
2013 u16 feature_count;
2014 struct usb_os_desc_header *desc = (void *)data;
2015
2016 if (len < sizeof(*desc))
2017 return -EINVAL;
2018
2019 /*
2020 * Record "descriptor" entity.
2021 * Process dwLength, bcdVersion, wIndex, get b/wCount.
2022 * Move the data pointer to the beginning of the extended
2023 * compatibility or extended property portion of
2024 * the data.
2025 */
2026 if (le32_to_cpu(desc->dwLength) > len)
2027 return -EINVAL;
2028
2029 ret = __ffs_do_os_desc_header(&type, desc);
2030 if (unlikely(ret < 0)) {
2031 pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
2032 num, ret);
2033 return ret;
2034 }
2035 /*
2036 * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
2037 */
2038 feature_count = le16_to_cpu(desc->wCount);
2039 if (type == FFS_OS_DESC_EXT_COMPAT &&
2040 (feature_count > 255 || desc->Reserved))
2041 return -EINVAL;
2042 len -= ret;
2043 data += ret;
2044
2045 /*
2046 * Process all function/property descriptors
2047 * of this Feature Descriptor
2048 */
2049 ret = ffs_do_single_os_desc(data, len, type,
2050 feature_count, entity, priv, desc);
2051 if (unlikely(ret < 0)) {
2052 pr_debug("%s returns %d\n", __func__, ret);
2053 return ret;
2054 }
2055
2056 len -= ret;
2057 data += ret;
2058 }
2059 return _len - len;
2060 }
2061
2062 /**
2063 * Validate contents of the buffer from userspace related to OS descriptors.
2064 */
2065 static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2066 struct usb_os_desc_header *h, void *data,
2067 unsigned len, void *priv)
2068 {
2069 struct ffs_data *ffs = priv;
2070 u8 length;
2071
2072 ENTER();
2073
2074 switch (type) {
2075 case FFS_OS_DESC_EXT_COMPAT: {
2076 struct usb_ext_compat_desc *d = data;
2077 int i;
2078
2079 if (len < sizeof(*d) ||
2080 d->bFirstInterfaceNumber >= ffs->interfaces_count ||
2081 d->Reserved1)
2082 return -EINVAL;
2083 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2084 if (d->Reserved2[i])
2085 return -EINVAL;
2086
2087 length = sizeof(struct usb_ext_compat_desc);
2088 }
2089 break;
2090 case FFS_OS_DESC_EXT_PROP: {
2091 struct usb_ext_prop_desc *d = data;
2092 u32 type, pdl;
2093 u16 pnl;
2094
2095 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2096 return -EINVAL;
2097 length = le32_to_cpu(d->dwSize);
2098 type = le32_to_cpu(d->dwPropertyDataType);
2099 if (type < USB_EXT_PROP_UNICODE ||
2100 type > USB_EXT_PROP_UNICODE_MULTI) {
2101 pr_vdebug("unsupported os descriptor property type: %d",
2102 type);
2103 return -EINVAL;
2104 }
2105 pnl = le16_to_cpu(d->wPropertyNameLength);
2106 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
2107 if (length != 14 + pnl + pdl) {
2108 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
2109 length, pnl, pdl, type);
2110 return -EINVAL;
2111 }
2112 ++ffs->ms_os_descs_ext_prop_count;
2113 /* property name reported to the host as "WCHAR"s */
2114 ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
2115 ffs->ms_os_descs_ext_prop_data_len += pdl;
2116 }
2117 break;
2118 default:
2119 pr_vdebug("unknown descriptor: %d\n", type);
2120 return -EINVAL;
2121 }
2122 return length;
2123 }
2124
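/*
 * Informal sketch of the blob __ffs_data_got_descs() consumes (the V2
 * format; see struct usb_functionfs_descs_head_v2 in
 * include/uapi/linux/usb/functionfs.h for the authoritative definition),
 * all fields little endian:
 *
 *	le32 magic;	FUNCTIONFS_DESCRIPTORS_MAGIC_V2
 *	le32 length;	length of the whole blob including this header
 *	le32 flags;	FUNCTIONFS_HAS_{FS,HS,SS}_DESC, _HAS_MS_OS_DESC, ...
 *	le32 fs_count;	present iff FUNCTIONFS_HAS_FS_DESC is set
 *	le32 hs_count;	present iff FUNCTIONFS_HAS_HS_DESC is set
 *	le32 ss_count;	present iff FUNCTIONFS_HAS_SS_DESC is set
 *	le32 os_count;	present iff FUNCTIONFS_HAS_MS_OS_DESC is set
 *
 * followed by fs_count + hs_count + ss_count standard USB descriptors and
 * then os_count OS descriptors.
 */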
2125 static int __ffs_data_got_descs(struct ffs_data *ffs,
2126 char *const _data, size_t len)
2127 {
2128 char *data = _data, *raw_descs;
2129 unsigned os_descs_count = 0, counts[3], flags;
2130 int ret = -EINVAL, i;
2131 struct ffs_desc_helper helper;
2132
2133 ENTER();
2134
2135 if (get_unaligned_le32(data + 4) != len)
2136 goto error;
2137
2138 switch (get_unaligned_le32(data)) {
2139 case FUNCTIONFS_DESCRIPTORS_MAGIC:
2140 flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
2141 data += 8;
2142 len -= 8;
2143 break;
2144 case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
2145 flags = get_unaligned_le32(data + 8);
2146 ffs->user_flags = flags;
2147 if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
2148 FUNCTIONFS_HAS_HS_DESC |
2149 FUNCTIONFS_HAS_SS_DESC |
2150 FUNCTIONFS_HAS_MS_OS_DESC |
2151 FUNCTIONFS_VIRTUAL_ADDR)) {
2152 ret = -ENOSYS;
2153 goto error;
2154 }
2155 data += 12;
2156 len -= 12;
2157 break;
2158 default:
2159 goto error;
2160 }
2161
2162 /* Read fs_count, hs_count and ss_count (if present) */
2163 for (i = 0; i < 3; ++i) {
2164 if (!(flags & (1 << i))) {
2165 counts[i] = 0;
2166 } else if (len < 4) {
2167 goto error;
2168 } else {
2169 counts[i] = get_unaligned_le32(data);
2170 data += 4;
2171 len -= 4;
2172 }
2173 }
2174 	if (flags & (1 << i)) {
		if (len < 4)
			goto error;
2175 		os_descs_count = get_unaligned_le32(data);
2176 		data += 4;
2177 		len -= 4;
2178 	}
2179
2180 /* Read descriptors */
2181 raw_descs = data;
2182 helper.ffs = ffs;
2183 for (i = 0; i < 3; ++i) {
2184 if (!counts[i])
2185 continue;
2186 helper.interfaces_count = 0;
2187 helper.eps_count = 0;
2188 ret = ffs_do_descs(counts[i], data, len,
2189 __ffs_data_do_entity, &helper);
2190 if (ret < 0)
2191 goto error;
2192 if (!ffs->eps_count && !ffs->interfaces_count) {
2193 ffs->eps_count = helper.eps_count;
2194 ffs->interfaces_count = helper.interfaces_count;
2195 } else {
2196 if (ffs->eps_count != helper.eps_count) {
2197 ret = -EINVAL;
2198 goto error;
2199 }
2200 if (ffs->interfaces_count != helper.interfaces_count) {
2201 ret = -EINVAL;
2202 goto error;
2203 }
2204 }
2205 data += ret;
2206 len -= ret;
2207 }
2208 if (os_descs_count) {
2209 ret = ffs_do_os_descs(os_descs_count, data, len,
2210 __ffs_data_do_os_desc, ffs);
2211 if (ret < 0)
2212 goto error;
2213 data += ret;
2214 len -= ret;
2215 }
2216
2217 if (raw_descs == data || len) {
2218 ret = -EINVAL;
2219 goto error;
2220 }
2221
2222 ffs->raw_descs_data = _data;
2223 ffs->raw_descs = raw_descs;
2224 ffs->raw_descs_length = data - raw_descs;
2225 ffs->fs_descs_count = counts[0];
2226 ffs->hs_descs_count = counts[1];
2227 ffs->ss_descs_count = counts[2];
2228 ffs->ms_os_descs_count = os_descs_count;
2229
2230 return 0;
2231
2232 error:
2233 kfree(_data);
2234 return ret;
2235 }
2236
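/*
 * Informal sketch of the strings blob __ffs_data_got_strings() consumes
 * (see struct usb_functionfs_strings_head in
 * include/uapi/linux/usb/functionfs.h), all fields little endian:
 *
 *	le32 magic;		FUNCTIONFS_STRINGS_MAGIC
 *	le32 length;		length of the whole blob
 *	le32 str_count;		strings per language
 *	le32 lang_count;	number of languages
 *
 * followed, for each language, by a le16 language id (e.g. 0x0409) and
 * str_count NUL-terminated UTF-8 strings.
 */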
2237 static int __ffs_data_got_strings(struct ffs_data *ffs,
2238 char *const _data, size_t len)
2239 {
2240 u32 str_count, needed_count, lang_count;
2241 struct usb_gadget_strings **stringtabs, *t;
2242 struct usb_string *strings, *s;
2243 const char *data = _data;
2244
2245 ENTER();
2246
2247 if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
2248 get_unaligned_le32(data + 4) != len))
2249 goto error;
2250 str_count = get_unaligned_le32(data + 8);
2251 lang_count = get_unaligned_le32(data + 12);
2252
2253 /* if one is zero the other must be zero */
2254 if (unlikely(!str_count != !lang_count))
2255 goto error;
2256
2257 /* Do we have at least as many strings as descriptors need? */
2258 needed_count = ffs->strings_count;
2259 if (unlikely(str_count < needed_count))
2260 goto error;
2261
2262 /*
2263 * If we don't need any strings just return and free all
2264 * memory.
2265 */
2266 if (!needed_count) {
2267 kfree(_data);
2268 return 0;
2269 }
2270
2271 /* Allocate everything in one chunk so there's less maintenance. */
2272 {
2273 unsigned i = 0;
2274 vla_group(d);
2275 vla_item(d, struct usb_gadget_strings *, stringtabs,
2276 lang_count + 1);
2277 vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
2278 vla_item(d, struct usb_string, strings,
2279 lang_count*(needed_count+1));
2280
2281 char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
2282
2283 if (unlikely(!vlabuf)) {
2284 kfree(_data);
2285 return -ENOMEM;
2286 }
2287
2288 /* Initialize the VLA pointers */
2289 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2290 t = vla_ptr(vlabuf, d, stringtab);
2291 i = lang_count;
2292 do {
2293 *stringtabs++ = t++;
2294 } while (--i);
2295 *stringtabs = NULL;
2296
2297 /* stringtabs = vlabuf = d_stringtabs for later kfree */
2298 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2299 t = vla_ptr(vlabuf, d, stringtab);
2300 s = vla_ptr(vlabuf, d, strings);
2301 strings = s;
2302 }
2303
2304 /* For each language */
2305 data += 16;
2306 len -= 16;
2307
2308 do { /* lang_count > 0 so we can use do-while */
2309 unsigned needed = needed_count;
2310
2311 if (unlikely(len < 3))
2312 goto error_free;
2313 t->language = get_unaligned_le16(data);
2314 t->strings = s;
2315 ++t;
2316
2317 data += 2;
2318 len -= 2;
2319
2320 /* For each string */
2321 do { /* str_count > 0 so we can use do-while */
2322 size_t length = strnlen(data, len);
2323
2324 if (unlikely(length == len))
2325 goto error_free;
2326
2327 /*
2328 			 * The user may provide more strings than we need;
2329 			 * if that's the case we simply ignore
2330 			 * the rest.
2331 */
2332 if (likely(needed)) {
2333 /*
2334 				 * s->id will be set while the function is
2335 				 * added to a configuration; for now just
2336 				 * leave garbage here.
2337 */
2338 s->s = data;
2339 --needed;
2340 ++s;
2341 }
2342
2343 data += length + 1;
2344 len -= length + 1;
2345 } while (--str_count);
2346
2347 s->id = 0; /* terminator */
2348 s->s = NULL;
2349 ++s;
2350
2351 } while (--lang_count);
2352
2353 /* Some garbage left? */
2354 if (unlikely(len))
2355 goto error_free;
2356
2357 /* Done! */
2358 ffs->stringtabs = stringtabs;
2359 ffs->raw_strings = _data;
2360
2361 return 0;
2362
2363 error_free:
2364 kfree(stringtabs);
2365 error:
2366 kfree(_data);
2367 return -EINVAL;
2368 }
2369
2370
2371 /* Events handling and management *******************************************/
2372
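/*
 * Events queued below are delivered to userspace as struct
 * usb_functionfs_event records returned by read() on the ep0 file.  A
 * minimal, purely illustrative consumer loop might look like:
 *
 *	struct usb_functionfs_event event;
 *
 *	while (read(ep0_fd, &event, sizeof(event)) == sizeof(event)) {
 *		switch (event.type) {
 *		case FUNCTIONFS_ENABLE:
 *			// host selected our configuration; start ep I/O
 *			break;
 *		case FUNCTIONFS_SETUP:
 *			// event.u.setup holds the control request
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */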
2373 static void __ffs_event_add(struct ffs_data *ffs,
2374 enum usb_functionfs_event_type type)
2375 {
2376 enum usb_functionfs_event_type rem_type1, rem_type2 = type;
2377 int neg = 0;
2378
2379 /*
2380 * Abort any unhandled setup
2381 *
2382 	 * We do not need to worry about a cmpxchg() changing the value
2383 	 * of ffs->setup_state without us holding the lock, because when
2384 	 * the state is FFS_SETUP_PENDING the cmpxchg() calls elsewhere
2385 	 * in this file do nothing.
2386 */
2387 if (ffs->setup_state == FFS_SETUP_PENDING)
2388 ffs->setup_state = FFS_SETUP_CANCELLED;
2389
2390 switch (type) {
2391 case FUNCTIONFS_RESUME:
2392 rem_type2 = FUNCTIONFS_SUSPEND;
2393 /* FALL THROUGH */
2394 case FUNCTIONFS_SUSPEND:
2395 case FUNCTIONFS_SETUP:
2396 rem_type1 = type;
2397 /* Discard all similar events */
2398 break;
2399
2400 case FUNCTIONFS_BIND:
2401 case FUNCTIONFS_UNBIND:
2402 case FUNCTIONFS_DISABLE:
2403 case FUNCTIONFS_ENABLE:
2404 		/* Discard everything other than power management. */
2405 rem_type1 = FUNCTIONFS_SUSPEND;
2406 rem_type2 = FUNCTIONFS_RESUME;
2407 neg = 1;
2408 break;
2409
2410 default:
2411 WARN(1, "%d: unknown event, this should not happen\n", type);
2412 return;
2413 }
2414
2415 {
2416 u8 *ev = ffs->ev.types, *out = ev;
2417 unsigned n = ffs->ev.count;
2418 for (; n; --n, ++ev)
2419 if ((*ev == rem_type1 || *ev == rem_type2) == neg)
2420 *out++ = *ev;
2421 else
2422 pr_vdebug("purging event %d\n", *ev);
2423 ffs->ev.count = out - ffs->ev.types;
2424 }
2425
2426 pr_vdebug("adding event %d\n", type);
2427 ffs->ev.types[ffs->ev.count++] = type;
2428 wake_up_locked(&ffs->ev.waitq);
2429 }
2430
2431 static void ffs_event_add(struct ffs_data *ffs,
2432 enum usb_functionfs_event_type type)
2433 {
2434 unsigned long flags;
2435 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2436 __ffs_event_add(ffs, type);
2437 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2438 }
2439
2440 /* Bind/unbind USB function hooks *******************************************/
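/*
 * Binding walks the raw descriptors in passes: __ffs_func_bind_do_descs()
 * records descriptor pointers and allocates endpoints (per speed),
 * __ffs_func_bind_do_nums() then rewrites interface and endpoint numbers
 * in place, and __ffs_func_bind_do_os_desc() fills in the OS descriptor
 * tables.  _ffs_func_bind() drives these passes.
 */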
2441
2442 static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
2443 {
2444 int i;
2445
2446 for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
2447 if (ffs->eps_addrmap[i] == endpoint_address)
2448 return i;
2449 return -ENOENT;
2450 }
2451
2452 static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2453 struct usb_descriptor_header *desc,
2454 void *priv)
2455 {
2456 struct usb_endpoint_descriptor *ds = (void *)desc;
2457 struct ffs_function *func = priv;
2458 struct ffs_ep *ffs_ep;
2459 unsigned ep_desc_id;
2460 int idx;
2461 static const char *speed_names[] = { "full", "high", "super" };
2462
2463 if (type != FFS_DESCRIPTOR)
2464 return 0;
2465
2466 /*
2467 * If ss_descriptors is not NULL, we are reading super speed
2468 * descriptors; if hs_descriptors is not NULL, we are reading high
2469 * speed descriptors; otherwise, we are reading full speed
2470 * descriptors.
2471 */
2472 if (func->function.ss_descriptors) {
2473 ep_desc_id = 2;
2474 func->function.ss_descriptors[(long)valuep] = desc;
2475 } else if (func->function.hs_descriptors) {
2476 ep_desc_id = 1;
2477 func->function.hs_descriptors[(long)valuep] = desc;
2478 } else {
2479 ep_desc_id = 0;
2480 func->function.fs_descriptors[(long)valuep] = desc;
2481 }
2482
2483 if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
2484 return 0;
2485
2486 idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
2487 if (idx < 0)
2488 return idx;
2489
2490 ffs_ep = func->eps + idx;
2491
2492 if (unlikely(ffs_ep->descs[ep_desc_id])) {
2493 pr_err("two %sspeed descriptors for EP %d\n",
2494 speed_names[ep_desc_id],
2495 ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2496 return -EINVAL;
2497 }
2498 ffs_ep->descs[ep_desc_id] = ds;
2499
2500 ffs_dump_mem(": Original ep desc", ds, ds->bLength);
2501 if (ffs_ep->ep) {
2502 ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
2503 if (!ds->wMaxPacketSize)
2504 ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
2505 } else {
2506 struct usb_request *req;
2507 struct usb_ep *ep;
2508 u8 bEndpointAddress;
2509
2510 /*
2511 * We back up bEndpointAddress because autoconfig overwrites
2512 * it with physical endpoint address.
2513 */
2514 bEndpointAddress = ds->bEndpointAddress;
2515 pr_vdebug("autoconfig\n");
2516 ep = usb_ep_autoconfig(func->gadget, ds);
2517 if (unlikely(!ep))
2518 return -ENOTSUPP;
2519 ep->driver_data = func->eps + idx;
2520
2521 req = usb_ep_alloc_request(ep, GFP_KERNEL);
2522 if (unlikely(!req))
2523 return -ENOMEM;
2524
2525 ffs_ep->ep = ep;
2526 ffs_ep->req = req;
2527 func->eps_revmap[ds->bEndpointAddress &
2528 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
2529 /*
2530 * If we use virtual address mapping, we restore
2531 * original bEndpointAddress value.
2532 */
2533 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
2534 ds->bEndpointAddress = bEndpointAddress;
2535 }
2536 ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
2537
2538 return 0;
2539 }
2540
2541 static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
2542 struct usb_descriptor_header *desc,
2543 void *priv)
2544 {
2545 struct ffs_function *func = priv;
2546 unsigned idx;
2547 u8 newValue;
2548
2549 switch (type) {
2550 default:
2551 case FFS_DESCRIPTOR:
2552 /* Handled in previous pass by __ffs_func_bind_do_descs() */
2553 return 0;
2554
2555 case FFS_INTERFACE:
2556 idx = *valuep;
2557 if (func->interfaces_nums[idx] < 0) {
2558 int id = usb_interface_id(func->conf, &func->function);
2559 if (unlikely(id < 0))
2560 return id;
2561 func->interfaces_nums[idx] = id;
2562 }
2563 newValue = func->interfaces_nums[idx];
2564 break;
2565
2566 case FFS_STRING:
2567 		/* String IDs are allocated when ffs_data is bound to cdev */
2568 newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
2569 break;
2570
2571 case FFS_ENDPOINT:
2572 /*
2573 		 * USB_DT_ENDPOINT descriptors are handled in
2574 		 * __ffs_func_bind_do_descs().
2575 */
2576 if (desc->bDescriptorType == USB_DT_ENDPOINT)
2577 return 0;
2578
2579 idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
2580 if (unlikely(!func->eps[idx].ep))
2581 return -EINVAL;
2582
2583 {
2584 struct usb_endpoint_descriptor **descs;
2585 descs = func->eps[idx].descs;
2586 newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
2587 }
2588 break;
2589 }
2590
2591 pr_vdebug("%02x -> %02x\n", *valuep, newValue);
2592 *valuep = newValue;
2593 return 0;
2594 }
2595
2596 static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
2597 struct usb_os_desc_header *h, void *data,
2598 unsigned len, void *priv)
2599 {
2600 struct ffs_function *func = priv;
2601 u8 length = 0;
2602
2603 switch (type) {
2604 case FFS_OS_DESC_EXT_COMPAT: {
2605 struct usb_ext_compat_desc *desc = data;
2606 struct usb_os_desc_table *t;
2607
2608 t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
2609 t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
2610 memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
2611 ARRAY_SIZE(desc->CompatibleID) +
2612 ARRAY_SIZE(desc->SubCompatibleID));
2613 length = sizeof(*desc);
2614 }
2615 break;
2616 case FFS_OS_DESC_EXT_PROP: {
2617 struct usb_ext_prop_desc *desc = data;
2618 struct usb_os_desc_table *t;
2619 struct usb_os_desc_ext_prop *ext_prop;
2620 char *ext_prop_name;
2621 char *ext_prop_data;
2622
2623 t = &func->function.os_desc_table[h->interface];
2624 t->if_id = func->interfaces_nums[h->interface];
2625
2626 ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
2627 func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
2628
2629 ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
2630 ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
2631 ext_prop->data_len = le32_to_cpu(*(u32 *)
2632 usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
2633 length = ext_prop->name_len + ext_prop->data_len + 14;
2634
2635 ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
2636 func->ffs->ms_os_descs_ext_prop_name_avail +=
2637 ext_prop->name_len;
2638
2639 ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
2640 func->ffs->ms_os_descs_ext_prop_data_avail +=
2641 ext_prop->data_len;
2642 memcpy(ext_prop_data,
2643 usb_ext_prop_data_ptr(data, ext_prop->name_len),
2644 ext_prop->data_len);
2645 /* unicode data reported to the host as "WCHAR"s */
2646 switch (ext_prop->type) {
2647 case USB_EXT_PROP_UNICODE:
2648 case USB_EXT_PROP_UNICODE_ENV:
2649 case USB_EXT_PROP_UNICODE_LINK:
2650 case USB_EXT_PROP_UNICODE_MULTI:
2651 ext_prop->data_len *= 2;
2652 break;
2653 }
2654 ext_prop->data = ext_prop_data;
2655
2656 memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
2657 ext_prop->name_len);
2658 /* property name reported to the host as "WCHAR"s */
2659 ext_prop->name_len *= 2;
2660 ext_prop->name = ext_prop_name;
2661
2662 t->os_desc->ext_prop_len +=
2663 ext_prop->name_len + ext_prop->data_len + 14;
2664 ++t->os_desc->ext_prop_count;
2665 list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
2666 }
2667 break;
2668 default:
2669 pr_vdebug("unknown descriptor: %d\n", type);
2670 }
2671
2672 return length;
2673 }
2674
2675 static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
2676 struct usb_configuration *c)
2677 {
2678 struct ffs_function *func = ffs_func_from_usb(f);
2679 struct f_fs_opts *ffs_opts =
2680 container_of(f->fi, struct f_fs_opts, func_inst);
2681 int ret;
2682
2683 ENTER();
2684
2685 /*
2686 * Legacy gadget triggers binding in functionfs_ready_callback,
2687 * which already uses locking; taking the same lock here would
2688 * cause a deadlock.
2689 *
2690 * Configfs-enabled gadgets however do need ffs_dev_lock.
2691 */
2692 if (!ffs_opts->no_configfs)
2693 ffs_dev_lock();
2694 ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
2695 func->ffs = ffs_opts->dev->ffs_data;
2696 if (!ffs_opts->no_configfs)
2697 ffs_dev_unlock();
2698 if (ret)
2699 return ERR_PTR(ret);
2700
2701 func->conf = c;
2702 func->gadget = c->cdev->gadget;
2703
2704 /*
2705 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
2706 * configurations are bound in sequence with list_for_each_entry,
2707 * in each configuration its functions are bound in sequence
2708 * with list_for_each_entry, so we assume no race condition
2709 * with regard to ffs_opts->bound access
2710 */
2711 if (!ffs_opts->refcnt) {
2712 ret = functionfs_bind(func->ffs, c->cdev);
2713 if (ret)
2714 return ERR_PTR(ret);
2715 }
2716 ffs_opts->refcnt++;
2717 func->function.strings = func->ffs->stringtabs;
2718
2719 return ffs_opts;
2720 }
2721
2722 static int _ffs_func_bind(struct usb_configuration *c,
2723 struct usb_function *f)
2724 {
2725 struct ffs_function *func = ffs_func_from_usb(f);
2726 struct ffs_data *ffs = func->ffs;
2727
2728 const int full = !!func->ffs->fs_descs_count;
2729 const int high = gadget_is_dualspeed(func->gadget) &&
2730 func->ffs->hs_descs_count;
2731 const int super = gadget_is_superspeed(func->gadget) &&
2732 func->ffs->ss_descs_count;
2733
2734 int fs_len, hs_len, ss_len, ret, i;
2735
2736 /* Make it a single chunk, less management later on */
2737 vla_group(d);
2738 vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
2739 vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
2740 full ? ffs->fs_descs_count + 1 : 0);
2741 vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
2742 high ? ffs->hs_descs_count + 1 : 0);
2743 vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
2744 super ? ffs->ss_descs_count + 1 : 0);
2745 vla_item_with_sz(d, short, inums, ffs->interfaces_count);
2746 vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
2747 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2748 vla_item_with_sz(d, char[16], ext_compat,
2749 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2750 vla_item_with_sz(d, struct usb_os_desc, os_desc,
2751 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2752 vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
2753 ffs->ms_os_descs_ext_prop_count);
2754 vla_item_with_sz(d, char, ext_prop_name,
2755 ffs->ms_os_descs_ext_prop_name_len);
2756 vla_item_with_sz(d, char, ext_prop_data,
2757 ffs->ms_os_descs_ext_prop_data_len);
2758 vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
2759 char *vlabuf;
2760
2761 ENTER();
2762
2763 	/* Has descriptors only for speeds the gadget does not support */
2764 if (unlikely(!(full | high | super)))
2765 return -ENOTSUPP;
2766
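	/*
	 * Rough layout of the single vlabuf chunk allocated below, in
	 * vla_item declaration order:
	 *
	 *	[eps][fs_descs][hs_descs][ss_descs][inums][os_desc_table]
	 *	[ext_compat][os_desc][ext_prop][ext_prop_name][ext_prop_data]
	 *	[raw_descs]
	 *
	 * func->eps points at the start of the chunk, so the single
	 * kfree(func->eps) in ffs_func_unbind() releases all of it.
	 */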
2767 /* Allocate a single chunk, less management later on */
2768 vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
2769 if (unlikely(!vlabuf))
2770 return -ENOMEM;
2771
2772 ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
2773 ffs->ms_os_descs_ext_prop_name_avail =
2774 vla_ptr(vlabuf, d, ext_prop_name);
2775 ffs->ms_os_descs_ext_prop_data_avail =
2776 vla_ptr(vlabuf, d, ext_prop_data);
2777
2778 /* Copy descriptors */
2779 memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
2780 ffs->raw_descs_length);
2781
2782 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
2783 for (ret = ffs->eps_count; ret; --ret) {
2784 struct ffs_ep *ptr;
2785
2786 ptr = vla_ptr(vlabuf, d, eps);
2787 		ptr[ret - 1].num = -1;
2788 }
2789
2790 	/*
2791 	 * Save pointers: d_eps == vlabuf; func->eps is used to kfree the vlabuf later.
2792 	 */
2793 func->eps = vla_ptr(vlabuf, d, eps);
2794 func->interfaces_nums = vla_ptr(vlabuf, d, inums);
2795
2796 /*
2797 	 * Go through all the endpoint descriptors and allocate
2798 	 * endpoints first, so that later we can rewrite the endpoint
2799 	 * numbers without worrying that an endpoint may only be described later on.
2800 */
2801 if (likely(full)) {
2802 func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
2803 fs_len = ffs_do_descs(ffs->fs_descs_count,
2804 vla_ptr(vlabuf, d, raw_descs),
2805 d_raw_descs__sz,
2806 __ffs_func_bind_do_descs, func);
2807 if (unlikely(fs_len < 0)) {
2808 ret = fs_len;
2809 goto error;
2810 }
2811 } else {
2812 fs_len = 0;
2813 }
2814
2815 if (likely(high)) {
2816 func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
2817 hs_len = ffs_do_descs(ffs->hs_descs_count,
2818 vla_ptr(vlabuf, d, raw_descs) + fs_len,
2819 d_raw_descs__sz - fs_len,
2820 __ffs_func_bind_do_descs, func);
2821 if (unlikely(hs_len < 0)) {
2822 ret = hs_len;
2823 goto error;
2824 }
2825 } else {
2826 hs_len = 0;
2827 }
2828
2829 if (likely(super)) {
2830 func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
2831 ss_len = ffs_do_descs(ffs->ss_descs_count,
2832 vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
2833 d_raw_descs__sz - fs_len - hs_len,
2834 __ffs_func_bind_do_descs, func);
2835 if (unlikely(ss_len < 0)) {
2836 ret = ss_len;
2837 goto error;
2838 }
2839 } else {
2840 ss_len = 0;
2841 }
2842
2843 /*
2844 	 * Now handle interface number allocation and interface and
2845 	 * endpoint number rewriting.  We can do both
2846 	 * in one go.
2847 */
2848 ret = ffs_do_descs(ffs->fs_descs_count +
2849 (high ? ffs->hs_descs_count : 0) +
2850 (super ? ffs->ss_descs_count : 0),
2851 vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
2852 __ffs_func_bind_do_nums, func);
2853 if (unlikely(ret < 0))
2854 goto error;
2855
2856 func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
2857 if (c->cdev->use_os_string)
2858 for (i = 0; i < ffs->interfaces_count; ++i) {
2859 struct usb_os_desc *desc;
2860
2861 desc = func->function.os_desc_table[i].os_desc =
2862 vla_ptr(vlabuf, d, os_desc) +
2863 i * sizeof(struct usb_os_desc);
2864 desc->ext_compat_id =
2865 vla_ptr(vlabuf, d, ext_compat) + i * 16;
2866 INIT_LIST_HEAD(&desc->ext_prop);
2867 }
2868 ret = ffs_do_os_descs(ffs->ms_os_descs_count,
2869 vla_ptr(vlabuf, d, raw_descs) +
2870 fs_len + hs_len + ss_len,
2871 d_raw_descs__sz - fs_len - hs_len - ss_len,
2872 __ffs_func_bind_do_os_desc, func);
2873 if (unlikely(ret < 0))
2874 goto error;
2875 func->function.os_desc_n =
2876 c->cdev->use_os_string ? ffs->interfaces_count : 0;
2877
2878 /* And we're done */
2879 ffs_event_add(ffs, FUNCTIONFS_BIND);
2880 return 0;
2881
2882 error:
2883 /* XXX Do we need to release all claimed endpoints here? */
2884 return ret;
2885 }
2886
2887 static int ffs_func_bind(struct usb_configuration *c,
2888 struct usb_function *f)
2889 {
2890 struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
2891
2892 if (IS_ERR(ffs_opts))
2893 return PTR_ERR(ffs_opts);
2894
2895 return _ffs_func_bind(c, f);
2896 }
2897
2898
2899 /* Other USB function hooks *************************************************/
2900
2901 static int ffs_func_set_alt(struct usb_function *f,
2902 unsigned interface, unsigned alt)
2903 {
2904 struct ffs_function *func = ffs_func_from_usb(f);
2905 struct ffs_data *ffs = func->ffs;
2906 int ret = 0, intf;
2907
2908 if (alt != (unsigned)-1) {
2909 intf = ffs_func_revmap_intf(func, interface);
2910 if (unlikely(intf < 0))
2911 return intf;
2912 }
2913
2914 if (ffs->func)
2915 ffs_func_eps_disable(ffs->func);
2916
2917 if (ffs->state != FFS_ACTIVE)
2918 return -ENODEV;
2919
2920 if (alt == (unsigned)-1) {
2921 ffs->func = NULL;
2922 ffs_event_add(ffs, FUNCTIONFS_DISABLE);
2923 return 0;
2924 }
2925
2926 ffs->func = func;
2927 ret = ffs_func_eps_enable(func);
2928 if (likely(ret >= 0))
2929 ffs_event_add(ffs, FUNCTIONFS_ENABLE);
2930 return ret;
2931 }
2932
2933 static void ffs_func_disable(struct usb_function *f)
2934 {
2935 ffs_func_set_alt(f, 0, (unsigned)-1);
2936 }
2937
2938 static int ffs_func_setup(struct usb_function *f,
2939 const struct usb_ctrlrequest *creq)
2940 {
2941 struct ffs_function *func = ffs_func_from_usb(f);
2942 struct ffs_data *ffs = func->ffs;
2943 unsigned long flags;
2944 int ret;
2945
2946 ENTER();
2947
2948 pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
2949 pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
2950 pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
2951 pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
2952 pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
2953
2954 /*
2955 	 * Most requests directed to an interface go through here
2956 	 * (notable exceptions being set/get interface), so we need to
2957 	 * handle them.  All others are either handled by composite or
2958 	 * passed to usb_configuration->setup() (if one is set).  No
2959 	 * matter what, we handle requests directed to an endpoint here
2960 	 * as well (as it's straightforward), but what to do with any
2961 	 * other request?
2962 */
2963 if (ffs->state != FFS_ACTIVE)
2964 return -ENODEV;
2965
2966 switch (creq->bRequestType & USB_RECIP_MASK) {
2967 case USB_RECIP_INTERFACE:
2968 ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
2969 if (unlikely(ret < 0))
2970 return ret;
2971 break;
2972
2973 case USB_RECIP_ENDPOINT:
2974 ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
2975 if (unlikely(ret < 0))
2976 return ret;
2977 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
2978 ret = func->ffs->eps_addrmap[ret];
2979 break;
2980
2981 default:
2982 return -EOPNOTSUPP;
2983 }
2984
2985 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2986 ffs->ev.setup = *creq;
2987 ffs->ev.setup.wIndex = cpu_to_le16(ret);
2988 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
2989 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2990
2991 return 0;
2992 }
2993
2994 static void ffs_func_suspend(struct usb_function *f)
2995 {
2996 ENTER();
2997 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
2998 }
2999
3000 static void ffs_func_resume(struct usb_function *f)
3001 {
3002 ENTER();
3003 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
3004 }
3005
3006
3007 /* Endpoint and interface numbers reverse mapping ***************************/
3008
3009 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
3010 {
3011 num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
3012 return num ? num : -EDOM;
3013 }
3014
3015 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
3016 {
3017 short *nums = func->interfaces_nums;
3018 unsigned count = func->ffs->interfaces_count;
3019
3020 for (; count; --count, ++nums) {
3021 if (*nums >= 0 && *nums == intf)
3022 return nums - func->interfaces_nums;
3023 }
3024
3025 return -EDOM;
3026 }
3027
3028
3029 /* Devices management *******************************************************/
3030
3031 static LIST_HEAD(ffs_devices);
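
/*
 * The ffs_devices list and the ffs_dev objects on it are serialized with
 * ffs_dev_lock()/ffs_dev_unlock(); the _ffs_*() helpers below assume the
 * caller already holds that lock.
 */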
3032
3033 static struct ffs_dev *_ffs_do_find_dev(const char *name)
3034 {
3035 struct ffs_dev *dev;
3036
3037 list_for_each_entry(dev, &ffs_devices, entry) {
3038 if (!dev->name || !name)
3039 continue;
3040 if (strcmp(dev->name, name) == 0)
3041 return dev;
3042 }
3043
3044 return NULL;
3045 }
3046
3047 /*
3048 * ffs_lock must be taken by the caller of this function
3049 */
3050 static struct ffs_dev *_ffs_get_single_dev(void)
3051 {
3052 struct ffs_dev *dev;
3053
3054 if (list_is_singular(&ffs_devices)) {
3055 dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
3056 if (dev->single)
3057 return dev;
3058 }
3059
3060 return NULL;
3061 }
3062
3063 /*
3064 * ffs_lock must be taken by the caller of this function
3065 */
3066 static struct ffs_dev *_ffs_find_dev(const char *name)
3067 {
3068 struct ffs_dev *dev;
3069
3070 dev = _ffs_get_single_dev();
3071 if (dev)
3072 return dev;
3073
3074 return _ffs_do_find_dev(name);
3075 }
3076
3077 /* Configfs support *********************************************************/
3078
3079 static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
3080 {
3081 return container_of(to_config_group(item), struct f_fs_opts,
3082 func_inst.group);
3083 }
3084
3085 static void ffs_attr_release(struct config_item *item)
3086 {
3087 struct f_fs_opts *opts = to_ffs_opts(item);
3088
3089 usb_put_function_instance(&opts->func_inst);
3090 }
3091
3092 static struct configfs_item_operations ffs_item_ops = {
3093 .release = ffs_attr_release,
3094 };
3095
3096 static struct config_item_type ffs_func_type = {
3097 .ct_item_ops = &ffs_item_ops,
3098 .ct_owner = THIS_MODULE,
3099 };
3100
3101
3102 /* Function registration interface ******************************************/
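
/*
 * Instances of this function are created either by a legacy gadget driver
 * obtaining one with usb_get_function_instance("ffs") and naming the backing
 * device via ffs_name_dev(), or, with configfs, by creating something along
 * the lines of functions/ffs.<name> (illustrative path) in the gadget's
 * configfs tree, in which case ffs_set_inst_name() below names the backing
 * ffs_dev after <name>.
 */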
3103
3104 static void ffs_free_inst(struct usb_function_instance *f)
3105 {
3106 struct f_fs_opts *opts;
3107
3108 opts = to_f_fs_opts(f);
3109 ffs_dev_lock();
3110 _ffs_free_dev(opts->dev);
3111 ffs_dev_unlock();
3112 kfree(opts);
3113 }
3114
3115 #define MAX_INST_NAME_LEN 40
3116
3117 static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
3118 {
3119 struct f_fs_opts *opts;
3120 char *ptr;
3121 const char *tmp;
3122 int name_len, ret;
3123
3124 name_len = strlen(name) + 1;
3125 if (name_len > MAX_INST_NAME_LEN)
3126 return -ENAMETOOLONG;
3127
3128 ptr = kstrndup(name, name_len, GFP_KERNEL);
3129 if (!ptr)
3130 return -ENOMEM;
3131
3132 opts = to_f_fs_opts(fi);
3133 tmp = NULL;
3134
3135 ffs_dev_lock();
3136
3137 tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
3138 ret = _ffs_name_dev(opts->dev, ptr);
3139 if (ret) {
3140 kfree(ptr);
3141 ffs_dev_unlock();
3142 return ret;
3143 }
3144 opts->dev->name_allocated = true;
3145
3146 ffs_dev_unlock();
3147
3148 kfree(tmp);
3149
3150 return 0;
3151 }
3152
3153 static struct usb_function_instance *ffs_alloc_inst(void)
3154 {
3155 struct f_fs_opts *opts;
3156 struct ffs_dev *dev;
3157
3158 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
3159 if (!opts)
3160 return ERR_PTR(-ENOMEM);
3161
3162 opts->func_inst.set_inst_name = ffs_set_inst_name;
3163 opts->func_inst.free_func_inst = ffs_free_inst;
3164 ffs_dev_lock();
3165 dev = _ffs_alloc_dev();
3166 ffs_dev_unlock();
3167 if (IS_ERR(dev)) {
3168 kfree(opts);
3169 return ERR_CAST(dev);
3170 }
3171 opts->dev = dev;
3172 dev->opts = opts;
3173
3174 config_group_init_type_name(&opts->func_inst.group, "",
3175 &ffs_func_type);
3176 return &opts->func_inst;
3177 }
3178
3179 static void ffs_free(struct usb_function *f)
3180 {
3181 kfree(ffs_func_from_usb(f));
3182 }
3183
3184 static void ffs_func_unbind(struct usb_configuration *c,
3185 struct usb_function *f)
3186 {
3187 struct ffs_function *func = ffs_func_from_usb(f);
3188 struct ffs_data *ffs = func->ffs;
3189 struct f_fs_opts *opts =
3190 container_of(f->fi, struct f_fs_opts, func_inst);
3191 struct ffs_ep *ep = func->eps;
3192 unsigned count = ffs->eps_count;
3193 unsigned long flags;
3194
3195 ENTER();
3196 if (ffs->func == func) {
3197 ffs_func_eps_disable(func);
3198 ffs->func = NULL;
3199 }
3200
3201 if (!--opts->refcnt)
3202 functionfs_unbind(ffs);
3203
3204 /* cleanup after autoconfig */
3205 spin_lock_irqsave(&func->ffs->eps_lock, flags);
3206 do {
3207 if (ep->ep && ep->req)
3208 usb_ep_free_request(ep->ep, ep->req);
3209 ep->req = NULL;
3210 ++ep;
3211 } while (--count);
3212 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
3213 kfree(func->eps);
3214 func->eps = NULL;
3215 /*
3216 * eps, descriptors and interfaces_nums are allocated in the
3217 * same chunk so only one free is required.
3218 */
3219 func->function.fs_descriptors = NULL;
3220 func->function.hs_descriptors = NULL;
3221 func->function.ss_descriptors = NULL;
3222 func->interfaces_nums = NULL;
3223
3224 ffs_event_add(ffs, FUNCTIONFS_UNBIND);
3225 }
3226
3227 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
3228 {
3229 struct ffs_function *func;
3230
3231 ENTER();
3232
3233 func = kzalloc(sizeof(*func), GFP_KERNEL);
3234 if (unlikely(!func))
3235 return ERR_PTR(-ENOMEM);
3236
3237 func->function.name = "Function FS Gadget";
3238
3239 func->function.bind = ffs_func_bind;
3240 func->function.unbind = ffs_func_unbind;
3241 func->function.set_alt = ffs_func_set_alt;
3242 func->function.disable = ffs_func_disable;
3243 func->function.setup = ffs_func_setup;
3244 func->function.suspend = ffs_func_suspend;
3245 func->function.resume = ffs_func_resume;
3246 func->function.free_func = ffs_free;
3247
3248 return &func->function;
3249 }
3250
3251 /*
3252 * ffs_lock must be taken by the caller of this function
3253 */
3254 static struct ffs_dev *_ffs_alloc_dev(void)
3255 {
3256 struct ffs_dev *dev;
3257 int ret;
3258
3259 if (_ffs_get_single_dev())
3260 return ERR_PTR(-EBUSY);
3261
3262 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3263 if (!dev)
3264 return ERR_PTR(-ENOMEM);
3265
3266 if (list_empty(&ffs_devices)) {
3267 ret = functionfs_init();
3268 if (ret) {
3269 kfree(dev);
3270 return ERR_PTR(ret);
3271 }
3272 }
3273
3274 list_add(&dev->entry, &ffs_devices);
3275
3276 return dev;
3277 }
3278
3279 /*
3280 * ffs_lock must be taken by the caller of this function
3281 * The caller is responsible for "name" being available whenever f_fs needs it
3282 */
3283 static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
3284 {
3285 struct ffs_dev *existing;
3286
3287 existing = _ffs_do_find_dev(name);
3288 if (existing)
3289 return -EBUSY;
3290
3291 dev->name = name;
3292
3293 return 0;
3294 }
3295
3296 /*
3297 * The caller is responsible for "name" being available whenever f_fs needs it
3298 */
3299 int ffs_name_dev(struct ffs_dev *dev, const char *name)
3300 {
3301 int ret;
3302
3303 ffs_dev_lock();
3304 ret = _ffs_name_dev(dev, name);
3305 ffs_dev_unlock();
3306
3307 return ret;
3308 }
3309 EXPORT_SYMBOL_GPL(ffs_name_dev);
3310
3311 int ffs_single_dev(struct ffs_dev *dev)
3312 {
3313 int ret;
3314
3315 ret = 0;
3316 ffs_dev_lock();
3317
3318 if (!list_is_singular(&ffs_devices))
3319 ret = -EBUSY;
3320 else
3321 dev->single = true;
3322
3323 ffs_dev_unlock();
3324 return ret;
3325 }
3326 EXPORT_SYMBOL_GPL(ffs_single_dev);
3327
3328 /*
3329 * ffs_lock must be taken by the caller of this function
3330 */
3331 static void _ffs_free_dev(struct ffs_dev *dev)
3332 {
3333 list_del(&dev->entry);
3334 if (dev->name_allocated)
3335 kfree(dev->name);
3336 kfree(dev);
3337 if (list_empty(&ffs_devices))
3338 functionfs_cleanup();
3339 }
3340
3341 static void *ffs_acquire_dev(const char *dev_name)
3342 {
3343 struct ffs_dev *ffs_dev;
3344
3345 ENTER();
3346 ffs_dev_lock();
3347
3348 ffs_dev = _ffs_find_dev(dev_name);
3349 if (!ffs_dev)
3350 ffs_dev = ERR_PTR(-ENOENT);
3351 else if (ffs_dev->mounted)
3352 ffs_dev = ERR_PTR(-EBUSY);
3353 else if (ffs_dev->ffs_acquire_dev_callback &&
3354 ffs_dev->ffs_acquire_dev_callback(ffs_dev))
3355 ffs_dev = ERR_PTR(-ENOENT);
3356 else
3357 ffs_dev->mounted = true;
3358
3359 ffs_dev_unlock();
3360 return ffs_dev;
3361 }
3362
3363 static void ffs_release_dev(struct ffs_data *ffs_data)
3364 {
3365 struct ffs_dev *ffs_dev;
3366
3367 ENTER();
3368 ffs_dev_lock();
3369
3370 ffs_dev = ffs_data->private_data;
3371 if (ffs_dev) {
3372 ffs_dev->mounted = false;
3373
3374 if (ffs_dev->ffs_release_dev_callback)
3375 ffs_dev->ffs_release_dev_callback(ffs_dev);
3376 }
3377
3378 ffs_dev_unlock();
3379 }
3380
3381 static int ffs_ready(struct ffs_data *ffs)
3382 {
3383 struct ffs_dev *ffs_obj;
3384 int ret = 0;
3385
3386 ENTER();
3387 ffs_dev_lock();
3388
3389 ffs_obj = ffs->private_data;
3390 if (!ffs_obj) {
3391 ret = -EINVAL;
3392 goto done;
3393 }
3394 if (WARN_ON(ffs_obj->desc_ready)) {
3395 ret = -EBUSY;
3396 goto done;
3397 }
3398
3399 ffs_obj->desc_ready = true;
3400 ffs_obj->ffs_data = ffs;
3401
3402 if (ffs_obj->ffs_ready_callback)
3403 ret = ffs_obj->ffs_ready_callback(ffs);
3404
3405 done:
3406 ffs_dev_unlock();
3407 return ret;
3408 }
3409
3410 static void ffs_closed(struct ffs_data *ffs)
3411 {
3412 struct ffs_dev *ffs_obj;
3413 struct f_fs_opts *opts;
3414 struct config_item *ci;
3415
3416 ENTER();
3417 ffs_dev_lock();
3418
3419 ffs_obj = ffs->private_data;
3420 if (!ffs_obj)
3421 goto done;
3422
3423 ffs_obj->desc_ready = false;
3424
3425 if (ffs_obj->ffs_closed_callback)
3426 ffs_obj->ffs_closed_callback(ffs);
3427
3428 if (ffs_obj->opts)
3429 opts = ffs_obj->opts;
3430 else
3431 goto done;
3432
3433 if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
3434 || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
3435 goto done;
3436
3437 ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
3438 ffs_dev_unlock();
3439
3440 if (test_bit(FFS_FL_BOUND, &ffs->flags))
3441 unregister_gadget_item(ci);
3442 return;
3443 done:
3444 ffs_dev_unlock();
3445 }
3446
3447 /* Misc helper functions ****************************************************/
3448
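/*
 * Lock @mutex honouring non-blocking opens: with @nonblock set this is a
 * trylock that fails with -EAGAIN, otherwise an interruptible lock.
 */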
3449 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
3450 {
3451 return nonblock
3452 ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
3453 : mutex_lock_interruptible(mutex);
3454 }
3455
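/*
 * Copy a userspace buffer into a freshly kmalloc()ed one.  Returns NULL for a
 * zero-length request, an ERR_PTR() on failure, or the buffer (which the
 * caller must kfree()) on success.
 */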
3456 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
3457 {
3458 char *data;
3459
3460 if (unlikely(!len))
3461 return NULL;
3462
3463 data = kmalloc(len, GFP_KERNEL);
3464 if (unlikely(!data))
3465 return ERR_PTR(-ENOMEM);
3466
3467 if (unlikely(__copy_from_user(data, buf, len))) {
3468 kfree(data);
3469 return ERR_PTR(-EFAULT);
3470 }
3471
3472 pr_vdebug("Buffer from user space:\n");
3473 ffs_dump_mem("", data, len);
3474
3475 return data;
3476 }
3477
3478 DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
3479 MODULE_LICENSE("GPL");
3480 MODULE_AUTHOR("Michal Nazarewicz");
3481