• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * f_fs.c -- user mode file system API for USB composite function controllers
4  *
5  * Copyright (C) 2010 Samsung Electronics
6  * Author: Michal Nazarewicz <mina86@mina86.com>
7  *
8  * Based on inode.c (GadgetFS) which was:
9  * Copyright (C) 2003-2004 David Brownell
10  * Copyright (C) 2003 Agilent Technologies
11  */
12 
13 /* #define DEBUG */
14 /* #define VERBOSE_DEBUG */
15 
16 #include <linux/export.h>
17 #include <linux/hid.h>
18 #include <linux/miscdevice.h>
19 #include <linux/usb/functionfs.h>
20 #include <linux/kfifo.h>
21 #include <linux/module.h>
22 #include <linux/poll.h>
23 #include <linux/eventfd.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/usb/cdc.h>
26 #include <linux/interrupt.h>
27 #include "u_generic.h"
28 #include "u_f.h"
29 #include "u_os_desc.h"
30 #include "configfs.h"
31 
32 #define FUNCTIONFS_MAGIC    0xa647361 /* Chosen by a honest dice roll ;) */
33 
34 /* Reference counter handling */
35 static void ffs_data_get(struct ffs_data *ffs);
36 static void ffs_data_put(struct ffs_data *ffs);
37 /* Creates new ffs_data object. */
38 static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
39     __attribute__((malloc));
40 
41 /* Called with ffs->mutex held; take over ownership of data. */
42 static int __must_check
43 __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
44 static int __must_check
45 __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
46 
47 /* The function structure ***************************************************/
48 
49 struct ffs_ep;
50 
/*
 * Per-bound-instance function state: ties the userspace-facing ffs_data
 * to the composite framework's usb_function.
 */
struct ffs_function {
	struct usb_configuration	*conf;
	struct usb_gadget		*gadget;
	struct ffs_data			*ffs;

	struct ffs_ep			*eps;		/* endpoint state array */
	u8				eps_revmap[16];	/* reverse map, see ffs_func_revmap_ep() */
	short				*interfaces_nums;

	struct usb_function		function;	/* embedded; see ffs_func_from_usb() */
};
ffs_func_from_usb(struct usb_function * f)62 static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
63 {
64     return container_of(f, struct ffs_function, function);
65 }
/*
 * Atomically turn FFS_SETUP_CANCELLED into FFS_NO_SETUP and return the
 * state that was actually observed.  Callers compare the result against
 * FFS_SETUP_CANCELLED to detect a setup request cancelled from under them.
 */
static inline enum ffs_setup_state ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
{
	return (enum ffs_setup_state)
		cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
}
71 static void ffs_func_eps_disable(struct ffs_function *func);
72 static int __must_check ffs_func_eps_enable(struct ffs_function *func);
73 
74 static int ffs_func_bind(struct usb_configuration *,
75              struct usb_function *);
76 static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
77 static void ffs_func_disable(struct usb_function *);
78 static int ffs_func_setup(struct usb_function *,
79               const struct usb_ctrlrequest *);
80 static bool ffs_func_req_match(struct usb_function *,
81                    const struct usb_ctrlrequest *,
82                    bool config0);
83 static void ffs_func_suspend(struct usb_function *);
84 static void ffs_func_resume(struct usb_function *);
85 
86 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
87 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
88 
89 /* The endpoints structures *************************************************/
/* State of one non-control endpoint.  "P:" notes name the protecting lock. */
struct ffs_ep {
	struct usb_ep			*ep;	/* P: ffs->eps_lock */
	struct usb_request		*req;	/* P: epfile->mutex */

	/* [0]: full speed, [1]: high speed, [2]: super speed */
	struct usb_endpoint_descriptor	*descs[3];

	u8				num;	/* endpoint number within the function */

	int				status;	/* P: epfile->mutex */
};
101 
/* Per-endpoint character device state. */
struct ffs_epfile {
	/* Protects ep->ep and ep->req. */
	struct mutex			mutex;
	struct list_head		memory_list;	/* mmap()ed regions for this epfile */
	struct ffs_data			*ffs;
	struct ffs_ep			*ep;	/* P: ffs->eps_lock */
	/*
	 * Buffer for holding data from partial reads which may happen since
	 * we’re rounding user read requests to a multiple of a max packet size.
	 *
	 * The pointer is initialised with NULL value and may be set by
	 * __ffs_epfile_read_data function to point to a temporary buffer.
	 *
	 * In normal operation, calls to __ffs_epfile_read_buffered will consume
	 * data from said buffer and eventually free it.  Importantly, while the
	 * function is using the buffer, it sets the pointer to NULL.  This is
	 * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
	 * can never run concurrently (they are synchronised by epfile->mutex)
	 * so the latter will not assign a new value to the pointer.
	 *
	 * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
	 * valid) and sets the pointer to READ_BUFFER_DROP value.  This special
	 * value is crux of the synchronisation between ffs_func_eps_disable and
	 * __ffs_epfile_read_data.
	 *
	 * Once __ffs_epfile_read_data is about to finish it will try to set the
	 * pointer back to its old value (as described above), but seeing as the
	 * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
	 * the buffer.
	 *
	 * == State transitions ==
	 *
	 * • ptr == NULL:  (initial state)
	 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
	 *   ◦ __ffs_epfile_read_buffered:    nop
	 *   ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == DROP:
	 *   ◦ __ffs_epfile_read_buffer_free: nop
	 *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL
	 *   ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == buf:
	 *   ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
	 *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL and reading
	 *   ◦ __ffs_epfile_read_data:        n/a, __ffs_epfile_read_buffered
	 *                                    is always called first
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == NULL and reading:
	 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
	 *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
	 *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
	 *   ◦ reading finishes and …
	 *     … all data read:               free buf, go to ptr == NULL
	 *     … otherwise:                   go to ptr == buf and reading
	 * • ptr == DROP and reading:
	 *   ◦ __ffs_epfile_read_buffer_free: nop
	 *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
	 *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
	 *   ◦ reading finishes:              free buf, go to ptr == DROP
	 */
	struct ffs_buffer		*read_buffer;
#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))

	char				name[MAX_NAMELEN];	/* device node name */
	dev_t				devno;
	struct cdev			cdev;
	struct device			*device;

	unsigned char			in;	/* P: ffs->eps_lock */
	unsigned char			isoc;	/* P: ffs->eps_lock */

	/* Async-completion records plus the wait queue pollers sleep on. */
	struct kfifo		reqEventFifo;
	wait_queue_head_t   wait_que;

	unsigned char			_pad;
};
179 
/*
 * Leftover data kept between partial reads (see read_buffer in ffs_epfile).
 * data presumably points into storage[] at the unread position — confirm
 * against __ffs_epfile_read_data.
 */
struct ffs_buffer {
	size_t length;		/* bytes still unconsumed */
	char *data;
	char storage[];		/* flexible array holding the payload */
};
185 
186 /*  ffs_io_data structure ***************************************************/
187 
/*
 * Book-keeping for one transfer issued through the ENDPOINT_READ/WRITE
 * ioctls.  The leading members are filled by copying a struct IoData from
 * userspace (see ffs_ep0_ioctl) — keep their order/layout in sync with it.
 */
struct ffs_io_data {
	uint32_t aio;		/* non-zero: asynchronous request */
	uint32_t read;		/* presumably direction flag — confirm in u_generic.h */
	uint32_t len;		/* transfer length in bytes */
	uint32_t timeout;	/* sync wait timeout; passed to the jiffies-based wait */
	uint64_t buf;		/* userspace address inside an mmap()ed region */
	uint32_t actual;	/* bytes transferred (set on completion) */
	int      status;	/* request status (set on completion) */
	struct tasklet_struct task;
	struct usb_ep *ep;
	struct usb_request *req;
	struct ffs_epfile *epfile;
	struct ffs_data *ffs;
};
202 
/*
 * Scratch state used while walking descriptor data; tracks how many
 * interfaces/endpoints have been seen (used by the __ffs_data_got_descs
 * parsing path — not visible in this chunk).
 */
struct ffs_desc_helper {
	struct ffs_data *ffs;
	unsigned interfaces_count;
	unsigned eps_count;
};
208 
209 static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
210 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
211 
212 /* Devices management *******************************************************/
213 
214 DEFINE_MUTEX(ffs_lock_adapter);
215 EXPORT_SYMBOL_GPL(ffs_lock_adapter);
216 
217 static struct ffs_dev *_ffs_find_dev(const char *name);
218 static struct ffs_dev *_ffs_alloc_dev(void);
219 static void _ffs_free_dev(struct ffs_dev *dev);
220 static void *ffs_acquire_dev(const char *dev_name);
221 static void ffs_release_dev(struct ffs_data *ffs_data);
222 static int ffs_ready(struct ffs_data *ffs);
223 static void ffs_closed(struct ffs_data *ffs);
224 
225 /* Misc helper functions ****************************************************/
226 
227 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
228     __attribute__((warn_unused_result, nonnull));
229 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
230     __attribute__((warn_unused_result, nonnull));
231 
232 struct class *ffs_class;
/* devnode callback: nodes appear as /dev/functionfs/<name>, mode 0666. */
static char *ffs_devnode(struct device *dev, umode_t *mode)
{
	if (mode != NULL)
		*mode = 0666;

	return kasprintf(GFP_KERNEL, "functionfs/%s", dev_name(dev));
}
239 
240 /* Control file aka ep0 *****************************************************/
/*
 * Find the mmap()ed region of @ffs that fully contains the user range
 * [buf, buf + len).  Returns the matching ffs_memory, or NULL when the
 * range is not backed by any mapping.  Walks memory_list under mem_lock.
 */
static struct ffs_memory *generic_find_ep0_memory_area(struct ffs_data *ffs, uint64_t buf, uint32_t len)
{
	struct ffs_memory *found = NULL;
	struct ffs_memory *iter = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ffs->mem_lock, flags);
	list_for_each_entry(iter, &ffs->memory_list, memlist) {
		uint64_t start = iter->vm_start;
		uint64_t end = iter->vm_start + iter->size;

		if (buf < start || buf >= end)
			continue;
		/* Start lies inside; accept only if the whole range fits. */
		if (len <= end - buf) {
			found = iter;
			break;
		}
	}
	spin_unlock_irqrestore(&ffs->mem_lock, flags);

	return found;
}
261 
ffs_ep0_complete(struct usb_ep * ep,struct usb_request * req)262 static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
263 {
264     struct ffs_data *ffs = req->context;
265 
266     complete(&ffs->ep0req_completion);
267 
268     ffs->setup_state = FFS_NO_SETUP;
269 }
270 
/*
 * Completion callback for asynchronous ep0 transfers (queued by
 * ffs_ep0_iorw with aio set): publish the result to the event FIFO, wake
 * pollers, then free the request and its io_data.
 */
static void ffs_ep0_async_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
	struct ffs_io_data *io_data = req->context;
	struct ffs_data *ffs = io_data->ffs;
	ENTER();

	io_data->status = io_data->req->status;
	io_data->actual = io_data->req->actual;
	/*
	 * NOTE(review): this copies sizeof(struct UsbFnReqEvent) bytes
	 * starting at &io_data->buf — it relies on the buf/actual/status
	 * members of ffs_io_data mirroring UsbFnReqEvent's layout; verify
	 * against u_generic.h.
	 */
	kfifo_in(&ffs->reqEventFifo, &io_data->buf, sizeof(struct UsbFnReqEvent));
	wake_up_all(&ffs->wait_que);

	/* Unlink from the in-flight list kept off ffs->ep0req->list. */
	list_del(&req->list);
	usb_ep_free_request(io_data->ep, io_data->req);
	kfree(io_data);

}
287 
/*
 * Queue @data (@len bytes) on ep0 and wait for the transfer to finish.
 *
 * Called with ffs->ev.waitq.lock held; the lock is dropped before the
 * request is queued.  Returns bytes transferred (or the request status if
 * negative), a usb_ep_queue() error, or -EINTR if the wait was interrupted
 * (the request is dequeued first so ep0req can be reused).
 */
static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
	__releases(&ffs->ev.waitq.lock)
{
	struct usb_request *req = ffs->ep0req;
	int ret;

	/* Short answers need a ZLP when the host asked for more (wLength). */
	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);

	spin_unlock_irq(&ffs->ev.waitq.lock);

	req->buf      = data;
	req->length   = len;

	/*
	 * UDC layer requires to provide a buffer even for ZLP, but should
	 * not use it at all. Let's provide some poisoned pointer to catch
	 * possible bug in the driver.
	 */
	if (req->buf == NULL)
		req->buf = (void *)0xDEADBABE;

	reinit_completion(&ffs->ep0req_completion);

	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
	if (unlikely(ret)) {
		usb_ep_dequeue(ffs->gadget->ep0, req);
		return -EINTR;
	}

	ffs->setup_state = FFS_NO_SETUP;
	return req->status ? req->status : req->actual;
}
324 
__ffs_ep0_stall(struct ffs_data * ffs)325 static int __ffs_ep0_stall(struct ffs_data *ffs)
326 {
327     if (ffs->ev.can_stall) {
328         pr_vdebug("ep0 stall\n");
329         usb_ep_set_halt(ffs->gadget->ep0);
330         ffs->setup_state = FFS_NO_SETUP;
331         return -EL2HLT;
332     } else {
333         pr_debug("bogus ep0 stall!\n");
334         return -ESRCH;
335     }
336 }
337 
/*
 * write() on the ep0 node.
 *
 * In FFS_READ_DESCRIPTORS / FFS_READ_STRINGS the payload is the descriptor
 * resp. string blob; once strings are accepted the endpoint files are
 * created and the instance becomes FFS_ACTIVE.  In FFS_ACTIVE a write
 * supplies the data/status stage of a pending IN setup request.
 */
static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	ssize_t ret;
	char *data = NULL;

	ENTER();

	/* Fast check if setup was canceled */
	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	switch (ffs->state) {
	case FFS_READ_DESCRIPTORS:
	case FFS_READ_STRINGS:
		/* Copy data; anything shorter than a minimal header is bogus */
		if (unlikely(len < 16)) {
			ret = -EINVAL;
			break;
		}

		data = ffs_prepare_buffer(buf, len);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		/* Handle data; __ffs_data_got_* take over ownership of data */
		if (ffs->state == FFS_READ_DESCRIPTORS) {
			pr_info("read descriptors\n");
			ret = __ffs_data_got_descs(ffs, data, len);
			if (unlikely(ret < 0))
				break;

			ffs->state = FFS_READ_STRINGS;
			ret = len;
		} else {
			pr_info("read strings\n");
			ret = __ffs_data_got_strings(ffs, data, len);
			if (unlikely(ret < 0))
				break;

			ret = ffs_epfiles_create(ffs);
			if (unlikely(ret)) {
				ffs->state = FFS_CLOSING;
				break;
			}

			ffs->state = FFS_ACTIVE;
			mutex_unlock(&ffs->mutex);

			ret = ffs_ready(ffs);
			if (unlikely(ret < 0)) {
				ffs->state = FFS_CLOSING;
				return ret;
			}

			return len;
		}
		break;

	case FFS_ACTIVE:
		data = NULL;
		/*
		 * We're called from user space, we can use _irq
		 * rather then _irqsave
		 */
		spin_lock_irq(&ffs->ev.waitq.lock);
		switch (ffs_setup_state_clear_cancelled(ffs)) {
		case FFS_SETUP_CANCELLED:
			ret = -EIDRM;
			goto done_spin;

		case FFS_NO_SETUP:
			ret = -ESRCH;
			goto done_spin;

		case FFS_SETUP_PENDING:
			break;
		}

		/* FFS_SETUP_PENDING: writing only answers IN requests */
		if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);
			break;
		}

		/* FFS_SETUP_PENDING and not stall */
		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		data = ffs_prepare_buffer(buf, len);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		spin_lock_irq(&ffs->ev.waitq.lock);

		/*
		 * We are guaranteed to be still in FFS_ACTIVE state
		 * but the state of setup could have changed from
		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
		 * to check for that.  If that happened we copied data
		 * from user space in vain but it's unlikely.
		 *
		 * For sure we are not in FFS_NO_SETUP since this is
		 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
		 * transition can be performed and it's protected by
		 * mutex.
		 */
		if (ffs_setup_state_clear_cancelled(ffs) ==
				FFS_SETUP_CANCELLED) {
				ret = -EIDRM;
done_spin:
			spin_unlock_irq(&ffs->ev.waitq.lock);
		} else {
			/* unlocks spinlock */
			ret = __ffs_ep0_queue_wait(ffs, data, len);
		}
		kfree(data);
		break;

	default:
		ret = -EBADFD;
		break;
	}

	mutex_unlock(&ffs->mutex);
	return ret;
}
477 
/*
 * Copy up to @n queued functionfs events to userspace.
 *
 * Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit.
 * Returns the number of bytes copied, or -EFAULT.
 */
static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf, size_t n)
	__releases(&ffs->ev.waitq.lock)
{
	/*
	 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
	 * size of ffs->ev.types array (which is four) so that's how much space
	 * we reserve.
	 */
	struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
	const size_t size = n * sizeof *events;
	unsigned i = 0;

	/* Zero-fill so padding bytes never leak kernel stack to userspace. */
	memset(events, 0, size);

	do {
		events[i].type = ffs->ev.types[i];
		if (events[i].type == FUNCTIONFS_SETUP) {
			events[i].u.setup = ffs->ev.setup;
			/* Handing out a SETUP event opens the setup phase. */
			ffs->setup_state = FFS_SETUP_PENDING;
		}
	} while (++i < n);

	/* Consume the delivered events; shift any remaining ones down. */
	ffs->ev.count -= n;
	if (ffs->ev.count)
		memmove(ffs->ev.types, ffs->ev.types + n, ffs->ev.count * sizeof *ffs->ev.types);

	spin_unlock_irq(&ffs->ev.waitq.lock);
	mutex_unlock(&ffs->mutex);

	return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
}
510 
/*
 * read() on the ep0 node.
 *
 * With no setup pending it returns queued functionfs events (blocking
 * unless O_NONBLOCK); with an OUT setup pending it reads the data stage
 * from the host.  Reading during an IN setup stalls ep0.
 */
static ssize_t ffs_ep0_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	char *data = NULL;
	size_t n;
	int ret;

	ENTER();

	/* Fast check if setup was canceled */
	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	if (ffs->state != FFS_ACTIVE) {
		ret = -EBADFD;
		goto done_mutex;
	}

	/*
	 * We're called from user space, we can use _irq rather then
	 * _irqsave
	 */
	spin_lock_irq(&ffs->ev.waitq.lock);

	switch (ffs_setup_state_clear_cancelled(ffs)) {
	case FFS_SETUP_CANCELLED:
		ret = -EIDRM;
		break;

	case FFS_NO_SETUP:
		/* Event read: buffer must hold at least one whole event. */
		n = len / sizeof(struct usb_functionfs_event);
		if (unlikely(!n)) {
			ret = -EINVAL;
			break;
		}

		if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
			ret = -EAGAIN;
			break;
		}

		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
							ffs->ev.count)) {
			ret = -EINTR;
			break;
		}

		/* unlocks spinlock */
		return __ffs_ep0_read_events(ffs, buf,
						 min(n, (size_t)ffs->ev.count));

	case FFS_SETUP_PENDING:
		/* Reading only serves OUT requests; IN means a stall. */
		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);
			goto done_mutex;
		}

		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		if (likely(len)) {
			data = kmalloc(len, GFP_KERNEL);
			if (unlikely(!data)) {
				ret = -ENOMEM;
				goto done_mutex;
			}
		}

		spin_lock_irq(&ffs->ev.waitq.lock);

		/* See ffs_ep0_write() */
		if (ffs_setup_state_clear_cancelled(ffs) ==
			FFS_SETUP_CANCELLED) {
			ret = -EIDRM;
			break;
		}

		/* unlocks spinlock */
		ret = __ffs_ep0_queue_wait(ffs, data, len);
		if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
			ret = -EFAULT;
		goto done_mutex;

	default:
		ret = -EBADFD;
		break;
	}

	spin_unlock_irq(&ffs->ev.waitq.lock);
done_mutex:
	mutex_unlock(&ffs->mutex);
	kfree(data);
	return ret;
}
613 
ffs_ep0_open(struct inode * inode,struct file * file)614 static int ffs_ep0_open(struct inode *inode, struct file *file)
615 {
616     struct ffs_data *ffs  = container_of(inode->i_cdev, struct ffs_data, cdev);
617     ENTER();
618 
619     if (unlikely(ffs->state == FFS_CLOSING))
620         return -EBUSY;
621 
622     file->private_data = ffs;
623     return 0;
624 }
625 
/* Close the ep0 node; all real teardown happens elsewhere. */
static int ffs_ep0_release(struct inode *inode, struct file *file)
{
	ENTER();

	return 0;
}
631 
ffs_ep0_iorw(struct file * file,struct ffs_io_data * io_data)632 static ssize_t ffs_ep0_iorw(struct file *file, struct ffs_io_data *io_data)
633 {
634     struct ffs_data *ffs = file->private_data;
635     struct usb_request *req = NULL;
636     ssize_t ret, data_len = io_data->len;
637     bool interrupted = false;
638     struct ffs_memory *ffsm = NULL;
639 
640     /* Are we still active? */
641     if (WARN_ON(ffs->state != FFS_ACTIVE))
642         return -ENODEV;
643     ffsm = generic_find_ep0_memory_area(ffs, io_data->buf, data_len);
644     if (ffsm == NULL)
645     {
646         return -ENODEV;
647     }
648     if (!io_data->aio) {
649         reinit_completion(&ffs->ep0req_completion);
650 
651         req = ffs->ep0req;
652         req->buf      = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start);
653         req->length   = data_len;
654         req->complete = ffs_ep0_complete;
655 
656         ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
657         if (unlikely(ret < 0))
658             goto error;
659 
660         if (io_data->timeout > 0) {
661             ret = wait_for_completion_interruptible_timeout(&ffs->ep0req_completion, io_data->timeout);
662             if (ret < 0) {
663                 /*
664                  * To avoid race condition with ffs_epfile_io_complete,
665                  * dequeue the request first then check
666                  * status. usb_ep_dequeue API should guarantee no race
667                  * condition with req->complete callback.
668                  */
669                 usb_ep_dequeue(ffs->gadget->ep0, req);
670                 wait_for_completion(&ffs->ep0req_completion);
671                 interrupted = req->status < 0;
672             } else if (ret == 0) {
673                 ret = -EBUSY;
674                 usb_ep_dequeue(ffs->gadget->ep0, req);
675                 wait_for_completion(&ffs->ep0req_completion);
676                 goto error;
677             }
678         } else {
679             ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
680             if (ret < 0) {
681                 usb_ep_dequeue(ffs->gadget->ep0, req);
682                 wait_for_completion(&ffs->ep0req_completion);
683                 interrupted = req->status < 0;
684             }
685         }
686 
687         if (interrupted) {
688             ret = -EINTR;
689         } else {
690             ret = req->actual;
691         }
692         goto error;
693     }
694     else if (!(req = usb_ep_alloc_request(ffs->gadget->ep0, GFP_ATOMIC))) {
695         ret = -ENOMEM;
696     }
697     else {
698         req->buf     = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start);
699         req->length   = data_len;
700 
701         io_data->ep = ffs->gadget->ep0;
702         io_data->req = req;
703         io_data->ffs = ffs;
704 
705         req->context  = io_data;
706         req->complete = ffs_ep0_async_io_complete;
707         list_add(&req->list, &ffs->ep0req->list);
708         ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
709         if (unlikely(ret)) {
710             usb_ep_free_request(ffs->gadget->ep0, req);
711             goto error;
712         }
713 
714         ret = -EIOCBQUEUED;
715     }
716 
717 error:
718     return ret;
719 }
720 
ffs_ep0_ioctl(struct file * file,unsigned code,unsigned long value)721 static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
722 {
723     struct ffs_data *ffs = file->private_data;
724     long ret = 0;
725     unsigned int copied = 0;
726     struct ffs_memory *ffsm = NULL;
727     struct generic_memory mem;
728 
729     ENTER();
730 
731     switch (code) {
732     case FUNCTIONFS_ENDPOINT_QUEUE_INIT:
733         ret = kfifo_alloc(&ffs->reqEventFifo, MAX_REQUEST * sizeof(struct UsbFnReqEvent), GFP_KERNEL);
734         break;
735     case FUNCTIONFS_ENDPOINT_QUEUE_DEL:
736         kfifo_free(&ffs->reqEventFifo);
737         break;
738     case FUNCTIONFS_ENDPOINT_RELEASE_BUF:
739         if (copy_from_user(&mem, (void __user *)value, sizeof(mem)))
740         {
741             pr_info("copy from user failed\n");
742             return -EFAULT;
743         }
744         ffsm = generic_find_ep0_memory_area(ffs, mem.buf, mem.size);
745         if (ffsm == NULL)
746         {
747             return -EFAULT;
748         }
749         list_del(&ffsm->memlist);
750         kfree((void *)ffsm->mem);
751         kfree(ffsm);
752         break;
753     case FUNCTIONFS_ENDPOINT_READ:
754     case FUNCTIONFS_ENDPOINT_WRITE:
755     {
756         struct IoData myIoData;
757         struct ffs_io_data io_data, *p = &io_data;
758         ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData));
759         if (unlikely(ret)) {
760             return -EFAULT;
761         }
762         if (myIoData.aio) {
763             p = kmalloc(sizeof(io_data), GFP_KERNEL);
764             if (unlikely(!p))
765                 return -ENOMEM;
766         } else {
767             memset(p, 0, sizeof(*p));
768         }
769         memcpy(p, &myIoData, sizeof(struct IoData));
770 
771         ret = ffs_ep0_iorw(file, p);
772         if (ret == -EIOCBQUEUED) {
773             return 0;
774         }
775         if (p->aio)
776             kfree(p);
777         return ret;
778     }
779     case FUNCTIONFS_ENDPOINT_RW_CANCEL:
780     {
781         struct usb_request *req;
782         struct IoData myIoData;
783         ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData));
784         if (unlikely(ret)) {
785             return -EFAULT;
786         }
787         ffsm = generic_find_ep0_memory_area(ffs, myIoData.buf, myIoData.len);
788         if (ffsm == NULL)
789         {
790             return -EFAULT;
791         }
792         list_for_each_entry(req, &ffs->ep0req->list, list) {
793             if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) {
794                 usb_ep_dequeue(ffs->gadget->ep0, req);
795                 return 0;
796             }
797         }
798         return -EFAULT;
799     }
800     case FUNCTIONFS_ENDPOINT_GET_REQ_STATUS:
801     {
802         struct usb_request *req;
803         struct IoData myIoData;
804         ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData));
805         if (unlikely(ret)) {
806             return -EFAULT;
807         }
808         ffsm = generic_find_ep0_memory_area(ffs, myIoData.buf, myIoData.len);
809         if (ffsm == NULL)
810         {
811             return -EFAULT;
812         }
813         list_for_each_entry(req, &ffs->ep0req->list, list) {
814             if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) {
815                 return req->status;
816             }
817         }
818         return -EFAULT;
819     }
820     case FUNCTIONFS_ENDPOINT_GET_EP0_EVENT:
821         if (!kfifo_is_empty(&ffs->reqEventFifo)) {
822             ret = kfifo_to_user(&ffs->reqEventFifo, (void __user *)value,
823             sizeof(struct UsbFnReqEvent), &copied) == 0 ? copied : -1;
824             if (ret > 0) {
825                 ffs->setup_state = FFS_NO_SETUP;
826                 return ret;
827             }
828         }
829 
830         return -EFAULT;
831     }
832 
833     return ret;
834 }
835 
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point — forwards verbatim to the native
 * handler (presumably the ioctl payloads are layout-compatible between
 * 32- and 64-bit userspace; confirm against u_generic.h).
 */
static long ffs_ep0_compat_ioctl(struct file *file, unsigned code,
		unsigned long value)
{
	return ffs_ep0_ioctl(file, code, value);
}
#endif
843 
ffs_ep0_poll(struct file * file,poll_table * wait)844 static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
845 {
846     struct ffs_data *ffs = file->private_data;
847     __poll_t mask = EPOLLWRNORM;
848     int ret;
849 
850     ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
851     if (unlikely(ret < 0))
852         return mask;
853 
854     switch (ffs->state) {
855     case FFS_READ_DESCRIPTORS:
856     case FFS_READ_STRINGS:
857         mask |= EPOLLOUT;
858         break;
859 
860     case FFS_ACTIVE:
861         switch (ffs->setup_state) {
862         case FFS_NO_SETUP:
863             poll_wait(file, &ffs->ev.waitq, wait);
864             if (ffs->ev.count)
865                 mask |= EPOLLIN;
866             break;
867 
868         case FFS_SETUP_PENDING:
869         case FFS_SETUP_CANCELLED:
870             poll_wait(file, &ffs->wait_que, wait);
871             if (!kfifo_is_empty(&ffs->reqEventFifo))
872             {
873                 mask |= EPOLLOUT;
874             }
875             break;
876         }
877     case FFS_CLOSING:
878         break;
879     case FFS_DEACTIVATED:
880         break;
881     }
882 
883     mutex_unlock(&ffs->mutex);
884 
885     return mask;
886 }
887 
ffs_ep0_mmap(struct file * file,struct vm_area_struct * vma)888 static int ffs_ep0_mmap(struct file *file, struct vm_area_struct *vma)
889 {
890     struct ffs_data *ffs = file->private_data;
891     size_t size = vma->vm_end - vma->vm_start;
892     unsigned long flags;
893     struct ffs_memory *ffsm = NULL;
894     void *virt_mem = NULL;
895 
896     if (ffs == NULL) {
897         pr_info("Invalid private parameter!\n");
898         return -EINVAL;
899     }
900     virt_mem = kmalloc(size, GFP_KERNEL);
901     if (virt_mem == NULL)
902     {
903         pr_info("%s alloc memory failed!\n", __FUNCTION__);
904         return -ENOMEM;
905     }
906     ffsm = kmalloc(sizeof(struct ffs_memory), GFP_KERNEL);
907     if (ffsm == NULL)
908     {
909         pr_info("%s alloc memory failed!\n", __FUNCTION__);
910         goto error_free_mem;
911     }
912     if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(virt_mem)>>PAGE_SHIFT,
913         vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
914         goto error_free_ffsm;
915     }
916     ffsm->mem      = (uint64_t)virt_mem;
917     ffsm->size     = size;
918     ffsm->vm_start = vma->vm_start;
919     INIT_LIST_HEAD(&ffsm->memlist);
920     spin_lock_irqsave(&ffs->mem_lock, flags);
921     list_add_tail(&ffsm->memlist, &ffs->memory_list);
922     spin_unlock_irqrestore(&ffs->mem_lock, flags);
923     return 0;
924 error_free_ffsm:
925     kfree(ffsm);
926 error_free_mem:
927     kfree(virt_mem);
928     return -1;
929 }
930 
/*
 * File operations for the ep0 character device: descriptor/string upload
 * (write), setup-event delivery (read/poll), control ioctls, and
 * shared-buffer mapping (mmap).
 */
static const struct file_operations ffs_ep0_operations = {
    .owner   = THIS_MODULE,
    .llseek =    no_llseek,
    .open =        ffs_ep0_open,
    .write =    ffs_ep0_write,
    .read =        ffs_ep0_read,
    .release =    ffs_ep0_release,
    .unlocked_ioctl =    ffs_ep0_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = ffs_ep0_compat_ioctl,
#endif
    .poll =        ffs_ep0_poll,
    .mmap =     ffs_ep0_mmap,
};
945 
946 /* "Normal" endpoints operations ********************************************/
/*
 * Find the registered mmap() region that fully contains the user range
 * [buf, buf + len).  Returns the matching ffs_memory entry, or NULL when
 * no registered region covers the whole range.  The caller must hold the
 * lock protecting epfile->memory_list.
 */
static struct ffs_memory *generic_find_memory_area(struct ffs_epfile *epfile, uint64_t buf, uint32_t len)
{
	struct ffs_memory *entry = NULL;

	list_for_each_entry(entry, &epfile->memory_list, memlist) {
		uint64_t start = entry->vm_start;
		uint64_t end   = entry->vm_start + entry->size;

		if (buf >= start && buf < end && len <= end - buf)
			return entry;
	}

	return NULL;
}
963 
ffs_epfile_io_complete(struct usb_ep * _ep,struct usb_request * req)964 static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
965 {
966     ENTER();
967     if (likely(req->context)) {
968         struct ffs_ep *ep = _ep->driver_data;
969         ep->status = req->status ? req->status : req->actual;
970         complete(req->context);
971     }
972 }
973 
/*
 * Tasklet body scheduled by ffs_epfile_async_io_complete().  Under eps_lock
 * it publishes the finished request's outcome into the per-endpoint event
 * fifo, unlinks and frees the request and its io_data, then wakes
 * pollers/readers waiting on the epfile.
 */
static void epfile_task_proc(unsigned long context)
{
    struct ffs_io_data *io_data = (struct ffs_io_data *)context;
    struct ffs_epfile *epfile = io_data->epfile;
    unsigned long flags;

    spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
    /* Snapshot the result before the request is freed below. */
    io_data->status = io_data->req->status;
    io_data->actual = io_data->req->actual;
    /* NOTE(review): copies sizeof(struct UsbFnReqEvent) bytes starting at
     * &io_data->buf — presumably the event fields are laid out there in
     * struct ffs_io_data; confirm against its definition. */
    kfifo_in(&epfile->reqEventFifo, &io_data->buf, sizeof(struct UsbFnReqEvent));
    list_del(&io_data->req->list);
    usb_ep_free_request(io_data->ep, io_data->req);
    kfree(io_data);
    spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
    wake_up_all(&epfile->wait_que);
}
990 
/*
 * Completion callback for asynchronous endpoint I/O.  It may run in
 * interrupt context, so the bookkeeping (fifo insert, request free, wakeup)
 * is deferred to a tasklet (epfile_task_proc).  The tasklet_struct lives in
 * io_data, and each io_data serves exactly one request, so re-initialising
 * the tasklet per completion is safe.
 */
static void ffs_epfile_async_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
    struct ffs_io_data *io_data = req->context;

    tasklet_init(&io_data->task, epfile_task_proc, (uintptr_t)io_data);
    tasklet_schedule(&io_data->task);

}
999 
ffs_epfile_open(struct inode * inode,struct file * file)1000 static int ffs_epfile_open(struct inode *inode, struct file *file)
1001 {
1002     struct ffs_epfile *epfile  = container_of(inode->i_cdev, struct ffs_epfile, cdev);
1003     ENTER();
1004     if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1005         return -ENODEV;
1006 
1007     file->private_data = epfile;
1008     return 0;
1009 }
1010 
/* Release an endpoint file: nothing per-open to tear down — buffers and
 * fifos live on the epfile and are managed via ioctls. */
static int ffs_epfile_release(struct inode *inode, struct file *file)
{
    ENTER();
    return 0;
}
1016 
ffs_epfile_mmap(struct file * file,struct vm_area_struct * vma)1017 static int ffs_epfile_mmap(struct file *file, struct vm_area_struct *vma)
1018 {
1019     struct ffs_epfile *epfile = file->private_data;
1020     size_t size = vma->vm_end - vma->vm_start;
1021     struct ffs_memory *ffsm = NULL;
1022     unsigned long flags;
1023     void *virt_mem = NULL;
1024 
1025     if (epfile == NULL)
1026     {
1027         pr_info("Invalid private parameter!\n");
1028         return -EINVAL;
1029     }
1030     virt_mem = kmalloc(size, GFP_KERNEL);
1031     if (virt_mem == NULL)
1032     {
1033         pr_info("%s alloc memory failed!\n", __FUNCTION__);
1034         return -ENOMEM;
1035     }
1036     ffsm = kmalloc(sizeof(struct ffs_memory), GFP_KERNEL);
1037     if (ffsm == NULL)
1038     {
1039         pr_info("%s alloc memory failed!\n", __FUNCTION__);
1040         goto error_free_mem;
1041     }
1042     if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(virt_mem)>>PAGE_SHIFT,
1043                 vma->vm_end - vma->vm_start, vma->vm_page_prot))
1044     {
1045         goto error_free_ffsm;
1046     }
1047     ffsm->mem = (uint64_t)virt_mem;
1048     ffsm->size = size;
1049     ffsm->vm_start = vma->vm_start;
1050     INIT_LIST_HEAD(&ffsm->memlist);
1051     spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
1052     list_add_tail(&ffsm->memlist, &epfile->memory_list);
1053     spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
1054 
1055     return 0;
1056 error_free_ffsm:
1057     kfree(ffsm);
1058 error_free_mem:
1059     kfree(virt_mem);
1060 
1061     return -1;
1062 }
1063 
ffs_epfile_iorw(struct file * file,struct ffs_io_data * io_data)1064 static ssize_t ffs_epfile_iorw(struct file *file, struct ffs_io_data *io_data)
1065 {
1066     struct ffs_epfile *epfile = file->private_data;
1067     struct usb_request *req = NULL;
1068     struct ffs_ep *ep = NULL;
1069     struct ffs_memory *ffsm = NULL;
1070     ssize_t ret, data_len = -EINVAL;
1071     int halt;
1072 
1073     /* Are we still active? */
1074     if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1075         return -ENODEV;
1076 
1077     /* Wait for endpoint to be enabled */
1078     ep = epfile->ep;
1079     if (!ep) {
1080         if (file->f_flags & O_NONBLOCK)
1081             return -EAGAIN;
1082 
1083         ret = wait_event_interruptible(
1084                 epfile->ffs->wait, (ep = epfile->ep));
1085         if (ret)
1086             return -EINTR;
1087     }
1088 
1089     /* Do we halt? */
1090     halt = (!io_data->read == !epfile->in);
1091     if (halt && epfile->isoc)
1092         return -EINVAL;
1093 
1094     /* We will be using request and read_buffer */
1095     ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
1096     if (unlikely(ret))
1097         goto error;
1098 
1099     /* Allocate & copy */
1100     if (!halt) {
1101         struct usb_gadget *gadget;
1102         /*
1103          * if we _do_ wait above, the epfile->ffs->gadget might be NULL
1104          * before the waiting completes, so do not assign to 'gadget'
1105          * earlier
1106          */
1107         gadget = epfile->ffs->gadget;
1108 
1109         spin_lock_irq(&epfile->ffs->eps_lock);
1110         /* In the meantime, endpoint got disabled or changed. */
1111         if (epfile->ep != ep) {
1112             ret = -ESHUTDOWN;
1113             goto error_lock;
1114         }
1115         data_len = io_data->len;
1116         /*
1117          * Controller may require buffer size to be aligned to
1118          * maxpacketsize of an out endpoint.
1119          */
1120         if (io_data->read)
1121             data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
1122         spin_unlock_irq(&epfile->ffs->eps_lock);
1123     }
1124 
1125     spin_lock_irq(&epfile->ffs->eps_lock);
1126     ffsm = generic_find_memory_area(epfile, io_data->buf, io_data->len);
1127     if (ffsm == NULL)
1128     {
1129         return -EFAULT;
1130     }
1131     if (epfile->ep != ep) {
1132         /* In the meantime, endpoint got disabled or changed. */
1133         ret = -ESHUTDOWN;
1134     }
1135     else if (halt) {
1136         ret = usb_ep_set_halt(ep->ep);
1137         if (!ret)
1138             ret = -EBADMSG;
1139     }
1140     else if (!io_data->aio) {
1141         DECLARE_COMPLETION_ONSTACK(done);
1142         bool interrupted = false;
1143 
1144         req = ep->req;
1145         req->buf      = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start);
1146         req->length   = data_len;
1147 
1148         req->context  = &done;
1149         req->complete = ffs_epfile_io_complete;
1150 
1151         ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
1152         if (unlikely(ret < 0))
1153             goto error_lock;
1154 
1155         spin_unlock_irq(&epfile->ffs->eps_lock);
1156         if (io_data->timeout > 0) {
1157             ret = wait_for_completion_interruptible_timeout(&done, io_data->timeout);
1158             if (ret < 0) {
1159                 /*
1160                  * To avoid race condition with ffs_epfile_io_complete,
1161                  * dequeue the request first then check
1162                  * status. usb_ep_dequeue API should guarantee no race
1163                  * condition with req->complete callback.
1164                  */
1165                 usb_ep_dequeue(ep->ep, req);
1166                 wait_for_completion(&done);
1167                 interrupted = ep->status < 0;
1168             } else if (ret == 0) {
1169                 ret = -EBUSY;
1170                 usb_ep_dequeue(ep->ep, req);
1171                 wait_for_completion(&done);
1172                 goto error_mutex;
1173             }
1174         } else {
1175             ret = wait_for_completion_interruptible(&done);
1176             if (ret < 0) {
1177                 usb_ep_dequeue(ep->ep, req);
1178                 wait_for_completion(&done);
1179                 interrupted = ep->status < 0;
1180             }
1181         }
1182 
1183         if (interrupted) {
1184             ret = -EINTR;
1185         } else {
1186             ret = req->actual;
1187         }
1188         goto error_mutex;
1189     }
1190     else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
1191         ret = -ENOMEM;
1192     }
1193     else {
1194         req->buf     = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start);
1195         req->length  = data_len;
1196 
1197         io_data->ep     = ep->ep;
1198         io_data->req    = req;
1199         io_data->epfile = epfile;
1200 
1201         req->context  = io_data;
1202         req->complete = ffs_epfile_async_io_complete;
1203         list_add(&req->list, &ep->req->list);
1204         ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
1205         if (unlikely(ret)) {
1206             usb_ep_free_request(ep->ep, req);
1207             goto error_lock;
1208         }
1209 
1210         ret = -EIOCBQUEUED;
1211     }
1212 
1213 error_lock:
1214     spin_unlock_irq(&epfile->ffs->eps_lock);
1215 error_mutex:
1216     mutex_unlock(&epfile->mutex);
1217 error:
1218     return ret;
1219 }
1220 
ffs_epfile_ioctl(struct file * file,unsigned code,unsigned long value)1221 static long ffs_epfile_ioctl(struct file *file, unsigned code, unsigned long value)
1222 {
1223     struct ffs_epfile *epfile = file->private_data;
1224     struct ffs_ep *ep = epfile->ep;
1225     int ret = 0;
1226     struct generic_memory mem;
1227     struct ffs_memory *ffsm = NULL;
1228 
1229     ENTER();
1230 
1231     if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1232         return -ENODEV;
1233 
1234     spin_lock_irq(&epfile->ffs->eps_lock);
1235 
1236     switch (code) {
1237     case FUNCTIONFS_ENDPOINT_QUEUE_INIT:
1238         ret = kfifo_alloc(&epfile->reqEventFifo, MAX_REQUEST * sizeof(struct UsbFnReqEvent), GFP_KERNEL);
1239         break;
1240     case FUNCTIONFS_ENDPOINT_QUEUE_DEL:
1241         kfifo_free(&epfile->reqEventFifo);
1242         break;
1243     case FUNCTIONFS_ENDPOINT_RELEASE_BUF:
1244         if (copy_from_user(&mem, (void __user *)value, sizeof(mem)))
1245         {
1246             pr_info("copy from user failed\n");
1247             return -EFAULT;
1248         }
1249         ffsm = generic_find_memory_area(epfile, mem.buf, mem.size);
1250         if (ffsm == NULL)
1251         {
1252             return -EFAULT;
1253         }
1254         list_del(&ffsm->memlist);
1255         kfree((void *)ffsm->mem);
1256         kfree(ffsm);
1257         break;
1258     case FUNCTIONFS_ENDPOINT_READ:
1259     case FUNCTIONFS_ENDPOINT_WRITE:
1260     {
1261         struct IoData myIoData;
1262         struct ffs_io_data io_data, *p = &io_data;
1263         ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData));
1264         if (unlikely(ret)) {
1265             spin_unlock_irq(&epfile->ffs->eps_lock);
1266             return -EFAULT;
1267         }
1268         if (myIoData.aio) {
1269             p = kmalloc(sizeof(io_data), GFP_KERNEL);
1270             if (unlikely(!p)) {
1271                 spin_unlock_irq(&epfile->ffs->eps_lock);
1272                 return -ENOMEM;
1273             }
1274         } else {
1275             memset(p,  0, sizeof(*p));
1276         }
1277         memcpy(p, &myIoData, sizeof(struct IoData));
1278 
1279         spin_unlock_irq(&epfile->ffs->eps_lock);
1280         ret = ffs_epfile_iorw(file, p);
1281         if (ret == -EIOCBQUEUED) {
1282             return 0;
1283         }
1284         if (p->aio)
1285             kfree(p);
1286         return ret;
1287     }
1288     case FUNCTIONFS_ENDPOINT_RW_CANCEL:
1289     {
1290         struct usb_request *req;
1291         struct IoData myIoData;
1292         if (!ep) {
1293             spin_unlock_irq(&epfile->ffs->eps_lock);
1294             return -EFAULT;
1295         }
1296         ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData));
1297         if (unlikely(ret)) {
1298             spin_unlock_irq(&epfile->ffs->eps_lock);
1299             return -EFAULT;
1300         }
1301         ffsm = generic_find_memory_area(epfile, myIoData.buf, myIoData.len);
1302         if (ffsm == NULL)
1303         {
1304             return -EFAULT;
1305         }
1306         list_for_each_entry(req, &epfile->ep->req->list, list) {
1307             if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) {
1308                 usb_ep_dequeue(epfile->ep->ep, req);
1309                 spin_unlock_irq(&epfile->ffs->eps_lock);
1310                 return 0;
1311             }
1312         }
1313         spin_unlock_irq(&epfile->ffs->eps_lock);
1314         return -EFAULT;
1315     }
1316     case FUNCTIONFS_ENDPOINT_GET_REQ_STATUS:
1317     {
1318         struct usb_request *req;
1319         struct IoData myIoData;
1320         if (!ep) {
1321             spin_unlock_irq(&epfile->ffs->eps_lock);
1322             return -EFAULT;
1323         }
1324         ret = copy_from_user(&myIoData,(void __user *)value, sizeof(struct IoData));
1325         if (unlikely(ret)) {
1326             spin_unlock_irq(&epfile->ffs->eps_lock);
1327             return -EFAULT;
1328         }
1329         ffsm = generic_find_memory_area(epfile, myIoData.buf, myIoData.len);
1330         if (ffsm == NULL)
1331         {
1332             return -EFAULT;
1333         }
1334         list_for_each_entry(req, &epfile->ep->req->list, list) {
1335             if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) {
1336                 spin_unlock_irq(&epfile->ffs->eps_lock);
1337                 return req->status;
1338             }
1339         }
1340         spin_unlock_irq(&epfile->ffs->eps_lock);
1341         return -EFAULT;
1342     }
1343     case FUNCTIONFS_FIFO_STATUS:
1344         ret = usb_ep_fifo_status(epfile->ep->ep);
1345         break;
1346     case FUNCTIONFS_FIFO_FLUSH:
1347         usb_ep_fifo_flush(epfile->ep->ep);
1348         ret = 0;
1349         break;
1350     case FUNCTIONFS_CLEAR_HALT:
1351         ret = usb_ep_clear_halt(epfile->ep->ep);
1352         break;
1353     case FUNCTIONFS_ENDPOINT_REVMAP:
1354         ret = epfile->ep->num;
1355         break;
1356     case FUNCTIONFS_ENDPOINT_DESC:
1357     {
1358         int desc_idx;
1359         int i;
1360         struct usb_endpoint_descriptor *desc;
1361 
1362         switch (epfile->ffs->speed) {
1363         case USB_SPEED_SUPER:
1364             desc_idx = 2;
1365             break;
1366         case USB_SPEED_HIGH:
1367             desc_idx = 1;
1368             break;
1369         default:
1370             desc_idx = 1;
1371         }
1372         for (i = 0; i < epfile->ffs->eps_count; i++) {
1373             if (epfile->ffs->epfiles + i == epfile)
1374                 break;
1375         }
1376         ep = epfile->ffs->eps + i;
1377         desc = ep->descs[desc_idx];
1378         spin_unlock_irq(&epfile->ffs->eps_lock);
1379         ret = copy_to_user((void __user *)value, desc, desc->bLength);
1380         if (ret)
1381             ret = -EFAULT;
1382         return ret;
1383     }
1384     default:
1385         ret = -ENOTTY;
1386     }
1387     spin_unlock_irq(&epfile->ffs->eps_lock);
1388 
1389     return ret;
1390 }
1391 
ffs_epfile_read(struct file * file,char __user * buf,size_t count,loff_t * f_pos)1392 static ssize_t ffs_epfile_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
1393 {
1394     int status = 0;
1395     unsigned int copied = 0;
1396     unsigned long flags;
1397     struct ffs_epfile *epfile = file->private_data;
1398     ENTER();
1399     if (kfifo_is_empty(&epfile->reqEventFifo)) {
1400         return 0;
1401     }
1402     spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
1403     status = kfifo_to_user(&epfile->reqEventFifo, buf, count, &copied) == 0 ? copied : -1;
1404     spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
1405 
1406     return status;
1407 }
1408 
/* write() on an endpoint file is a no-op: actual transfers go through the
 * FUNCTIONFS_ENDPOINT_WRITE ioctl.  Claim the whole buffer so callers do
 * not retry. */
static ssize_t ffs_epfile_write(struct file *file, const char __user *buf, size_t count, loff_t *f_pos)
{
    return count;
}
1413 
ffs_epfile_poll(struct file * file,struct poll_table_struct * wait)1414 static unsigned int ffs_epfile_poll(struct file *file, struct poll_table_struct * wait)
1415 {
1416     unsigned int mask = 0;
1417     struct ffs_epfile *epfile = file->private_data;
1418     ENTER();
1419     poll_wait(file, &epfile->wait_que, wait);
1420     if (!kfifo_is_empty(&epfile->reqEventFifo)) {
1421         mask |= POLLIN;
1422     }
1423     return mask;
1424 }
1425 
#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl: forwards directly to the native handler.
 * NOTE(review): this assumes every ioctl argument struct has an identical
 * 32/64-bit layout — confirm for struct IoData / FuncNew. */
static long ffs_epfile_compat_ioctl(struct file *file, unsigned code,
        unsigned long value)
{
    return ffs_epfile_ioctl(file, code, value);
}
#endif
1433 
/*
 * File operations for the per-endpoint character devices: mmap'd buffers,
 * event read/poll, and the I/O ioctl interface.
 */
static const struct file_operations ffs_epfile_operations = {
    .owner   = THIS_MODULE,
    .llseek =    no_llseek,
    .mmap = ffs_epfile_mmap,
    .read    = ffs_epfile_read,
    .write   = ffs_epfile_write,
    .poll = ffs_epfile_poll,
    .open =        ffs_epfile_open,
    .release =    ffs_epfile_release,
    .unlocked_ioctl =    ffs_epfile_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = ffs_epfile_compat_ioctl,
#endif
};
1448 
1449 /* ffs_data and ffs_function construction and destruction code **************/
static void ffs_data_clear(struct ffs_data *ffs);
static void ffs_data_reset(struct ffs_data *ffs);
/* Base of the char-dev region shared by ep0 (minor 0) and endpoint files. */
static dev_t g_dev;
/* Number of minors reserved per function (ep0 + endpoint files). */
#define MAX_EP_DEV 10
usbfn_ioctl(struct file * file,unsigned int cmd,unsigned long value)1454 static long usbfn_ioctl(struct file *file, unsigned int cmd, unsigned long value)
1455 {
1456     long ret;
1457     ENTER();
1458     switch(cmd)
1459     {
1460         case FUNCTIONFS_NEWFN:
1461         {
1462             struct ffs_dev *ffs_dev;
1463             struct ffs_data    *ffs;
1464             struct FuncNew newfn;
1465             char nameEp0[MAX_NAMELEN];
1466             ret = copy_from_user(&newfn, (void __user *)value, sizeof(struct FuncNew ));
1467             if (unlikely(ret)) {
1468                 return -EFAULT;
1469             }
1470             ffs = ffs_data_new(newfn.name);
1471             if (unlikely(!ffs)) {
1472                 return (-ENOMEM);
1473             }
1474 
1475             if (newfn.nameLen > MAX_NAMELEN) {
1476                 return -EPERM;
1477             }
1478             memcpy(ffs->dev_name, newfn.name, newfn.nameLen);
1479 
1480             if (unlikely(!ffs->dev_name)) {
1481                 ffs_data_put(ffs);
1482                 return (-ENOMEM);
1483             }
1484 
1485             if (sprintf(nameEp0, "%s.ep%u", ffs->dev_name, 0) < 0) {
1486                 ffs_data_put(ffs);
1487                 return -EFAULT;
1488             }
1489             ffs_dev = ffs_acquire_dev(newfn.name);
1490             if (IS_ERR(ffs_dev)) {
1491                 ffs_data_put(ffs);
1492                 return (-ENODEV);
1493             }
1494             ffs->private_data = ffs_dev;
1495 
1496             ret = alloc_chrdev_region(&g_dev, 0, MAX_EP_DEV, nameEp0);
1497             if (ret < 0) {
1498                 ffs_release_dev(ffs);
1499                 ffs_data_put(ffs);
1500                 return -EBUSY;
1501             }
1502             cdev_init(&ffs->cdev, &ffs_ep0_operations);
1503             ffs->devno = MKDEV(MAJOR(g_dev), 0);
1504             ret = cdev_add(&ffs->cdev, ffs->devno, 1);
1505             if (ret) {
1506                 ffs_release_dev(ffs);
1507                 ffs_data_put(ffs);
1508                 return -EBUSY;
1509             }
1510 
1511             ffs->fn_device = device_create(ffs_class, NULL, ffs->devno, NULL, nameEp0);
1512             if (IS_ERR(ffs->fn_device)) {
1513                 cdev_del(&ffs->cdev);
1514                 ffs_release_dev(ffs);
1515                 ffs_data_put(ffs);
1516                 return -EBUSY;
1517             }
1518             return 0;
1519         }
1520         case FUNCTIONFS_DELFN:
1521         {
1522             struct FuncNew newfn;
1523             struct ffs_data    *ffs;
1524             struct ffs_dev *ffs_dev;
1525             ret = copy_from_user(&newfn, (void __user *)value, sizeof(struct FuncNew ));
1526             if (unlikely(ret)) {
1527                 return -EFAULT;
1528             }
1529 
1530             ffs_dev = _ffs_find_dev(newfn.name);
1531             if (IS_ERR(ffs_dev)) {
1532                 return -EFAULT;
1533             }
1534             ffs = ffs_dev->ffs_data;
1535             device_destroy(ffs_class, ffs->devno);
1536             cdev_del(&ffs->cdev);
1537             unregister_chrdev_region(g_dev, MAX_EP_DEV);
1538             ffs_release_dev(ffs);
1539             ffs_data_clear(ffs);
1540             destroy_workqueue(ffs->io_completion_wq);
1541             kfree(ffs);
1542             return 0;
1543         }
1544         default:
1545             ret = -ENOTTY;
1546         }
1547 
1548     return ret;
1549 }
1550 
/* /dev/usbfn open: stateless — all work happens in usbfn_ioctl(). */
static int usbfn_open(struct inode *inode, struct file *file)
{
    return 0;
}
1555 
/* /dev/usbfn release: nothing per-open to tear down. */
static int usbfn_release(struct inode *inode, struct file *file)
{
    return 0;
}
1560 
/* File operations for the /dev/usbfn control node (see usbfn_misc below). */
static struct file_operations usbfn_fops = {
    .owner   = THIS_MODULE,
    .unlocked_ioctl   = usbfn_ioctl,
    .open    = usbfn_open,
    .release = usbfn_release,
#ifdef CONFIG_COMPAT
    .compat_ioctl = usbfn_ioctl,
#endif
};
1570 
/* Misc device exposing FUNCTIONFS_NEWFN/DELFN for function lifecycle. */
static struct miscdevice usbfn_misc = {
    .minor = MISC_DYNAMIC_MINOR,
    .name = "usbfn",
    .fops = &usbfn_fops,
};
1576 
1577 /* Driver's main init/cleanup functions *************************************/
functionfs_init(void)1578 static int functionfs_init(void)
1579 {
1580     int ret;
1581 
1582     ENTER();
1583     ret = misc_register(&usbfn_misc);
1584     if (likely(!ret))
1585         pr_info("file system registered\n");
1586     else
1587         pr_err("failed registering file system (%d)\n", ret);
1588 
1589     ffs_class = class_create(THIS_MODULE, "functionfs");
1590     if (IS_ERR(ffs_class))
1591         return PTR_ERR(ffs_class);
1592 
1593     ffs_class->devnode = ffs_devnode;
1594 
1595     return ret;
1596 }
1597 
/* Module exit: tear down in reverse order of functionfs_init(). */
static void functionfs_cleanup(void)
{
    ENTER();
    class_destroy(ffs_class);
    misc_deregister(&usbfn_misc);
}
1604 
/* Take an additional reference on @ffs (paired with ffs_data_put()). */
static void ffs_data_get(struct ffs_data *ffs)
{
    ENTER();
    refcount_inc(&ffs->ref);
}
1610 
/*
 * Drop a reference on @ffs.  On the final put, clear all owned data and
 * free the structure.  The BUG_ON asserts nobody can still be sleeping on
 * any of the instance's wait queues at destruction time.
 */
static void ffs_data_put(struct ffs_data *ffs)
{
    ENTER();
    if (unlikely(refcount_dec_and_test(&ffs->ref))) {
        pr_info("%s(): freeing\n", __func__);
        ffs_data_clear(ffs);
        /* struct completion switched to swait queues in v5.0, hence the
         * version-dependent accessor. */
        BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)
            swait_active(&ffs->ep0req_completion.wait) ||
#else
            waitqueue_active(&ffs->ep0req_completion.wait) ||
#endif
               waitqueue_active(&ffs->wait) ||
               waitqueue_active(&ffs->wait_que));
        destroy_workqueue(ffs->io_completion_wq);
        kfree(ffs);
    }
}
1629 
ffs_data_new(const char * dev_name)1630 static struct ffs_data *ffs_data_new(const char *dev_name)
1631 {
1632     struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1633     if (unlikely(!ffs))
1634         return NULL;
1635 
1636     ENTER();
1637 
1638     ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
1639     if (!ffs->io_completion_wq) {
1640         kfree(ffs);
1641         return NULL;
1642     }
1643 
1644     refcount_set(&ffs->ref, 1);
1645     atomic_set(&ffs->opened, 0);
1646     ffs->state = FFS_READ_DESCRIPTORS;
1647     mutex_init(&ffs->mutex);
1648     spin_lock_init(&ffs->eps_lock);
1649     spin_lock_init(&ffs->mem_lock);
1650     init_waitqueue_head(&ffs->ev.waitq);
1651     init_waitqueue_head(&ffs->wait);
1652     init_waitqueue_head(&ffs->wait_que);
1653     init_completion(&ffs->ep0req_completion);
1654     INIT_LIST_HEAD(&ffs->memory_list);
1655     ffs->ev.can_stall = 1;
1656 
1657     return ffs;
1658 }
1659 
/*
 * Release everything @ffs owns: notify closure, destroy the endpoint
 * files, drop the eventfd, and free descriptor/string buffers.  Pointers
 * are left dangling — ffs_data_reset() NULLs them for reuse.
 */
static void ffs_data_clear(struct ffs_data *ffs)
{
    ENTER();

    ffs_closed(ffs);

    /* A bound gadget must be unbound before its data can be cleared. */
    BUG_ON(ffs->gadget);

    if (ffs->epfiles)
        ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);

    if (ffs->ffs_eventfd)
        eventfd_ctx_put(ffs->ffs_eventfd);

    kfree(ffs->raw_descs_data);
    kfree(ffs->raw_strings);
    kfree(ffs->stringtabs);
}
1678 
ffs_data_reset(struct ffs_data * ffs)1679 static void ffs_data_reset(struct ffs_data *ffs)
1680 {
1681     ENTER();
1682 
1683     ffs_data_clear(ffs);
1684 
1685     ffs->epfiles = NULL;
1686     ffs->raw_descs_data = NULL;
1687     ffs->raw_descs = NULL;
1688     ffs->raw_strings = NULL;
1689     ffs->stringtabs = NULL;
1690 
1691     ffs->raw_descs_length = 0;
1692     ffs->fs_descs_count = 0;
1693     ffs->hs_descs_count = 0;
1694     ffs->ss_descs_count = 0;
1695 
1696     ffs->strings_count = 0;
1697     ffs->interfaces_count = 0;
1698     ffs->eps_count = 0;
1699 
1700     ffs->ev.count = 0;
1701 
1702     ffs->state = FFS_READ_DESCRIPTORS;
1703     ffs->setup_state = FFS_NO_SETUP;
1704     ffs->flags = 0;
1705 }
1706 
/*
 * Bind @ffs to the composite device: reserve USB string IDs, allocate the
 * ep0 request, number every string table entry, and remember the gadget.
 * Takes a reference on @ffs which functionfs_unbind() drops.
 *
 * NOTE(review): if usb_string_ids_n() or the ep0 request allocation fails,
 * FFS_FL_BOUND stays set — confirm callers treat that state as fatal.
 */
static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
{
    struct usb_gadget_strings **lang;
    int first_id;

    ENTER();

    if (WARN_ON(ffs->state != FFS_ACTIVE
         || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
        return -EBADFD;

    /* Reserve a contiguous range of string IDs for our tables. */
    first_id = usb_string_ids_n(cdev, ffs->strings_count);
    if (unlikely(first_id < 0))
        return first_id;

    ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
    if (unlikely(!ffs->ep0req))
        return -ENOMEM;
    ffs->ep0req->complete = ffs_ep0_complete;
    ffs->ep0req->context = ffs;
    INIT_LIST_HEAD(&ffs->ep0req->list);

    /* Number the strings in every language table from first_id upward. */
    lang = ffs->stringtabs;
    if (lang) {
        for (; *lang; ++lang) {
            struct usb_string *str = (*lang)->strings;
            int id = first_id;
            for (; str->s; ++id, ++str)
                str->id = id;
        }
    }

    ffs->gadget = cdev->gadget;
    ffs->speed = cdev->gadget->speed;
    ffs_data_get(ffs);
    return 0;
}
1744 
functionfs_unbind(struct ffs_data * ffs)1745 static void functionfs_unbind(struct ffs_data *ffs)
1746 {
1747     ENTER();
1748 
1749     if (!WARN_ON(!ffs->gadget)) {
1750         usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
1751         ffs->ep0req = NULL;
1752         ffs->gadget = NULL;
1753         clear_bit(FFS_FL_BOUND, &ffs->flags);
1754         ffs_data_put(ffs);
1755     }
1756 }
1757 
ffs_epfiles_create(struct ffs_data * ffs)1758 static int ffs_epfiles_create(struct ffs_data *ffs)
1759 {
1760     struct ffs_epfile *epfile = NULL, *epfiles = NULL;
1761     unsigned int i, count ,ret;
1762 
1763     ENTER();
1764 
1765     count = ffs->eps_count;
1766     epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
1767     if (!epfiles)
1768         return -ENOMEM;
1769 
1770     epfile = epfiles;
1771     for (i = 1; i <= count; ++i, ++epfile) {
1772         epfile->ffs = ffs;
1773         mutex_init(&epfile->mutex);
1774         INIT_LIST_HEAD(&epfile->memory_list);
1775         init_waitqueue_head(&epfile->wait_que);
1776         if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) {
1777             if (sprintf(epfile->name, "%s.ep%02x", ffs->dev_name, ffs->eps_addrmap[i]) < 0) {
1778                 return -EFAULT;
1779             }
1780         } else {
1781             if (sprintf(epfile->name, "%s.ep%u", ffs->dev_name, i) < 0) {
1782                 return -EFAULT;
1783             }
1784         }
1785 
1786         cdev_init(&epfile->cdev, &ffs_epfile_operations);
1787         epfile->devno=MKDEV(MAJOR(ffs->devno), i);
1788         ret = cdev_add(&epfile->cdev, epfile->devno, 1);
1789         if (ret)
1790         {
1791             ffs_epfiles_destroy(epfiles, i - 1);
1792             return -EBUSY;
1793         }
1794 
1795         epfile->device = device_create(ffs_class, NULL, epfile->devno, NULL, epfile->name);
1796         if (IS_ERR(epfile->device))
1797         {
1798             cdev_del(&epfile->cdev);
1799             ffs_epfiles_destroy(epfiles, i - 1);
1800             return -EBUSY;
1801         }
1802     }
1803 
1804     ffs->epfiles = epfiles;
1805     return 0;
1806 }
1807 
ffs_epfiles_destroy(struct ffs_epfile * epfiles,unsigned count)1808 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1809 {
1810     struct ffs_epfile *epfile = epfiles;
1811 
1812     ENTER();
1813 
1814     for (; count; --count, ++epfile) {
1815         BUG_ON(mutex_is_locked(&epfile->mutex));
1816         device_destroy(ffs_class, epfile->devno);
1817         cdev_del(&epfile->cdev);
1818     }
1819 
1820     kfree(epfiles);
1821 }
1822 
ffs_func_eps_disable(struct ffs_function * func)1823 static void ffs_func_eps_disable(struct ffs_function *func)
1824 {
1825     struct ffs_ep *ep         = func->eps;
1826     struct ffs_epfile *epfile = func->ffs->epfiles;
1827     unsigned count            = func->ffs->eps_count;
1828     unsigned long flags;
1829 
1830     spin_lock_irqsave(&func->ffs->eps_lock, flags);
1831     while (count--) {
1832         /* pending requests get nuked */
1833         if (likely(ep->ep))
1834             usb_ep_disable(ep->ep);
1835         ++ep;
1836 
1837         if (epfile) {
1838             epfile->ep = NULL;
1839             ++epfile;
1840         }
1841     }
1842     spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1843 }
1844 
/*
 * Enable every endpoint of @func for the negotiated speed and publish the
 * live endpoints through their epfiles.  Runs under ffs->eps_lock so it
 * cannot race with ffs_func_eps_disable() or epfile I/O.
 *
 * Returns 0 on success or the first error from config_ep_by_speed() /
 * usb_ep_enable(); endpoints enabled before the failure stay enabled.
 */
static int ffs_func_eps_enable(struct ffs_function *func)
{
    struct ffs_data *ffs      = func->ffs;
    struct ffs_ep *ep         = func->eps;
    struct ffs_epfile *epfile = ffs->epfiles;
    unsigned count            = ffs->eps_count;
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&func->ffs->eps_lock, flags);
    while(count--) {
        /* associate the hardware endpoint with its ffs_ep */
        ep->ep->driver_data = ep;

        /* select the fs/hs/ss descriptor matching the current speed */
        ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
        if (ret) {
            pr_err("%s: config_ep_by_speed(%s) returned %d\n",
                    __func__, ep->ep->name, ret);
            break;
        }

        ret = usb_ep_enable(ep->ep);
        if (likely(!ret)) {
            /* expose the now-enabled endpoint via its epfile */
            epfile->ep = ep;
            epfile->in = usb_endpoint_dir_in(ep->ep->desc);
            epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
        } else {
            break;
        }

        ++ep;
        ++epfile;
    }

    /* wake waiters blocked on ffs->wait (still under eps_lock) */
    wake_up_interruptible(&ffs->wait);
    spin_unlock_irqrestore(&func->ffs->eps_lock, flags);

    return ret;
}
1883 
1884 /* Parsing and building descriptors and strings *****************************/
1885 
1886 /*
1887  * This validates if data pointed by data is a valid USB descriptor as
1888  * well as record how many interfaces, endpoints and strings are
1889  * required by given configuration.  Returns address after the
1890  * descriptor or NULL if data is invalid.
1891  */
/* Entity kinds reported to ffs_entity_callback while walking descriptors. */
enum ffs_entity_type {
    FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
};

/* Kinds of Microsoft OS descriptors that can follow an OS desc header. */
enum ffs_os_desc_type {
    FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
};

/*
 * Callback invoked for every entity found while parsing a descriptor
 * blob; a negative return aborts the walk and is propagated to the
 * caller of ffs_do_descs()/ffs_do_single_desc().
 */
typedef int (*ffs_entity_callback)(enum ffs_entity_type entity, u8 *valuep,
                struct usb_descriptor_header *desc,
                void *priv);

/* As above, but for Microsoft OS (Ext Compat / Ext Prop) descriptors. */
typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
                struct usb_os_desc_header *h, void *data,
                unsigned len, void *priv);
1907 
/*
 * Validate one USB descriptor at @data (at most @len bytes available)
 * and report the entities it defines (interface / string / endpoint
 * numbers) through @entity.
 *
 * Returns the descriptor's bLength on success, -EINVAL for malformed or
 * gadget-reserved descriptors; negative returns from @entity propagate.
 */
static int __must_check ffs_do_single_desc(char *data, unsigned len,
                ffs_entity_callback entity,
                void *priv)
{
    struct usb_descriptor_header *_ds = (void *)data;
    u8 length;
    int ret;

    ENTER();

    /* At least two bytes are required: length and type */
    if (len < 2) {
        pr_vdebug("descriptor too short\n");
        return -EINVAL;
    }

    /* If we have at least as many bytes as the descriptor takes? */
    length = _ds->bLength;
    if (len < length) {
        pr_vdebug("descriptor longer then available data\n");
        return -EINVAL;
    }

/*
 * Per-entity sanity checks: interfaces may legitimately be number 0,
 * string ids must be non-zero, endpoint addresses must carry a non-zero
 * endpoint number.  __entity() validates the value and forwards it to
 * the callback; on callback failure the whole parse aborts.
 */
#define __entity_check_INTERFACE(val)  1
#define __entity_check_STRING(val)     (val)
#define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
#define __entity(type, val) do {                    \
        pr_vdebug("entity " #type "(%02x)\n", (val));        \
        if (unlikely(!__entity_check_ ##type(val))) {        \
            pr_vdebug("invalid entity's value\n");        \
            return -EINVAL;                    \
        }                            \
        ret = entity(FFS_ ##type, &val, _ds, priv);        \
        if (unlikely(ret < 0)) {                \
            pr_debug("entity " #type "(%02x); ret = %d\n",    \
                 (val), ret);                \
            return ret;                    \
        }                            \
    } while (0)

    /* Parse descriptor depending on type. */
    switch (_ds->bDescriptorType) {
    case USB_DT_DEVICE:
    case USB_DT_CONFIG:
    case USB_DT_STRING:
    case USB_DT_DEVICE_QUALIFIER:
        /* function can't have any of those */
        pr_vdebug("descriptor reserved for gadget: %d\n",
              _ds->bDescriptorType);
        return -EINVAL;

    case USB_DT_INTERFACE: {
        struct usb_interface_descriptor *ds = (void *)_ds;
        pr_vdebug("interface descriptor\n");
        if (length != sizeof *ds)
            goto inv_length;

        __entity(INTERFACE, ds->bInterfaceNumber);
        if (ds->iInterface)
            __entity(STRING, ds->iInterface);
    }
        break;

    case USB_DT_ENDPOINT: {
        struct usb_endpoint_descriptor *ds = (void *)_ds;
        pr_vdebug("endpoint descriptor\n");
        /* audio endpoints carry two extra bytes, hence two valid sizes */
        if (length != USB_DT_ENDPOINT_SIZE &&
            length != USB_DT_ENDPOINT_AUDIO_SIZE)
            goto inv_length;
        __entity(ENDPOINT, ds->bEndpointAddress);
    }
        break;

    case HID_DT_HID:
        pr_vdebug("hid descriptor\n");
        if (length != sizeof(struct hid_descriptor))
            goto inv_length;
        break;

    case USB_DT_OTG:
        if (length != sizeof(struct usb_otg_descriptor))
            goto inv_length;
        break;

    case USB_DT_INTERFACE_ASSOCIATION: {
        struct usb_interface_assoc_descriptor *ds = (void *)_ds;
        pr_vdebug("interface association descriptor\n");
        if (length != sizeof *ds)
            goto inv_length;
        if (ds->iFunction)
            __entity(STRING, ds->iFunction);
    }
        break;

    case USB_DT_SS_ENDPOINT_COMP:
        pr_vdebug("EP SS companion descriptor\n");
        if (length != sizeof(struct usb_ss_ep_comp_descriptor))
            goto inv_length;
        break;

    case USB_DT_OTHER_SPEED_CONFIG:
    case USB_DT_INTERFACE_POWER:
    case USB_DT_DEBUG:
    case USB_DT_SECURITY:
    case USB_DT_CS_RADIO_CONTROL:
        /* accepted but not interpreted; only bLength is consumed */
        pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
        break;
    default:
        /* We should never be here */
        pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
        break;
inv_length:
        pr_vdebug("invalid length: %d (descriptor %d)\n",
              _ds->bLength, _ds->bDescriptorType);
        return -EINVAL;
    }

#undef __entity
#undef __entity_check_DESCRIPTOR
#undef __entity_check_INTERFACE
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT

    return length;
}
2033 
ffs_do_descs(unsigned count,char * data,unsigned len,ffs_entity_callback entity,void * priv)2034 static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
2035                 ffs_entity_callback entity, void *priv)
2036 {
2037     const unsigned _len = len;
2038     uintptr_t num = 0;
2039 
2040     ENTER();
2041 
2042     for (;;) {
2043         int ret;
2044 
2045         if (num == count)
2046             data = NULL;
2047 
2048         /* Record "descriptor" entity */
2049         ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
2050         if (unlikely(ret < 0)) {
2051             pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
2052                  num, ret);
2053             return ret;
2054         }
2055 
2056         if (!data)
2057             return _len - len;
2058 
2059         ret = ffs_do_single_desc(data, len, entity, priv);
2060         if (unlikely(ret < 0)) {
2061             pr_debug("%s returns %d\n", __func__, ret);
2062             return ret;
2063         }
2064 
2065         len -= ret;
2066         data += ret;
2067         ++num;
2068     }
2069 }
2070 
/*
 * ffs_do_descs() callback for the counting pass: tallies interfaces,
 * strings and endpoints into the ffs_desc_helper and records (or checks)
 * the endpoint address map shared by all speeds.
 */
static int __ffs_data_do_entity(enum ffs_entity_type type,
                u8 *valuep, struct usb_descriptor_header *desc,
                void *priv)
{
    struct ffs_desc_helper *helper = priv;
    struct usb_endpoint_descriptor *ep_desc;

    ENTER();

    switch (type) {
    case FFS_DESCRIPTOR:
        /* nothing to count for the bare descriptor event */
        break;

    case FFS_INTERFACE:
        /* interface numbers start at 0: seeing "n" implies n+1 exist */
        if (*valuep >= helper->interfaces_count)
            helper->interfaces_count = *valuep + 1;
        break;

    case FFS_STRING:
        /* string ids start at 1; id 0 is the languages list */
        if (*valuep > helper->ffs->strings_count)
            helper->ffs->strings_count = *valuep;
        break;

    case FFS_ENDPOINT:
        ep_desc = (void *)desc;
        helper->eps_count++;
        if (helper->eps_count >= FFS_MAX_EPS_COUNT)
            return -EINVAL;
        /* Check if descriptors for any speed were already parsed */
        if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
            helper->ffs->eps_addrmap[helper->eps_count] =
                ep_desc->bEndpointAddress;
        else if (helper->ffs->eps_addrmap[helper->eps_count] !=
                ep_desc->bEndpointAddress)
            return -EINVAL;
        break;
    }

    return 0;
}
2120 
__ffs_do_os_desc_header(enum ffs_os_desc_type * next_type,struct usb_os_desc_header * desc)2121 static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
2122                 struct usb_os_desc_header *desc)
2123 {
2124     u16 bcd_version = le16_to_cpu(desc->bcdVersion);
2125     u16 w_index = le16_to_cpu(desc->wIndex);
2126 
2127     if (bcd_version != 1) {
2128         pr_vdebug("unsupported os descriptors version: %d",
2129               bcd_version);
2130         return -EINVAL;
2131     }
2132     switch (w_index) {
2133     case 0x4:
2134         *next_type = FFS_OS_DESC_EXT_COMPAT;
2135         break;
2136     case 0x5:
2137         *next_type = FFS_OS_DESC_EXT_PROP;
2138         break;
2139     default:
2140         pr_vdebug("unsupported os descriptor type: %d", w_index);
2141         return -EINVAL;
2142     }
2143 
2144     return sizeof(*desc);
2145 }
2146 
2147 /*
2148  * Process all extended compatibility/extended property descriptors
2149  * of a feature descriptor
2150  */
ffs_do_single_os_desc(char * data,unsigned len,enum ffs_os_desc_type type,u16 feature_count,ffs_os_desc_callback entity,void * priv,struct usb_os_desc_header * h)2151 static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
2152                 enum ffs_os_desc_type type,
2153                 u16 feature_count,
2154                 ffs_os_desc_callback entity,
2155                 void *priv,
2156                 struct usb_os_desc_header *h)
2157 {
2158     int ret;
2159     const unsigned _len = len;
2160 
2161     ENTER();
2162 
2163     /* loop over all ext compat/ext prop descriptors */
2164     while (feature_count--) {
2165         ret = entity(type, h, data, len, priv);
2166         if (unlikely(ret < 0)) {
2167             pr_debug("bad OS descriptor, type: %d\n", type);
2168             return ret;
2169         }
2170         data += ret;
2171         len -= ret;
2172     }
2173     return _len - len;
2174 }
2175 
2176 /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
/*
 * Parse @count Feature Descriptors from @data/@len: validate each header
 * with __ffs_do_os_desc_header(), then hand its records to @entity via
 * ffs_do_single_os_desc().  Returns bytes consumed or a negative error.
 */
static int __must_check ffs_do_os_descs(unsigned count,
                char *data, unsigned len,
                ffs_os_desc_callback entity, void *priv)
{
    const unsigned _len = len;
    unsigned long num = 0;

    ENTER();

    for (num = 0; num < count; ++num) {
        int ret;
        enum ffs_os_desc_type type;
        u16 feature_count;
        struct usb_os_desc_header *desc = (void *)data;

        /* a full header must fit in the remaining buffer */
        if (len < sizeof(*desc))
            return -EINVAL;

        /*
         * Record "descriptor" entity.
         * Process dwLength, bcdVersion, wIndex, get b/wCount.
         * Move the data pointer to the beginning of extended
         * compatibilities proper or extended properties proper
         * portions of the data
         */
        if (le32_to_cpu(desc->dwLength) > len)
            return -EINVAL;

        ret = __ffs_do_os_desc_header(&type, desc);
        if (unlikely(ret < 0)) {
            pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
                 num, ret);
            return ret;
        }
        /*
         * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
         */
        feature_count = le16_to_cpu(desc->wCount);
        /* ext compat counts are 8-bit; the reserved byte must be clear */
        if (type == FFS_OS_DESC_EXT_COMPAT &&
            (feature_count > 255 || desc->Reserved))
                return -EINVAL;
        len -= ret;
        data += ret;

        /*
         * Process all function/property descriptors
         * of this Feature Descriptor
         */
        ret = ffs_do_single_os_desc(data, len, type,
                        feature_count, entity, priv, desc);
        if (unlikely(ret < 0)) {
            pr_debug("%s returns %d\n", __func__, ret);
            return ret;
        }

        len -= ret;
        data += ret;
    }
    /* total number of bytes consumed */
    return _len - len;
}
2237 
2238 /**
2239  * Validate contents of the buffer from userspace related to OS descriptors.
2240  */
__ffs_data_do_os_desc(enum ffs_os_desc_type type,struct usb_os_desc_header * h,void * data,unsigned len,void * priv)2241 static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2242                  struct usb_os_desc_header *h, void *data,
2243                  unsigned len, void *priv)
2244 {
2245     struct ffs_data *ffs = priv;
2246     u8 length;
2247 
2248     ENTER();
2249 
2250     switch (type) {
2251     case FFS_OS_DESC_EXT_COMPAT: {
2252         struct usb_ext_compat_desc *d = data;
2253         int i;
2254 
2255         if (len < sizeof(*d) ||
2256             d->bFirstInterfaceNumber >= ffs->interfaces_count)
2257             return -EINVAL;
2258         if (d->Reserved1 != 1) {
2259             /*
2260              * According to the spec, Reserved1 must be set to 1
2261              * but older kernels incorrectly rejected non-zero
2262              * values.  We fix it here to avoid returning EINVAL
2263              * in response to values we used to accept.
2264              */
2265             pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
2266             d->Reserved1 = 1;
2267         }
2268         for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2269             if (d->Reserved2[i])
2270                 return -EINVAL;
2271 
2272         length = sizeof(struct usb_ext_compat_desc);
2273     }
2274         break;
2275     case FFS_OS_DESC_EXT_PROP: {
2276         struct usb_ext_prop_desc *d = data;
2277         u32 type, pdl;
2278         u16 pnl;
2279 
2280         if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2281             return -EINVAL;
2282         length = le32_to_cpu(d->dwSize);
2283         if (len < length)
2284             return -EINVAL;
2285         type = le32_to_cpu(d->dwPropertyDataType);
2286         if (type < USB_EXT_PROP_UNICODE ||
2287             type > USB_EXT_PROP_UNICODE_MULTI) {
2288             pr_vdebug("unsupported os descriptor property type: %d",
2289                   type);
2290             return -EINVAL;
2291         }
2292         pnl = le16_to_cpu(d->wPropertyNameLength);
2293         if (length < 14 + pnl) {
2294             pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
2295                   length, pnl, type);
2296             return -EINVAL;
2297         }
2298         pdl = le32_to_cpu(*(__le32 *)((u8 *)data + 10 + pnl));
2299         if (length != 14 + pnl + pdl) {
2300             pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
2301                   length, pnl, pdl, type);
2302             return -EINVAL;
2303         }
2304         ++ffs->ms_os_descs_ext_prop_count;
2305         /* property name reported to the host as "WCHAR"s */
2306         ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
2307         ffs->ms_os_descs_ext_prop_data_len += pdl;
2308     }
2309         break;
2310     default:
2311         pr_vdebug("unknown descriptor: %d\n", type);
2312         return -EINVAL;
2313     }
2314     return length;
2315 }
2316 
/*
 * Consume a FUNCTIONFS_DESCRIPTORS_MAGIC(_V2) blob written to ep0.
 * Called with ffs->mutex held.  On success ownership of @_data passes to
 * @ffs (kept as raw_descs_data); on any failure @_data is freed here.
 *
 * V2 layout: magic, length, flags, [eventfd], fs/hs/ss counts,
 * [os desc count], then the concatenated descriptors themselves.
 */
static int __ffs_data_got_descs(struct ffs_data *ffs,
                char *const _data, size_t len)
{
    char *data = _data, *raw_descs = NULL;
    unsigned os_descs_count = 0, counts[3], flags;
    int ret = -EINVAL, i;
    struct ffs_desc_helper helper;

    ENTER();

    /* second u32 must echo the total blob length */
    if (get_unaligned_le32(data + 4) != len)
        goto error;

    switch (get_unaligned_le32(data)) {
    case FUNCTIONFS_DESCRIPTORS_MAGIC:
        /* legacy format: implies fs+hs descriptors, no flags word */
        flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
        data += 8;
        len  -= 8;
        break;
    case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
        flags = get_unaligned_le32(data + 8);
        ffs->user_flags = flags;
        /* reject any flag bits this implementation does not know */
        if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
                  FUNCTIONFS_HAS_HS_DESC |
                  FUNCTIONFS_HAS_SS_DESC |
                  FUNCTIONFS_HAS_MS_OS_DESC |
                  FUNCTIONFS_VIRTUAL_ADDR |
                  FUNCTIONFS_EVENTFD |
                  FUNCTIONFS_ALL_CTRL_RECIP |
                  FUNCTIONFS_CONFIG0_SETUP)) {
            ret = -ENOSYS;
            goto error;
        }
        data += 12;
        len  -= 12;
        break;
    default:
        goto error;
    }

    /* optional eventfd: signalled alongside the event queue */
    if (flags & FUNCTIONFS_EVENTFD) {
        if (len < 4)
            goto error;
        ffs->ffs_eventfd =
            eventfd_ctx_fdget((int)get_unaligned_le32(data));
        if (IS_ERR(ffs->ffs_eventfd)) {
            ret = PTR_ERR(ffs->ffs_eventfd);
            ffs->ffs_eventfd = NULL;
            goto error;
        }
        data += 4;
        len  -= 4;
    }

    /* Read fs_count, hs_count and ss_count (if present) */
    for (i = 0; i < 3; ++i) {
        if (!(flags & (1 << i))) {
            counts[i] = 0;
        } else if (len < 4) {
            goto error;
        } else {
            counts[i] = get_unaligned_le32(data);
            data += 4;
            len  -= 4;
        }
    }
    /*
     * i == 3 after the loop; bit 3 is taken to be
     * FUNCTIONFS_HAS_MS_OS_DESC (NOTE(review): relies on the flag's
     * bit position in the uapi header -- confirm).
     */
    if (flags & (1 << i)) {
        if (len < 4) {
            goto error;
        }
        os_descs_count = get_unaligned_le32(data);
        data += 4;
        len -= 4;
    }

    /* Read descriptors */
    raw_descs = data;
    helper.ffs = ffs;
    for (i = 0; i < 3; ++i) {
        if (!counts[i])
            continue;
        helper.interfaces_count = 0;
        helper.eps_count = 0;
        ret = ffs_do_descs(counts[i], data, len,
                   __ffs_data_do_entity, &helper);
        if (ret < 0)
            goto error;
        /* all speeds must declare identical interface/ep counts */
        if (!ffs->eps_count && !ffs->interfaces_count) {
            ffs->eps_count = helper.eps_count;
            ffs->interfaces_count = helper.interfaces_count;
        } else {
            if (ffs->eps_count != helper.eps_count) {
                ret = -EINVAL;
                goto error;
            }
            if (ffs->interfaces_count != helper.interfaces_count) {
                ret = -EINVAL;
                goto error;
            }
        }
        data += ret;
        len  -= ret;
    }
    if (os_descs_count) {
        ret = ffs_do_os_descs(os_descs_count, data, len,
                      __ffs_data_do_os_desc, ffs);
        if (ret < 0)
            goto error;
        data += ret;
        len -= ret;
    }

    /* at least one descriptor set and no trailing garbage allowed */
    if (raw_descs == data || len) {
        ret = -EINVAL;
        goto error;
    }

    ffs->raw_descs_data    = _data;
    ffs->raw_descs        = raw_descs;
    ffs->raw_descs_length    = data - raw_descs;
    ffs->fs_descs_count    = counts[0];
    ffs->hs_descs_count    = counts[1];
    ffs->ss_descs_count    = counts[2];
    ffs->ms_os_descs_count    = os_descs_count;

    return 0;

error:
    kfree(_data);
    return ret;
}
2448 
/*
 * Consume a FUNCTIONFS_STRINGS_MAGIC blob written to ep0.  Called with
 * ffs->mutex held.  On success ownership of @_data moves to
 * ffs->raw_strings; on failure (or when no strings are needed) @_data
 * is freed here.
 *
 * Fix: the blob carries str_count strings *per language*, but the inner
 * loop decremented str_count itself, so with lang_count > 1 the second
 * language's loop started at 0 and underflowed the u32 counter.  Use a
 * per-language copy (str_per_lang), matching upstream f_fs.c.
 */
static int __ffs_data_got_strings(struct ffs_data *ffs,
                char *const _data, size_t len)
{
    u32 str_count, needed_count, lang_count;
    struct usb_gadget_strings **stringtabs = NULL, *t = NULL;
    const char *data = _data;
    struct usb_string *s = NULL;

    ENTER();

    /* header: magic, total length, string count, language count */
    if (unlikely(len < 16 ||
             get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
             get_unaligned_le32(data + 4) != len))
        goto error;
    str_count  = get_unaligned_le32(data + 8);
    lang_count = get_unaligned_le32(data + 12);

    /* if one is zero the other must be zero */
    if (unlikely(!str_count != !lang_count))
        goto error;

    /* Do we have at least as many strings as descriptors need? */
    needed_count = ffs->strings_count;
    if (unlikely(str_count < needed_count))
        goto error;

    /*
     * If we don't need any strings just return and free all
     * memory.
     */
    if (!needed_count) {
        kfree(_data);
        return 0;
    }

    /* Allocate everything in one chunk so there's less maintenance. */
    {
        unsigned i = 0;
        vla_group(d);
        vla_item(d, struct usb_gadget_strings *, stringtabs,
            lang_count + 1);
        vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
        vla_item(d, struct usb_string, strings,
            lang_count*(needed_count+1));

        char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);

        if (unlikely(!vlabuf)) {
            kfree(_data);
            return -ENOMEM;
        }

        /* Initialize the VLA pointers */
        stringtabs = vla_ptr(vlabuf, d, stringtabs);
        t = vla_ptr(vlabuf, d, stringtab);
        i = lang_count;
        do {
            *stringtabs++ = t++;
        } while (--i);
        *stringtabs = NULL;

        /* stringtabs = vlabuf = d_stringtabs for later kfree */
        stringtabs = vla_ptr(vlabuf, d, stringtabs);
        t = vla_ptr(vlabuf, d, stringtab);
        s = vla_ptr(vlabuf, d, strings);
    }

    /* For each language */
    data += 16;
    len -= 16;

    do { /* lang_count > 0 so we can use do-while */
        unsigned needed = needed_count;
        u32 str_per_lang = str_count;

        /* need the 2-byte language id plus at least one NUL */
        if (unlikely(len < 3))
            goto error_free;
        t->language = get_unaligned_le16(data);
        t->strings  = s;
        ++t;

        data += 2;
        len -= 2;

        /* For each string */
        do { /* str_count > 0 so we can use do-while */
            size_t length = strnlen(data, len);

            /* every string must be NUL-terminated inside the buffer */
            if (unlikely(length == len))
                goto error_free;

            /*
             * User may provide more strings then we need,
             * if that's the case we simply ignore the
             * rest
             */
            if (likely(needed)) {
                /*
                 * s->id will be set while adding
                 * function to configuration so for
                 * now just leave garbage here.
                 */
                s->s = data;
                --needed;
                ++s;
            }

            data += length + 1;
            len -= length + 1;
        } while (--str_per_lang);

        s->id = 0;   /* terminator */
        s->s = NULL;
        ++s;

    } while (--lang_count);

    /* Some garbage left? */
    if (unlikely(len))
        goto error_free;

    /* Done! */
    ffs->stringtabs = stringtabs;
    ffs->raw_strings = _data;

    return 0;

error_free:
    kfree(stringtabs);
error:
    kfree(_data);
    return -EINVAL;
}
2581 
2582 /* Events handling and management *******************************************/
/*
 * Append @type to the event queue, first purging queued events that it
 * makes obsolete, then wake readers (and the optional eventfd).  Caller
 * must hold ffs->ev.waitq.lock -- wake_up_locked() below relies on it;
 * see ffs_event_add() for the locking wrapper.
 */
static void __ffs_event_add(struct ffs_data *ffs,
                enum usb_functionfs_event_type type)
{
    enum usb_functionfs_event_type rem_type1, rem_type2 = type;
    int neg = 0;

    /*
     * Abort any unhandled setup
     *
     * We do not need to worry about some cmpxchg() changing value
     * of ffs->setup_state without holding the lock because when
     * state is FFS_SETUP_PENDING cmpxchg() in several places in
     * the source does nothing.
     */
    if (ffs->setup_state == FFS_SETUP_PENDING)
        ffs->setup_state = FFS_SETUP_CANCELLED;

    /*
     * Logic of this function guarantees that there are at most four pending
     * evens on ffs->ev.types queue.  This is important because the queue
     * has space for four elements only and __ffs_ep0_read_events function
     * depends on that limit as well.  If more event types are added, those
     * limits have to be revisited or guaranteed to still hold.
     */
    switch (type) {
    case FUNCTIONFS_RESUME:
        rem_type2 = FUNCTIONFS_SUSPEND;
        /* FALL THROUGH */
    case FUNCTIONFS_SUSPEND:
    case FUNCTIONFS_SETUP:
        rem_type1 = type;
        /* Discard all similar events */
        break;

    case FUNCTIONFS_BIND:
    case FUNCTIONFS_UNBIND:
    case FUNCTIONFS_DISABLE:
    case FUNCTIONFS_ENABLE:
        /* Discard everything other then power management. */
        rem_type1 = FUNCTIONFS_SUSPEND;
        rem_type2 = FUNCTIONFS_RESUME;
        neg = 1;
        break;

    default:
        WARN(1, "%d: unknown event, this should not happen\n", type);
        return;
    }

    {
        /*
         * Compact the queue in place: keep events matching the filter
         * chosen above (neg selects keep-on-match vs drop-on-match).
         */
        u8 *ev  = ffs->ev.types, *out = ev;
        unsigned n = ffs->ev.count;
        for (; n; --n, ++ev)
            if ((*ev == rem_type1 || *ev == rem_type2) == neg)
                *out++ = *ev;
            else
                pr_vdebug("purging event %d\n", *ev);
        ffs->ev.count = out - ffs->ev.types;
    }

    pr_vdebug("adding event %d\n", type);
    ffs->ev.types[ffs->ev.count++] = type;
    wake_up_locked(&ffs->ev.waitq);
    if (ffs->ffs_eventfd)
        eventfd_signal(ffs->ffs_eventfd, 1);
}
2649 
ffs_event_add(struct ffs_data * ffs,enum usb_functionfs_event_type type)2650 static void ffs_event_add(struct ffs_data *ffs,
2651               enum usb_functionfs_event_type type)
2652 {
2653     unsigned long flags;
2654     spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2655     __ffs_event_add(ffs, type);
2656     spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2657 }
2658 
2659 /* Bind/unbind USB function hooks *******************************************/
2660 
/*
 * Map a (possibly virtual) endpoint address to its 1-based slot in
 * ffs->eps_addrmap.  Slot 0 is never used.  Returns -ENOENT when the
 * address is not in the map.
 */
static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
{
    int idx = 1;

    while (idx < ARRAY_SIZE(ffs->eps_addrmap)) {
        if (ffs->eps_addrmap[idx] == endpoint_address)
            return idx;
        ++idx;
    }

    return -ENOENT;
}
2670 
/*
 * ffs_do_descs() callback used at bind time: store each descriptor
 * pointer into the speed-specific usb_function array and, for endpoint
 * descriptors, attach a hardware endpoint.  The first pass to see an
 * endpoint autoconfigures it and allocates its request; later passes
 * rewrite the descriptor to reuse the already-chosen address.
 */
static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
                struct usb_descriptor_header *desc,
                void *priv)
{
    struct usb_endpoint_descriptor *ds = (void *)desc;
    struct ffs_function *func = priv;
    struct ffs_ep *ffs_ep = NULL;
    unsigned ep_desc_id;
    int idx;
    static const char *speed_names[] = { "full", "high", "super" };

    /* only the FFS_DESCRIPTOR events matter here */
    if (type != FFS_DESCRIPTOR)
        return 0;

    /*
     * If ss_descriptors is not NULL, we are reading super speed
     * descriptors; if hs_descriptors is not NULL, we are reading high
     * speed descriptors; otherwise, we are reading full speed
     * descriptors.
     */
    if (func->function.ss_descriptors) {
        ep_desc_id = 2;
        func->function.ss_descriptors[(uintptr_t)valuep] = desc;
    } else if (func->function.hs_descriptors) {
        ep_desc_id = 1;
        func->function.hs_descriptors[(uintptr_t)valuep] = desc;
    } else {
        ep_desc_id = 0;
        func->function.fs_descriptors[(uintptr_t)valuep]    = desc;
    }

    /* everything below is endpoint-descriptor specific */
    if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
        return 0;

    /* translate the address to a 0-based index into func->eps */
    idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
    if (idx < 0)
        return idx;

    ffs_ep = func->eps + idx;

    /* each endpoint may carry only one descriptor per speed */
    if (unlikely(ffs_ep->descs[ep_desc_id])) {
        pr_err("two %sspeed descriptors for EP %d\n",
              speed_names[ep_desc_id],
              ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
        return -EINVAL;
    }
    ffs_ep->descs[ep_desc_id] = ds;

    ffs_dump_mem(": Original  ep desc", ds, ds->bLength);
    if (ffs_ep->ep) {
        /* already configured: copy address (and packet size if unset)
         * from the first-speed descriptor */
        ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
        if (!ds->wMaxPacketSize)
            ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
    } else {
        struct usb_request *req = NULL;
        struct usb_ep *ep = NULL;
        u8 bEndpointAddress;

        /*
         * We back up bEndpointAddress because autoconfig overwrites
         * it with physical endpoint address.
         */
        bEndpointAddress = ds->bEndpointAddress;
        pr_vdebug("autoconfig\n");
        ep = usb_ep_autoconfig(func->gadget, ds);
        if (unlikely(!ep))
            return -ENOTSUPP;
        ep->driver_data = func->eps + idx;

        req = usb_ep_alloc_request(ep, GFP_KERNEL);
        if (unlikely(!req))
            return -ENOMEM;

        ffs_ep->ep  = ep;
        ffs_ep->req = req;
        INIT_LIST_HEAD(&ffs_ep->req->list);
        /* remember which eps[] slot owns this endpoint number */
        func->eps_revmap[ds->bEndpointAddress &
                 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
        /*
         * If we use virtual address mapping, we restore
         * original bEndpointAddress value.
         */
        if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
            ds->bEndpointAddress = bEndpointAddress;
    }
    ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);

    return 0;
}
2760 
/*
 * ffs_do_descs() callback for the second bind pass: rewrites the
 * interface, string and (non-USB_DT_ENDPOINT) endpoint numbers stored in
 * the user-supplied descriptors to the values actually allocated by the
 * composite framework.  Returns 0 on success or a negative errno.
 */
static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
                struct usb_descriptor_header *desc,
                void *priv)
{
    struct ffs_function *func = priv;
    unsigned idx;
    u8 newValue;

    switch (type) {
    default:
    case FFS_DESCRIPTOR:
        /* Handled in previous pass by __ffs_func_bind_do_descs() */
        return 0;

    case FFS_INTERFACE:
        /* Allocate a host-visible interface id on first sight. */
        idx = *valuep;
        if (func->interfaces_nums[idx] < 0) {
            int id = usb_interface_id(func->conf, &func->function);
            if (unlikely(id < 0))
                return id;
            func->interfaces_nums[idx] = id;
        }
        newValue = func->interfaces_nums[idx];
        break;

    case FFS_STRING:
        /* String IDs are allocated when ffs_data is bound to cdev. */
        newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
        break;

    case FFS_ENDPOINT:
        /*
         * USB_DT_ENDPOINT are handled in
         * __ffs_func_bind_do_descs().
         */
        if (desc->bDescriptorType == USB_DT_ENDPOINT)
            return 0;

        idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
        if (unlikely(!func->eps[idx].ep))
            return -EINVAL;

        {
            /* Prefer the full-speed descriptor's address when present. */
            struct usb_endpoint_descriptor **descs;
            descs = func->eps[idx].descs;
            newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
        }
        break;
    }

    pr_vdebug("%02x -> %02x\n", *valuep, newValue);
    *valuep = newValue;
    return 0;
}
2815 
/*
 * ffs_do_os_descs() callback: translate one user-supplied MS OS
 * descriptor into the usb_os_desc_table entry of the interface it
 * targets.  Storage for ext_prop records, names and data comes from the
 * per-bind "avail" bump pointers carved out of the vlabuf chunk in
 * _ffs_func_bind().  Returns the number of raw bytes consumed, or 0 for
 * an unknown descriptor type.
 */
static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
                struct usb_os_desc_header *h, void *data,
                unsigned len, void *priv)
{
    struct ffs_function *func = priv;
    u8 length = 0;

    switch (type) {
    case FFS_OS_DESC_EXT_COMPAT: {
        struct usb_ext_compat_desc *desc = data;
        struct usb_os_desc_table *t;

        t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
        t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
        /* CompatibleID and SubCompatibleID are adjacent; copy both at once. */
        memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
            ARRAY_SIZE(desc->CompatibleID) + ARRAY_SIZE(desc->SubCompatibleID));
        length = sizeof(*desc);
    }
        break;
    case FFS_OS_DESC_EXT_PROP: {
        struct usb_ext_prop_desc *desc = data;
        struct usb_os_desc_table *t;
        struct usb_os_desc_ext_prop *ext_prop;
        char *ext_prop_name;
        char *ext_prop_data;

        t = &func->function.os_desc_table[h->interface];
        t->if_id = func->interfaces_nums[h->interface];

        /* Carve one ext_prop record out of the preallocated pool. */
        ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
        func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);

        ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
        ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
        ext_prop->data_len = le32_to_cpu(*(__le32 *)
            usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
        /* 14 == size of the fixed fields of an ext_prop descriptor. */
        length = ext_prop->name_len + ext_prop->data_len + 14;

        /* Carve the name and data buffers out of the same pool. */
        ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
        func->ffs->ms_os_descs_ext_prop_name_avail +=
            ext_prop->name_len;

        ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
        func->ffs->ms_os_descs_ext_prop_data_avail +=
            ext_prop->data_len;
        memcpy(ext_prop_data, usb_ext_prop_data_ptr(data, ext_prop->name_len),
            ext_prop->data_len);
        /* unicode data reported to the host as "WCHAR"s */
        switch (ext_prop->type) {
        case USB_EXT_PROP_UNICODE:
        case USB_EXT_PROP_UNICODE_ENV:
        case USB_EXT_PROP_UNICODE_LINK:
        case USB_EXT_PROP_UNICODE_MULTI:
            ext_prop->data_len *= 2;
            break;
        }
        ext_prop->data = ext_prop_data;

        memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
            ext_prop->name_len);
        /* property name reported to the host as "WCHAR"s */
        ext_prop->name_len *= 2;
        ext_prop->name = ext_prop_name;

        t->os_desc->ext_prop_len +=
            ext_prop->name_len + ext_prop->data_len + 14;
        ++t->os_desc->ext_prop_count;
        list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
    }
        break;
    default:
        pr_vdebug("unknown descriptor: %d\n", type);
    }

    return length;
}
2892 
/*
 * Resolve the ffs_data instance backing this function and account one
 * more user on its f_fs_opts.  Called from ffs_func_bind().  Returns the
 * opts on success, or an ERR_PTR() when the device has not received its
 * descriptors yet or when functionfs_bind() fails.
 */
static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
                struct usb_configuration *c)
{
    struct ffs_function *func = ffs_func_from_usb(f);
    struct f_fs_opts *ffs_opts =
        container_of(f->fi, struct f_fs_opts, func_inst);
    int ret;

    ENTER();

    /*
     * Legacy gadget triggers binding in functionfs_ready_callback,
     * which already uses locking; taking the same lock here would
     * cause a deadlock.
     *
     * Configfs-enabled gadgets however do need ffs_dev_lock.
     */
    if (!ffs_opts->no_configfs)
        ffs_dev_lock();
    /* Userspace must have written descriptors before we can bind. */
    ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
    func->ffs = ffs_opts->dev->ffs_data;
    if (!ffs_opts->no_configfs)
        ffs_dev_unlock();
    if (ret)
        return ERR_PTR(ret);

    func->conf = c;
    func->gadget = c->cdev->gadget;

    /*
     * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
     * configurations are bound in sequence with list_for_each_entry,
     * in each configuration its functions are bound in sequence
     * with list_for_each_entry, so we assume no race condition
     * with regard to ffs_opts->bound access
     */
    if (!ffs_opts->refcnt) {
        /* First user: bind the ffs_data to the composite dev. */
        ret = functionfs_bind(func->ffs, c->cdev);
        if (ret)
            return ERR_PTR(ret);
    }
    ffs_opts->refcnt++;
    func->function.strings = func->ffs->stringtabs;

    return ffs_opts;
}
2939 
_ffs_func_bind(struct usb_configuration * c,struct usb_function * f)2940 static int _ffs_func_bind(struct usb_configuration *c, struct usb_function *f)
2941 {
2942     struct ffs_function *func = ffs_func_from_usb(f);
2943     struct ffs_data *ffs = func->ffs;
2944 
2945     const int full = !!func->ffs->fs_descs_count;
2946     const int high = !!func->ffs->hs_descs_count;
2947     const int super = !!func->ffs->ss_descs_count;
2948 
2949     int fs_len, hs_len, ss_len, ret, i;
2950     struct ffs_ep *eps_ptr = NULL;
2951     struct usb_descriptor_header *des_head = NULL;
2952     struct usb_interface_descriptor *intf_ctl = NULL;
2953     struct usb_interface_descriptor *intf_data = NULL;
2954     /* Make it a single chunk, less management later on */
2955     vla_group(d);
2956     vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
2957     vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
2958         full ? ffs->fs_descs_count + 1 : 0);
2959     vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
2960         high ? ffs->hs_descs_count + 1 : 0);
2961     vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
2962         super ? ffs->ss_descs_count + 1 : 0);
2963     vla_item_with_sz(d, short, inums, ffs->interfaces_count);
2964     vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
2965              c->cdev->use_os_string ? ffs->interfaces_count : 0);
2966     vla_item_with_sz(d, char[16], ext_compat,
2967              c->cdev->use_os_string ? ffs->interfaces_count : 0);
2968     vla_item_with_sz(d, struct usb_os_desc, os_desc,
2969              c->cdev->use_os_string ? ffs->interfaces_count : 0);
2970     vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
2971              ffs->ms_os_descs_ext_prop_count);
2972     vla_item_with_sz(d, char, ext_prop_name,
2973              ffs->ms_os_descs_ext_prop_name_len);
2974     vla_item_with_sz(d, char, ext_prop_data,
2975              ffs->ms_os_descs_ext_prop_data_len);
2976     vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
2977     char *vlabuf = NULL;
2978 
2979     ENTER();
2980 
2981     /* Has descriptors only for speeds gadget does not support */
2982     if (unlikely(!(full | high | super)))
2983         return -ENOTSUPP;
2984 
2985     /* Allocate a single chunk, less management later on */
2986     vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
2987     if (unlikely(!vlabuf))
2988         return -ENOMEM;
2989 
2990     ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
2991     ffs->ms_os_descs_ext_prop_name_avail =
2992         vla_ptr(vlabuf, d, ext_prop_name);
2993     ffs->ms_os_descs_ext_prop_data_avail =
2994         vla_ptr(vlabuf, d, ext_prop_data);
2995 
2996     /* Copy descriptors  */
2997     memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs, ffs->raw_descs_length);
2998 
2999     memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
3000 
3001     eps_ptr = vla_ptr(vlabuf, d, eps);
3002     for (i = 0; i < ffs->eps_count; i++)
3003         eps_ptr[i].num = -1;
3004 
3005     /* Save pointers
3006      * d_eps == vlabuf, func->eps used to kfree vlabuf later
3007     */
3008     func->eps             = vla_ptr(vlabuf, d, eps);
3009     func->interfaces_nums = vla_ptr(vlabuf, d, inums);
3010 
3011     /*
3012      * Go through all the endpoint descriptors and allocate
3013      * endpoints first, so that later we can rewrite the endpoint
3014      * numbers without worrying that it may be described later on.
3015      */
3016     if (likely(full)) {
3017         func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
3018         fs_len = ffs_do_descs(ffs->fs_descs_count,
3019                       vla_ptr(vlabuf, d, raw_descs),
3020                       d_raw_descs__sz,
3021                       __ffs_func_bind_do_descs, func);
3022         if (unlikely(fs_len < 0)) {
3023             ret = fs_len;
3024             goto error;
3025         }
3026     } else {
3027         fs_len = 0;
3028     }
3029     if (likely(high)) {
3030         func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
3031         hs_len = ffs_do_descs(ffs->hs_descs_count,
3032                       vla_ptr(vlabuf, d, raw_descs) + fs_len,
3033                       d_raw_descs__sz - fs_len,
3034                       __ffs_func_bind_do_descs, func);
3035         if (unlikely(hs_len < 0)) {
3036             ret = hs_len;
3037             goto error;
3038         }
3039     } else {
3040         hs_len = 0;
3041     }
3042     if (likely(super)) {
3043         func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
3044         ss_len = ffs_do_descs(ffs->ss_descs_count,
3045                 vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
3046                 d_raw_descs__sz - fs_len - hs_len,
3047                 __ffs_func_bind_do_descs, func);
3048         if (unlikely(ss_len < 0)) {
3049             ret = ss_len;
3050             goto error;
3051         }
3052     } else {
3053         ss_len = 0;
3054     }
3055     /*
3056      * Now handle interface numbers allocation and interface and
3057      * endpoint numbers rewriting.  We can do that in one go
3058      * now.
3059      */
3060     ret = ffs_do_descs(ffs->fs_descs_count +
3061                (high ? ffs->hs_descs_count : 0) +
3062                (super ? ffs->ss_descs_count : 0),
3063                vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
3064                __ffs_func_bind_do_nums, func);
3065     if (unlikely(ret < 0))
3066         goto error;
3067 
3068     func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
3069     if (c->cdev->use_os_string) {
3070         for (i = 0; i < ffs->interfaces_count; ++i) {
3071             struct usb_os_desc *desc;
3072 
3073             desc = func->function.os_desc_table[i].os_desc =
3074                 vla_ptr(vlabuf, d, os_desc) +
3075                 i * sizeof(struct usb_os_desc);
3076             desc->ext_compat_id =
3077                 vla_ptr(vlabuf, d, ext_compat) + i * 16;
3078             INIT_LIST_HEAD(&desc->ext_prop);
3079         }
3080         ret = ffs_do_os_descs(ffs->ms_os_descs_count,
3081                       vla_ptr(vlabuf, d, raw_descs) +
3082                       fs_len + hs_len + ss_len,
3083                       d_raw_descs__sz - fs_len - hs_len -
3084                       ss_len,
3085                       __ffs_func_bind_do_os_desc, func);
3086         if (unlikely(ret < 0))
3087             goto error;
3088     }
3089     func->function.os_desc_n =
3090         c->cdev->use_os_string ? ffs->interfaces_count : 0;
3091 
3092     for (i = 0; i< func->ffs->fs_descs_count; i++) {
3093         des_head = func->function.fs_descriptors[i];
3094         if (des_head->bDescriptorType == USB_DT_INTERFACE) {
3095             struct usb_interface_descriptor *intf = (struct usb_interface_descriptor *)des_head;
3096             if (intf->bNumEndpoints > 0) {
3097                 if (intf_ctl == NULL) {
3098                     intf_ctl = intf;
3099                 } else {
3100                     intf_data = intf;
3101                     break;
3102                 }
3103             }
3104         }
3105     }
3106     for (i = 0; i< func->ffs->fs_descs_count; i++) {
3107         des_head = func->function.fs_descriptors[i];
3108         if (des_head->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) {
3109             struct usb_interface_assoc_descriptor *a_dec = (struct usb_interface_assoc_descriptor *)des_head;
3110             a_dec->bFirstInterface = intf_ctl->bInterfaceNumber;
3111         } else if (des_head->bDescriptorType == USB_DT_CS_INTERFACE) {
3112             struct usb_cdc_header_desc *cs_des = (struct usb_cdc_header_desc *)des_head;
3113             if (cs_des->bDescriptorSubType == USB_CDC_CALL_MANAGEMENT_TYPE) {
3114                 struct usb_cdc_call_mgmt_descriptor *mgmt_des = (struct usb_cdc_call_mgmt_descriptor *)des_head;
3115                 mgmt_des->bDataInterface = intf_data->bInterfaceNumber;
3116             } else if (cs_des->bDescriptorSubType == USB_CDC_UNION_TYPE) {
3117                 struct usb_cdc_union_desc *union_des = (struct usb_cdc_union_desc *)des_head;
3118                 union_des->bMasterInterface0 = intf_ctl->bInterfaceNumber;
3119                 union_des->bSlaveInterface0 = intf_data->bInterfaceNumber;
3120             } else if (cs_des->bDescriptorSubType == USB_CDC_ETHERNET_TYPE) {
3121                 struct usb_cdc_ether_desc *ether_des = (struct usb_cdc_ether_desc *)des_head;
3122                 ether_des->iMACAddress = intf_ctl->iInterface + 1;
3123             }
3124         }
3125     }
3126     for (i = 0; i< func->ffs->hs_descs_count; i++) {
3127         des_head = func->function.hs_descriptors[i];
3128         if (des_head->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) {
3129             struct usb_interface_assoc_descriptor *a_dec = (struct usb_interface_assoc_descriptor *)des_head;
3130             a_dec->bFirstInterface = intf_ctl->bInterfaceNumber;
3131         } else if (des_head->bDescriptorType == USB_DT_CS_INTERFACE) {
3132             struct usb_cdc_header_desc *cs_des = (struct usb_cdc_header_desc *)des_head;
3133             if (cs_des->bDescriptorSubType == USB_CDC_CALL_MANAGEMENT_TYPE) {
3134                 struct usb_cdc_call_mgmt_descriptor *mgmt_des = (struct usb_cdc_call_mgmt_descriptor *)des_head;
3135                 mgmt_des->bDataInterface = intf_data->bInterfaceNumber;
3136             } else if (cs_des->bDescriptorSubType == USB_CDC_UNION_TYPE) {
3137                 struct usb_cdc_union_desc *union_des = (struct usb_cdc_union_desc *)des_head;
3138                 union_des->bMasterInterface0 = intf_ctl->bInterfaceNumber;
3139                 union_des->bSlaveInterface0 = intf_data->bInterfaceNumber;
3140             } else if (cs_des->bDescriptorSubType == USB_CDC_ETHERNET_TYPE) {
3141                 struct usb_cdc_ether_desc *ether_des = (struct usb_cdc_ether_desc *)des_head;
3142                 ether_des->iMACAddress = intf_ctl->iInterface + 1;
3143             }
3144         }
3145     }
3146     for (i = 0; i< func->ffs->ss_descs_count; i++) {
3147         des_head = func->function.ss_descriptors[i];
3148         if (des_head->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) {
3149             struct usb_interface_assoc_descriptor *a_dec = (struct usb_interface_assoc_descriptor *)des_head;
3150             a_dec->bFirstInterface = intf_ctl->bInterfaceNumber;
3151         } else if (des_head->bDescriptorType == USB_DT_CS_INTERFACE) {
3152             struct usb_cdc_header_desc *cs_des = (struct usb_cdc_header_desc *)des_head;
3153             if (cs_des->bDescriptorSubType == USB_CDC_CALL_MANAGEMENT_TYPE) {
3154                 struct usb_cdc_call_mgmt_descriptor *mgmt_des = (struct usb_cdc_call_mgmt_descriptor *)des_head;
3155                 mgmt_des->bDataInterface = intf_data->bInterfaceNumber;
3156             } else if (cs_des->bDescriptorSubType == USB_CDC_UNION_TYPE) {
3157                 struct usb_cdc_union_desc *union_des = (struct usb_cdc_union_desc *)des_head;
3158                 union_des->bMasterInterface0 = intf_ctl->bInterfaceNumber;
3159                 union_des->bSlaveInterface0 = intf_data->bInterfaceNumber;
3160             } else if (cs_des->bDescriptorSubType == USB_CDC_ETHERNET_TYPE) {
3161                 struct usb_cdc_ether_desc *ether_des = (struct usb_cdc_ether_desc *)des_head;
3162                 ether_des->iMACAddress = intf_ctl->iInterface + 1;
3163             }
3164         }
3165     }
3166     /* And we're done */
3167     ffs->eps = func->eps;
3168     ffs_event_add(ffs, FUNCTIONFS_BIND);
3169     return 0;
3170 
3171 error:
3172     /* XXX Do we need to release all claimed endpoints here? */
3173     return ret;
3174 }
3175 
/*
 * usb_function ->bind callback: look up and refcount the backing
 * ffs_data, then perform the actual binding.  On failure the reference
 * taken in ffs_do_functionfs_bind() is dropped again and, if it was the
 * last one, the ffs_data is unbound from the composite dev.
 */
static int ffs_func_bind(struct usb_configuration *c, struct usb_function *f)
{
    struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
    struct ffs_function *func = ffs_func_from_usb(f);
    int ret;

    if (IS_ERR(ffs_opts))
        return PTR_ERR(ffs_opts);

    ret = _ffs_func_bind(c, f);
    /* Undo the refcount taken in ffs_do_functionfs_bind() on error. */
    if (ret && !--ffs_opts->refcnt)
        functionfs_unbind(func->ffs);

    return ret;
}
3191 
3192 /* Other USB function hooks *************************************************/
ffs_reset_work(struct work_struct * work)3193 static void ffs_reset_work(struct work_struct *work)
3194 {
3195     struct ffs_data *ffs = container_of(work,
3196         struct ffs_data, reset_work);
3197     ffs_data_reset(ffs);
3198 }
3199 
/*
 * usb_function ->set_alt (also reached from ffs_func_disable() with
 * alt == -1): switches this function's endpoints on or off and posts the
 * matching ENABLE/DISABLE event to userspace.  Returns 0 or a negative
 * errno.
 */
static int ffs_func_set_alt(struct usb_function *f,
                unsigned interface, unsigned alt)
{
    struct ffs_function *func = ffs_func_from_usb(f);
    struct ffs_data *ffs = func->ffs;
    int ret = 0, intf;

    if (alt != (unsigned)-1) {
        intf = ffs_func_revmap_intf(func, interface);
        if (unlikely(intf < 0))
            return intf;
    }

    /* Quiesce whichever function instance is currently active. */
    if (ffs->func)
        ffs_func_eps_disable(ffs->func);

    if (ffs->state == FFS_DEACTIVATED) {
        /* Userspace closed the files; schedule a deferred reset. */
        ffs->state = FFS_CLOSING;
        INIT_WORK(&ffs->reset_work, ffs_reset_work);
        schedule_work(&ffs->reset_work);
        return -ENODEV;
    }

    if (ffs->state != FFS_ACTIVE)
        return -ENODEV;

    /* alt == -1 means "disable this function" (see ffs_func_disable()). */
    if (alt == (unsigned)-1) {
        ffs->func = NULL;
        ffs_event_add(ffs, FUNCTIONFS_DISABLE);
        return 0;
    }

    ffs->func = func;
    ret = ffs_func_eps_enable(func);
    if (likely(ret >= 0))
        ffs_event_add(ffs, FUNCTIONFS_ENABLE);
    return ret;
}
3238 
/* usb_function ->disable: implemented as set_alt() with alt == -1. */
static void ffs_func_disable(struct usb_function *f)
{
    (void)ffs_func_set_alt(f, 0, (unsigned)-1);
}
3243 
/*
 * usb_function ->setup: forward control requests to userspace as a
 * FUNCTIONFS_SETUP event.  wIndex is rewritten to the user's virtual
 * interface/endpoint number before queuing.  Returns a negative errno
 * when the request is rejected, USB_GADGET_DELAYED_STATUS for
 * zero-length requests, 0 otherwise.
 */
static int ffs_func_setup(struct usb_function *f, const struct usb_ctrlrequest *creq)
{
    struct ffs_function *func = ffs_func_from_usb(f);
    struct ffs_data *ffs = func->ffs;
    unsigned long flags;
    int ret;

    ENTER();

    pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
    pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
    pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
    pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
    pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));

    /*
     * Most requests directed to interface go through here
     * (notable exceptions are set/get interface) so we need to
     * handle them.  All other either handled by composite or
     * passed to usb_configuration->setup() (if one is set).  No
     * matter, we will handle requests directed to endpoint here
     * as well (as it's straightforward).  Other request recipient
     * types are only handled when the user flag FUNCTIONFS_ALL_CTRL_RECIP
     * is being used.
     */
    if (ffs->state != FFS_ACTIVE)
        return -ENODEV;

    switch (creq->bRequestType & USB_RECIP_MASK) {
    case USB_RECIP_INTERFACE:
        ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
        if (unlikely(ret < 0))
            return ret;
        break;

    case USB_RECIP_ENDPOINT:
        ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
        if (unlikely(ret < 0))
            return ret;
        /* Report the user's virtual endpoint address if requested. */
        if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
            ret = func->ffs->eps_addrmap[ret];
        break;

    default:
        if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP)
            ret = le16_to_cpu(creq->wIndex);
        else
            return -EOPNOTSUPP;
    }

    /* Queue the (rewritten) setup packet for userspace under the event lock. */
    spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
    ffs->ev.setup = *creq;
    ffs->ev.setup.wIndex = cpu_to_le16(ret);
    __ffs_event_add(ffs, FUNCTIONFS_SETUP);
    spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);

    /* No data stage: delay the status stage until userspace responds. */
    return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
3302 
/*
 * usb_function ->req_match: tell composite whether this function wants
 * to handle the given control request.
 */
static bool ffs_func_req_match(struct usb_function *f,
                const struct usb_ctrlrequest *creq,
                bool config0)
{
    struct ffs_function *func = ffs_func_from_usb(f);
    u16 w_index = le16_to_cpu(creq->wIndex);
    u8 recip = creq->bRequestType & USB_RECIP_MASK;

    /* Config-0 requests are only passed through when explicitly enabled. */
    if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP))
        return false;

    if (recip == USB_RECIP_INTERFACE)
        return ffs_func_revmap_intf(func, w_index) >= 0;
    if (recip == USB_RECIP_ENDPOINT)
        return ffs_func_revmap_ep(func, w_index) >= 0;

    /* Other recipients only match when ALL_CTRL_RECIP was requested. */
    return (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP) != 0;
}
3324 
ffs_func_suspend(struct usb_function * f)3325 static void ffs_func_suspend(struct usb_function *f)
3326 {
3327     ENTER();
3328     ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
3329 }
3330 
ffs_func_resume(struct usb_function * f)3331 static void ffs_func_resume(struct usb_function *f)
3332 {
3333     ENTER();
3334     ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
3335 }
3336 
3337 /* Endpoint and interface numbers reverse mapping ***************************/
/*
 * Map a wire endpoint address back to the user's 1-based endpoint
 * number; -EDOM when the address does not belong to this function.
 */
static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
{
    /* Entries are stored as idx + 1, so 0 means "not mapped". */
    u8 mapped = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];

    return mapped ? mapped : -EDOM;
}
3343 
/*
 * Map a host-visible interface number back to its index in the user's
 * descriptors; -EDOM when the interface does not belong to this
 * function.
 */
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
{
    unsigned i;

    for (i = 0; i < func->ffs->interfaces_count; ++i) {
        short num = func->interfaces_nums[i];

        /* Negative entries are "not allocated yet" slots. */
        if (num >= 0 && num == intf)
            return i;
    }

    return -EDOM;
}
3356 
3357 /* Devices management *******************************************************/
3358 static LIST_HEAD(ffs_devices);
3359 
_ffs_do_find_dev(const char * name)3360 static struct ffs_dev *_ffs_do_find_dev(const char *name)
3361 {
3362     struct ffs_dev *dev = NULL;
3363 
3364     if (!name)
3365         return NULL;
3366 
3367     list_for_each_entry(dev, &ffs_devices, entry) {
3368         if (!dev->name)
3369             return NULL;
3370         if (strcmp(dev->name, name) == 0)
3371             return dev;
3372     }
3373 
3374     return NULL;
3375 }
3376 
3377 /*
3378  * ffs_lock must be taken by the caller of this function
3379  */
_ffs_get_single_dev(void)3380 static struct ffs_dev *_ffs_get_single_dev(void)
3381 {
3382     struct ffs_dev *dev = NULL;
3383 
3384     if (list_is_singular(&ffs_devices)) {
3385         dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
3386         if (dev->single)
3387             return dev;
3388     }
3389 
3390     return NULL;
3391 }
3392 
3393 /*
3394  * ffs_lock must be taken by the caller of this function
3395  */
static struct ffs_dev *_ffs_find_dev(const char *name)
{
    /* A lone registered device matches regardless of name. */
    struct ffs_dev *dev = _ffs_get_single_dev();

    return dev ? dev : _ffs_do_find_dev(name);
}
3406 
3407 /* Configfs support *********************************************************/
/* Map a configfs item back to its containing f_fs_opts. */
static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
{
    return container_of(to_config_group(item), struct f_fs_opts,
                func_inst.group);
}
3413 
ffs_attr_release(struct config_item * item)3414 static void ffs_attr_release(struct config_item *item)
3415 {
3416     struct f_fs_opts *opts = to_ffs_opts(item);
3417 
3418     usb_put_function_instance(&opts->func_inst);
3419 }
3420 
/* configfs item operations: releasing the item drops the instance. */
static struct configfs_item_operations ffs_item_ops = {
    .release    = ffs_attr_release,
};
3424 
/* configfs group type for a function instance's directory. */
static const struct config_item_type ffs_func_type = {
    .ct_item_ops    = &ffs_item_ops,
    .ct_owner    = THIS_MODULE,
};
3429 
3430 /* Function registration interface ******************************************/
ffs_free_inst(struct usb_function_instance * f)3431 static void ffs_free_inst(struct usb_function_instance *f)
3432 {
3433     struct f_fs_opts *opts;
3434 
3435     opts = to_f_fs_opts(f);
3436     ffs_dev_lock();
3437     _ffs_free_dev(opts->dev);
3438     ffs_dev_unlock();
3439     kfree(opts);
3440 }
3441 
ffs_set_inst_name(struct usb_function_instance * fi,const char * name)3442 static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
3443 {
3444     char name_dev[MAX_NAMELEN] = {0};
3445     if (snprintf(name_dev, MAX_NAMELEN - 1, "%s.%s", FUNCTION_GENERIC, name) < 0) {
3446         return -EFAULT;
3447     }
3448     if (strlen(name_dev) >= sizeof_field(struct ffs_dev, name))
3449         return -ENAMETOOLONG;
3450     return ffs_name_dev_adapter(to_f_fs_opts(fi)->dev, name_dev);
3451 }
3452 
/*
 * Allocate a usb_function_instance together with its backing ffs_dev.
 * The global device list is manipulated under ffs_lock; on failure the
 * opts are freed and the ERR_PTR from _ffs_alloc_dev() is propagated.
 */
static struct usb_function_instance *ffs_alloc_inst(void)
{
    struct f_fs_opts *opts = NULL;
    struct ffs_dev *dev = NULL;

    opts = kzalloc(sizeof(*opts), GFP_KERNEL);
    if (!opts)
        return ERR_PTR(-ENOMEM);

    opts->func_inst.set_inst_name = ffs_set_inst_name;
    opts->func_inst.free_func_inst = ffs_free_inst;
    ffs_dev_lock();
    dev = _ffs_alloc_dev();
    ffs_dev_unlock();
    if (IS_ERR(dev)) {
        kfree(opts);
        return ERR_CAST(dev);
    }
    /* Link opts and device both ways. */
    opts->dev = dev;
    dev->opts = opts;

    config_group_init_type_name(&opts->func_inst.group, "",
                    &ffs_func_type);
    return &opts->func_inst;
}
3478 
/* usb_function ->free_func: release the ffs_function wrapper. */
static void ffs_free(struct usb_function *f)
{
    struct ffs_function *func = ffs_func_from_usb(f);

    kfree(func);
}
3483 
/*
 * usb_function ->unbind: undo _ffs_func_bind().  Disables the endpoints
 * when this function is the active one, drops the opts refcount (the
 * last user unbinds the ffs_data from the composite dev), frees the
 * per-endpoint requests allocated by autoconfig, and releases the
 * single vlabuf chunk via func->eps -- the descriptor arrays and
 * interfaces_nums live in that same allocation.
 */
static void ffs_func_unbind(struct usb_configuration *c,
                struct usb_function *f)
{
    struct ffs_function *func = ffs_func_from_usb(f);
    struct ffs_data *ffs = func->ffs;
    struct f_fs_opts *opts =
        container_of(f->fi, struct f_fs_opts, func_inst);
    struct ffs_ep *ep = func->eps;
    unsigned count = ffs->eps_count;
    unsigned long flags;

    ENTER();
    if (ffs->func == func) {
        ffs_func_eps_disable(func);
        ffs->func = NULL;
    }

    if (!--opts->refcnt)
        functionfs_unbind(ffs);

    /* cleanup after autoconfig */
    spin_lock_irqsave(&func->ffs->eps_lock, flags);
    while (count--) {
        if (ep->ep && ep->req)
            usb_ep_free_request(ep->ep, ep->req);
        ep->req = NULL;
        ++ep;
    }
    spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
    kfree(func->eps);
    func->eps = NULL;
    /*
     * eps, descriptors and interfaces_nums are allocated in the
     * same chunk so only one free is required.
     */
    func->function.fs_descriptors = NULL;
    func->function.hs_descriptors = NULL;
    func->function.ss_descriptors = NULL;
    func->interfaces_nums = NULL;

    ffs_event_add(ffs, FUNCTIONFS_UNBIND);
}
3526 
/*
 * get_alt callback: interface 0 reports altsetting 0, every other
 * interface reports altsetting 1.
 */
static int ffs_func_get_alt(struct usb_function *f, unsigned intf)
{
    return intf ? 1 : 0;
}
3533 
ffs_alloc(struct usb_function_instance * fi)3534 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
3535 {
3536     struct ffs_function *func = NULL;
3537 
3538     ENTER();
3539 
3540     func = kzalloc(sizeof(*func), GFP_KERNEL);
3541     if (unlikely(!func))
3542         return ERR_PTR(-ENOMEM);
3543 
3544     func->function.name    = "FunctionFS Adapter";
3545 
3546     func->function.bind    = ffs_func_bind;
3547     func->function.unbind  = ffs_func_unbind;
3548     func->function.set_alt = ffs_func_set_alt;
3549     func->function.get_alt = ffs_func_get_alt;
3550     func->function.disable = ffs_func_disable;
3551     func->function.setup   = ffs_func_setup;
3552     func->function.req_match = ffs_func_req_match;
3553     func->function.suspend = ffs_func_suspend;
3554     func->function.resume  = ffs_func_resume;
3555     func->function.free_func = ffs_free;
3556 
3557     return &func->function;
3558 }
3559 
3560 /*
3561  * ffs_lock must be taken by the caller of this function
3562  */
_ffs_alloc_dev(void)3563 static struct ffs_dev *_ffs_alloc_dev(void)
3564 {
3565     struct ffs_dev *dev = NULL;
3566     int ret;
3567 
3568     if (_ffs_get_single_dev())
3569             return ERR_PTR(-EBUSY);
3570 
3571     dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3572     if (!dev)
3573         return ERR_PTR(-ENOMEM);
3574 
3575     if (list_empty(&ffs_devices)) {
3576         ret = functionfs_init();
3577         if (ret) {
3578             kfree(dev);
3579             return ERR_PTR(ret);
3580         }
3581     }
3582 
3583     list_add(&dev->entry, &ffs_devices);
3584 
3585     return dev;
3586 }
3587 
ffs_name_dev_adapter(struct ffs_dev * dev,const char * name)3588 int ffs_name_dev_adapter(struct ffs_dev *dev, const char *name)
3589 {
3590     struct ffs_dev *existing = NULL;
3591     int ret = 0;
3592 
3593     ffs_dev_lock();
3594 
3595     existing = _ffs_do_find_dev(name);
3596     if (!existing)
3597         strlcpy(dev->name, name, ARRAY_SIZE(dev->name));
3598     else if (existing != dev)
3599         ret = -EBUSY;
3600 
3601     ffs_dev_unlock();
3602 
3603     return ret;
3604 }
3605 EXPORT_SYMBOL_GPL(ffs_name_dev_adapter);
3606 
ffs_single_dev_adapter(struct ffs_dev * dev)3607 int ffs_single_dev_adapter(struct ffs_dev *dev)
3608 {
3609     int ret;
3610 
3611     ret = 0;
3612     ffs_dev_lock();
3613 
3614     if (!list_is_singular(&ffs_devices))
3615         ret = -EBUSY;
3616     else
3617         dev->single = true;
3618 
3619     ffs_dev_unlock();
3620     return ret;
3621 }
3622 EXPORT_SYMBOL_GPL(ffs_single_dev_adapter);
3623 /*
3624  * ffs_lock must be taken by the caller of this function
3625  */
_ffs_free_dev(struct ffs_dev * dev)3626 static void _ffs_free_dev(struct ffs_dev *dev)
3627 {
3628     list_del(&dev->entry);
3629 
3630     /* Clear the private_data pointer to stop incorrect dev access */
3631     if (dev->ffs_data)
3632         dev->ffs_data->private_data = NULL;
3633 
3634     kfree(dev);
3635     if (list_empty(&ffs_devices))
3636         functionfs_cleanup();
3637 }
3638 
ffs_acquire_dev(const char * dev_name)3639 static void *ffs_acquire_dev(const char *dev_name)
3640 {
3641     struct ffs_dev *ffs_dev = NULL;
3642 
3643     ENTER();
3644     ffs_dev_lock();
3645 
3646     ffs_dev = _ffs_find_dev(dev_name);
3647     if (!ffs_dev)
3648         ffs_dev = ERR_PTR(-ENOENT);
3649     else if (ffs_dev->mounted)
3650         ffs_dev = ERR_PTR(-EBUSY);
3651     else if (ffs_dev->ffs_acquire_dev_callback &&
3652         ffs_dev->ffs_acquire_dev_callback(ffs_dev))
3653         ffs_dev = ERR_PTR(-ENOENT);
3654     else
3655         ffs_dev->mounted = true;
3656 
3657     ffs_dev_unlock();
3658     return ffs_dev;
3659 }
3660 
ffs_release_dev(struct ffs_data * ffs_data)3661 static void ffs_release_dev(struct ffs_data *ffs_data)
3662 {
3663     struct ffs_dev *ffs_dev = NULL;
3664 
3665     ENTER();
3666     ffs_dev_lock();
3667 
3668     ffs_dev = ffs_data->private_data;
3669     if (ffs_dev) {
3670         ffs_dev->mounted = false;
3671 
3672         if (ffs_dev->ffs_release_dev_callback)
3673             ffs_dev->ffs_release_dev_callback(ffs_dev);
3674     }
3675 
3676     ffs_dev_unlock();
3677 }
3678 
ffs_ready(struct ffs_data * ffs)3679 static int ffs_ready(struct ffs_data *ffs)
3680 {
3681     struct ffs_dev *ffs_obj = NULL;
3682     int ret = 0;
3683 
3684     ENTER();
3685     ffs_dev_lock();
3686 
3687     ffs_obj = ffs->private_data;
3688     if (!ffs_obj) {
3689         ret = -EINVAL;
3690         goto done;
3691     }
3692     if (WARN_ON(ffs_obj->desc_ready)) {
3693         ret = -EBUSY;
3694         goto done;
3695     }
3696 
3697     ffs_obj->desc_ready = true;
3698     ffs_obj->ffs_data = ffs;
3699 
3700     if (ffs_obj->ffs_ready_callback) {
3701         ret = ffs_obj->ffs_ready_callback(ffs);
3702         if (ret)
3703             goto done;
3704     }
3705 
3706     set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
3707 done:
3708     ffs_dev_unlock();
3709     return ret;
3710 }
3711 
/*
 * Called when the ffs instance is being closed: detach it from its
 * ffs_dev, fire the owner's closed callback at most once, and — when
 * managed through configfs — unregister the owning gadget item.
 */
static void ffs_closed(struct ffs_data *ffs)
{
    struct ffs_dev *ffs_obj = NULL;
    struct f_fs_opts *opts = NULL;
    struct config_item *ci = NULL;

    ENTER();
    ffs_dev_lock();

    ffs_obj = ffs->private_data;
    if (!ffs_obj)
        goto done;

    ffs_obj->desc_ready = false;
    ffs_obj->ffs_data = NULL;

    /* Run the closed callback only once per successful ffs_ready()
     * (the flag is set there and consumed here atomically). */
    if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
        ffs_obj->ffs_closed_callback)
        ffs_obj->ffs_closed_callback(ffs);

    if (ffs_obj->opts)
        opts = ffs_obj->opts;
    else
        goto done;

    /* Skip gadget teardown when configfs is not in use or the configfs
     * item is already being destroyed (no parent / refcount hit zero). */
    if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
        || !kref_read(&opts->func_inst.group.cg_item.ci_kref))
        goto done;

    ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
    /* NOTE(review): the dev lock is dropped before unregister_gadget_item(),
     * presumably to avoid lock-order inversion with configfs — confirm. */
    ffs_dev_unlock();

    if (test_bit(FFS_FL_BOUND, &ffs->flags))
        unregister_gadget_item(ci);
    return;
done:
    ffs_dev_unlock();
}
3750 
3751 /* Misc helper functions ****************************************************/
ffs_mutex_lock(struct mutex * mutex,unsigned nonblock)3752 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
3753 {
3754     return nonblock
3755         ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
3756         : mutex_lock_interruptible(mutex);
3757 }
3758 
/*
 * Copy @len bytes from userspace into a freshly allocated kernel buffer.
 * Returns NULL for a zero length, the buffer on success, or an ERR_PTR
 * (-ENOMEM / -EFAULT). The caller owns and must kfree() the buffer.
 */
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
{
    char *data;

    if (!len)
        return NULL;

    /* memdup_user() replaces the open-coded kmalloc+copy_from_user
     * pair and already returns ERR_PTR(-ENOMEM)/ERR_PTR(-EFAULT),
     * matching this function's contract. */
    data = memdup_user(buf, len);
    if (IS_ERR(data))
        return data;

    pr_vdebug("Buffer from user space:\n");
    ffs_dump_mem("", data, len);

    return data;
}
3780 
3781 DECLARE_USB_FUNCTION_INIT(f_generic, ffs_alloc_inst, ffs_alloc);
3782 MODULE_LICENSE("GPL");