// SPDX-License-Identifier: GPL-2.0+
/*
 * inode.c -- user mode filesystem api for usb gadget controllers
 *
 * Copyright (C) 2003-2004 David Brownell
 * Copyright (C) 2003 Agilent Technologies
 */


/* #define VERBOSE_DEBUG */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/pagemap.h>
#include <linux/uts.h>
#include <linux/wait.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/refcount.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/moduleparam.h>

#include <linux/usb/gadgetfs.h>
#include <linux/usb/gadget.h>


/*
 * The gadgetfs API maps each endpoint to a file descriptor so that you
 * can use standard synchronous read/write calls for I/O.  There's some
 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
 * drivers show how this works in practice.  You can also use AIO to
 * eliminate I/O gaps between requests, to help when streaming data.
 *
 * Key parts that must be USB-specific are protocols defining how the
 * read/write operations relate to the hardware state machines.  There
 * are two types of files.  One type is for the device, implementing ep0.
 * The other type is for each IN or OUT endpoint.  In both cases, the
 * user mode driver must configure the hardware before using it.
 *
 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
 *   (by writing configuration and device descriptors).  Afterwards it
 *   may serve as a source of device events, used to handle all control
 *   requests other than basic enumeration.
 *
 * - Then, after a SET_CONFIGURATION control request, ep_config() is
 *   called when each /dev/gadget/ep* file is configured (by writing
 *   endpoint descriptors).  Afterwards these files are used to write()
 *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
 *   direction" request is issued (like reading an IN endpoint).
 *
 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
 * not possible on all hardware.  For example, precise fault handling with
 * respect to data left in endpoint fifos after aborted operations; or
 * selective clearing of endpoint halts, to implement SET_INTERFACE.
 */
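
/* Roughly, a user mode driver built on this API does the following
 * (sketch only; the controller and endpoint names, the descriptor blobs,
 * and the handle_setup() helper are illustrative, not part of this file):
 *
 *	int ep0 = open("/dev/gadget/$CHIP", O_RDWR);
 *	write(ep0, device_and_config_descriptors, desc_len);	// dev_config()
 *
 *	struct usb_gadgetfs_event ev;
 *	while (read(ep0, &ev, sizeof ev) > 0) {			// device events
 *		if (ev.type == GADGETFS_SETUP)
 *			handle_setup(ep0, &ev.u.setup);		// hypothetical
 *	}
 *
 *	// after the host's SET_CONFIGURATION:
 *	int in = open("/dev/gadget/ep1in", O_RDWR);		// name varies by UDC
 *	write(in, ep_descriptors, ep_desc_len);			// ep_config()
 *	write(in, data, data_len);				// one IN transfer
 */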

#define	DRIVER_DESC	"USB Gadget filesystem"
#define	DRIVER_VERSION	"24 Aug 2004"

static const char driver_desc [] = DRIVER_DESC;
static const char shortname [] = "gadgetfs";

MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");

static int ep_open(struct inode *, struct file *);


/*----------------------------------------------------------------------*/

#define GADGETFS_MAGIC		0xaee71ee7

/* /dev/gadget/$CHIP represents ep0 and the whole device */
enum ep0_state {
	/* DISABLED is the initial state. */
	STATE_DEV_DISABLED = 0,

	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
	 * ep0/device i/o modes and binding to the controller.  Driver
	 * must always write descriptors to initialize the device, then
	 * the device becomes UNCONNECTED until enumeration.
	 */
	STATE_DEV_OPENED,

	/* From then on, ep0 fd is in either of two basic modes:
	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
	 * - SETUP: read/write will transfer control data and succeed;
	 *   or if "wrong direction", performs protocol stall
	 */
	STATE_DEV_UNCONNECTED,
	STATE_DEV_CONNECTED,
	STATE_DEV_SETUP,

	/* UNBOUND means the driver closed ep0, so the device won't be
	 * accessible again (DEV_DISABLED) until all fds are closed.
	 */
	STATE_DEV_UNBOUND,
};

/* enough for the whole queue: most events invalidate others */
#define	N_EVENT			5

#define RBUF_SIZE		256

struct dev_data {
	spinlock_t			lock;
	refcount_t			count;
	int				udc_usage;
	enum ep0_state			state;		/* P: lock */
	struct usb_gadgetfs_event	event [N_EVENT];
	unsigned			ev_next;
	struct fasync_struct		*fasync;
	u8				current_config;

	/* drivers reading ep0 MUST handle control requests (SETUP)
	 * reported that way; else the host will time out.
	 */
	unsigned			usermode_setup : 1,
					setup_in : 1,
					setup_can_stall : 1,
					setup_out_ready : 1,
					setup_out_error : 1,
					setup_abort : 1,
					gadget_registered : 1;
	unsigned			setup_wLength;

	/* the rest is basically write-once */
	struct usb_config_descriptor	*config, *hs_config;
	struct usb_device_descriptor	*dev;
	struct usb_request		*req;
	struct usb_gadget		*gadget;
	struct list_head		epfiles;
	void				*buf;
	wait_queue_head_t		wait;
	struct super_block		*sb;
	struct dentry			*dentry;

	/* except this scratch i/o buffer for ep0 */
	u8				rbuf[RBUF_SIZE];
};

static inline void get_dev (struct dev_data *data)
{
	refcount_inc (&data->count);
}

static void put_dev (struct dev_data *data)
{
	if (likely (!refcount_dec_and_test (&data->count)))
		return;
	/* needs no more cleanup */
	BUG_ON (waitqueue_active (&data->wait));
	kfree (data);
}

static struct dev_data *dev_new (void)
{
	struct dev_data		*dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	dev->state = STATE_DEV_DISABLED;
	refcount_set (&dev->count, 1);
	spin_lock_init (&dev->lock);
	INIT_LIST_HEAD (&dev->epfiles);
	init_waitqueue_head (&dev->wait);
	return dev;
}

/*----------------------------------------------------------------------*/

/* other /dev/gadget/$ENDPOINT files represent endpoints */
enum ep_state {
	STATE_EP_DISABLED = 0,
	STATE_EP_READY,
	STATE_EP_ENABLED,
	STATE_EP_UNBOUND,
};

struct ep_data {
	struct mutex			lock;
	enum ep_state			state;
	refcount_t			count;
	struct dev_data			*dev;
	/* must hold dev->lock before accessing ep or req */
	struct usb_ep			*ep;
	struct usb_request		*req;
	ssize_t				status;
	char				name [16];
	struct usb_endpoint_descriptor	desc, hs_desc;
	struct list_head		epfiles;
	wait_queue_head_t		wait;
	struct dentry			*dentry;
};

static inline void get_ep (struct ep_data *data)
{
	refcount_inc (&data->count);
}

static void put_ep (struct ep_data *data)
{
	if (likely (!refcount_dec_and_test (&data->count)))
		return;
	put_dev (data->dev);
	/* needs no more cleanup */
	BUG_ON (!list_empty (&data->epfiles));
	BUG_ON (waitqueue_active (&data->wait));
	kfree (data);
}

/*----------------------------------------------------------------------*/

/* most "how to use the hardware" policy choices are in userspace:
 * mapping endpoint roles (which the driver needs) to the capabilities
 * which the usb controller has.  most of those capabilities are exposed
 * implicitly, starting with the driver name and then endpoint names.
 */

static const char *CHIP;
static DEFINE_MUTEX(sb_mutex);		/* Serialize superblock operations */

/*----------------------------------------------------------------------*/

/* NOTE:  don't use dev_printk calls before binding to the gadget
 * at the end of ep0 configuration, or after unbind.
 */

/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
#define xprintk(d,level,fmt,args...) \
	printk(level "%s: " fmt , shortname , ## args)

#ifdef DEBUG
#define DBG(dev,fmt,args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev,fmt,args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDEBUG	DBG
#else
#define VDEBUG(dev,fmt,args...) \
	do { } while (0)
#endif /* DEBUG */

#define ERROR(dev,fmt,args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev,fmt,args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)


/*----------------------------------------------------------------------*/

/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
 *
 * After opening, configure non-control endpoints.  Then use normal
 * stream read() and write() requests; and maybe ioctl() to get more
 * precise FIFO status when recovering from cancellation.
 */
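
/* For instance, once an endpoint file is configured, a userspace driver
 * might stream OUT data with plain blocking calls (sketch only; the
 * endpoint name and the consume() helper are made up):
 *
 *	char buf[512];
 *	int out = open("/dev/gadget/ep2out", O_RDWR);
 *	write(out, out_ep_descriptors, out_desc_len);	// see ep_config()
 *	for (;;) {
 *		ssize_t n = read(out, buf, sizeof buf);	// one OUT transfer
 *		if (n < 0)
 *			break;				// e.g. EINTR, ENODEV
 *		consume(buf, n);
 *	}
 *	ioctl(out, GADGETFS_FIFO_FLUSH);		// optional, see ep_ioctl()
 */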

static void epio_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct ep_data	*epdata = ep->driver_data;

	if (!req->context)
		return;
	if (req->status)
		epdata->status = req->status;
	else
		epdata->status = req->actual;
	complete ((struct completion *)req->context);
}

/* tasklock endpoint, returning when it's connected.
 * still need dev->lock to use epdata->ep.
 */
static int
get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
{
	int	val;

	if (f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&epdata->lock))
			goto nonblock;
		if (epdata->state != STATE_EP_ENABLED &&
		    (!is_write || epdata->state != STATE_EP_READY)) {
			mutex_unlock(&epdata->lock);
nonblock:
			val = -EAGAIN;
		} else
			val = 0;
		return val;
	}

	val = mutex_lock_interruptible(&epdata->lock);
	if (val < 0)
		return val;

	switch (epdata->state) {
	case STATE_EP_ENABLED:
		return 0;
	case STATE_EP_READY:			/* not configured yet */
		if (is_write)
			return 0;
		fallthrough;
	case STATE_EP_UNBOUND:			/* clean disconnect */
		break;
	// case STATE_EP_DISABLED:		/* "can't happen" */
	default:				/* error! */
		pr_debug ("%s: ep %p not available, state %d\n",
				shortname, epdata, epdata->state);
	}
	mutex_unlock(&epdata->lock);
	return -ENODEV;
}

static ssize_t
ep_io (struct ep_data *epdata, void *buf, unsigned len)
{
	DECLARE_COMPLETION_ONSTACK (done);
	int value;

	spin_lock_irq (&epdata->dev->lock);
	if (likely (epdata->ep != NULL)) {
		struct usb_request	*req = epdata->req;

		req->context = &done;
		req->complete = epio_complete;
		req->buf = buf;
		req->length = len;
		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
	} else
		value = -ENODEV;
	spin_unlock_irq (&epdata->dev->lock);

	if (likely (value == 0)) {
		value = wait_for_completion_interruptible(&done);
		if (value != 0) {
			spin_lock_irq (&epdata->dev->lock);
			if (likely (epdata->ep != NULL)) {
				DBG (epdata->dev, "%s i/o interrupted\n",
						epdata->name);
				usb_ep_dequeue (epdata->ep, epdata->req);
				spin_unlock_irq (&epdata->dev->lock);

				wait_for_completion(&done);
				if (epdata->status == -ECONNRESET)
					epdata->status = -EINTR;
			} else {
				spin_unlock_irq (&epdata->dev->lock);

				DBG (epdata->dev, "endpoint gone\n");
				wait_for_completion(&done);
				epdata->status = -ENODEV;
			}
		}
		return epdata->status;
	}
	return value;
}

static int
ep_release (struct inode *inode, struct file *fd)
{
	struct ep_data		*data = fd->private_data;
	int value;

	value = mutex_lock_interruptible(&data->lock);
	if (value < 0)
		return value;

	/* clean up if this can be reopened */
	if (data->state != STATE_EP_UNBOUND) {
		data->state = STATE_EP_DISABLED;
		data->desc.bDescriptorType = 0;
		data->hs_desc.bDescriptorType = 0;
		usb_ep_disable(data->ep);
	}
	mutex_unlock(&data->lock);
	put_ep (data);
	return 0;
}

static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
{
	struct ep_data		*data = fd->private_data;
	int			status;

	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
		return status;

	spin_lock_irq (&data->dev->lock);
	if (likely (data->ep != NULL)) {
		switch (code) {
		case GADGETFS_FIFO_STATUS:
			status = usb_ep_fifo_status (data->ep);
			break;
		case GADGETFS_FIFO_FLUSH:
			usb_ep_fifo_flush (data->ep);
			break;
		case GADGETFS_CLEAR_HALT:
			status = usb_ep_clear_halt (data->ep);
			break;
		default:
			status = -ENOTTY;
		}
	} else
		status = -ENODEV;
	spin_unlock_irq (&data->dev->lock);
	mutex_unlock(&data->lock);
	return status;
}

/*----------------------------------------------------------------------*/

/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */

struct kiocb_priv {
	struct usb_request	*req;
	struct ep_data		*epdata;
	struct kiocb		*iocb;
	struct mm_struct	*mm;
	struct work_struct	work;
	void			*buf;
	struct iov_iter		to;
	const void		*to_free;
	unsigned		actual;
};

static int ep_aio_cancel(struct kiocb *iocb)
{
	struct kiocb_priv	*priv = iocb->private;
	struct ep_data		*epdata;
	int			value;

	local_irq_disable();
	epdata = priv->epdata;
	// spin_lock(&epdata->dev->lock);
	if (likely(epdata && epdata->ep && priv->req))
		value = usb_ep_dequeue (epdata->ep, priv->req);
	else
		value = -EINVAL;
	// spin_unlock(&epdata->dev->lock);
	local_irq_enable();

	return value;
}

static void ep_user_copy_worker(struct work_struct *work)
{
	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
	struct mm_struct *mm = priv->mm;
	struct kiocb *iocb = priv->iocb;
	size_t ret;

	kthread_use_mm(mm);
	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
	kthread_unuse_mm(mm);
	if (!ret)
		ret = -EFAULT;

	/* completing the iocb can drop the ctx and mm, don't touch mm after */
	iocb->ki_complete(iocb, ret, ret);

	kfree(priv->buf);
	kfree(priv->to_free);
	kfree(priv);
}

static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct kiocb		*iocb = req->context;
	struct kiocb_priv	*priv = iocb->private;
	struct ep_data		*epdata = priv->epdata;

	/* lock against disconnect (and ideally, cancel) */
	spin_lock(&epdata->dev->lock);
	priv->req = NULL;
	priv->epdata = NULL;

	/* if this was a write or a read returning no data then we
	 * don't need to copy anything to userspace, so we can
	 * complete the aio request immediately.
	 */
	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
		kfree(req->buf);
		kfree(priv->to_free);
		kfree(priv);
		iocb->private = NULL;
		/* aio_complete() reports bytes-transferred _and_ faults */

		iocb->ki_complete(iocb,
				req->actual ? req->actual : (long)req->status,
				req->status);
	} else {
		/* ep_copy_to_user() won't report both; we hide some faults */
		if (unlikely(0 != req->status))
			DBG(epdata->dev, "%s fault %d len %d\n",
				ep->name, req->status, req->actual);

		priv->buf = req->buf;
		priv->actual = req->actual;
		INIT_WORK(&priv->work, ep_user_copy_worker);
		schedule_work(&priv->work);
	}

	usb_ep_free_request(ep, req);
	spin_unlock(&epdata->dev->lock);
	put_ep(epdata);
}

static ssize_t ep_aio(struct kiocb *iocb,
		      struct kiocb_priv *priv,
		      struct ep_data *epdata,
		      char *buf,
		      size_t len)
{
	struct usb_request *req;
	ssize_t value;

	iocb->private = priv;
	priv->iocb = iocb;

	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
	get_ep(epdata);
	priv->epdata = epdata;
	priv->actual = 0;
	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */

	/* each kiocb is coupled to one usb_request, but we can't
	 * allocate or submit those if the host disconnected.
	 */
	spin_lock_irq(&epdata->dev->lock);
	value = -ENODEV;
	if (unlikely(epdata->ep == NULL))
		goto fail;

	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
	value = -ENOMEM;
	if (unlikely(!req))
		goto fail;

	priv->req = req;
	req->buf = buf;
	req->length = len;
	req->complete = ep_aio_complete;
	req->context = iocb;
	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
	if (unlikely(0 != value)) {
		usb_ep_free_request(epdata->ep, req);
		goto fail;
	}
	spin_unlock_irq(&epdata->dev->lock);
	return -EIOCBQUEUED;

fail:
	spin_unlock_irq(&epdata->dev->lock);
	kfree(priv->to_free);
	kfree(priv);
	put_ep(epdata);
	return value;
}

static ssize_t
ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct ep_data *epdata = file->private_data;
	size_t len = iov_iter_count(to);
	ssize_t value;
	char *buf;

	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
		return value;

	/* halt any endpoint by doing a "wrong direction" i/o call */
	if (usb_endpoint_dir_in(&epdata->desc)) {
		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
		    !is_sync_kiocb(iocb)) {
			mutex_unlock(&epdata->lock);
			return -EINVAL;
		}
		DBG (epdata->dev, "%s halt\n", epdata->name);
		spin_lock_irq(&epdata->dev->lock);
		if (likely(epdata->ep != NULL))
			usb_ep_set_halt(epdata->ep);
		spin_unlock_irq(&epdata->dev->lock);
		mutex_unlock(&epdata->lock);
		return -EBADMSG;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(!buf)) {
		mutex_unlock(&epdata->lock);
		return -ENOMEM;
	}
	if (is_sync_kiocb(iocb)) {
		value = ep_io(epdata, buf, len);
		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
			value = -EFAULT;
	} else {
		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
		value = -ENOMEM;
		if (!priv)
			goto fail;
		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
		if (!priv->to_free) {
			kfree(priv);
			goto fail;
		}
		value = ep_aio(iocb, priv, epdata, buf, len);
		if (value == -EIOCBQUEUED)
			buf = NULL;
	}
fail:
	kfree(buf);
	mutex_unlock(&epdata->lock);
	return value;
}

static ssize_t ep_config(struct ep_data *, const char *, size_t);

static ssize_t
ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ep_data *epdata = file->private_data;
	size_t len = iov_iter_count(from);
	bool configured;
	ssize_t value;
	char *buf;

	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
		return value;

	configured = epdata->state == STATE_EP_ENABLED;

	/* halt any endpoint by doing a "wrong direction" i/o call */
	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
		    !is_sync_kiocb(iocb)) {
			mutex_unlock(&epdata->lock);
			return -EINVAL;
		}
		DBG (epdata->dev, "%s halt\n", epdata->name);
		spin_lock_irq(&epdata->dev->lock);
		if (likely(epdata->ep != NULL))
			usb_ep_set_halt(epdata->ep);
		spin_unlock_irq(&epdata->dev->lock);
		mutex_unlock(&epdata->lock);
		return -EBADMSG;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(!buf)) {
		mutex_unlock(&epdata->lock);
		return -ENOMEM;
	}

	if (unlikely(!copy_from_iter_full(buf, len, from))) {
		value = -EFAULT;
		goto out;
	}

	if (unlikely(!configured)) {
		value = ep_config(epdata, buf, len);
	} else if (is_sync_kiocb(iocb)) {
		value = ep_io(epdata, buf, len);
	} else {
		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
		value = -ENOMEM;
		if (priv) {
			value = ep_aio(iocb, priv, epdata, buf, len);
			if (value == -EIOCBQUEUED)
				buf = NULL;
		}
	}
out:
	kfree(buf);
	mutex_unlock(&epdata->lock);
	return value;
}

/*----------------------------------------------------------------------*/

/* used after endpoint configuration */
static const struct file_operations ep_io_operations = {
	.owner =	THIS_MODULE,

	.open =		ep_open,
	.release =	ep_release,
	.llseek =	no_llseek,
	.unlocked_ioctl = ep_ioctl,
	.read_iter =	ep_read_iter,
	.write_iter =	ep_write_iter,
};

/* ENDPOINT INITIALIZATION
 *
 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
 *     status = write (fd, descriptors, sizeof descriptors)
 *
 * That write establishes the endpoint configuration, configuring
 * the controller to process bulk, interrupt, or isochronous transfers
 * at the right maxpacket size, and so on.
 *
 * The descriptors are message type 1, identified by a host order u32
 * at the beginning of what's written.  Descriptor order is: full/low
 * speed descriptor, then optional high speed descriptor.
 */
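
/* A sketch of building that write in userspace (the descriptor values and
 * endpoint address are illustrative; a real driver matches them to its
 * function and hardware):
 *
 *	struct usb_endpoint_descriptor fs = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= __cpu_to_le16(64),
 *	};
 *	struct usb_endpoint_descriptor hs = fs;		// optional, high speed
 *	hs.wMaxPacketSize = __cpu_to_le16(512);
 *
 *	__u32 tag = 1;
 *	char buf[4 + 2 * USB_DT_ENDPOINT_SIZE];
 *	memcpy(buf, &tag, 4);
 *	memcpy(buf + 4, &fs, USB_DT_ENDPOINT_SIZE);
 *	memcpy(buf + 4 + USB_DT_ENDPOINT_SIZE, &hs, USB_DT_ENDPOINT_SIZE);
 *	write(epfd, buf, sizeof buf);	// then plain read()/write() I/O
 */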
static ssize_t
ep_config (struct ep_data *data, const char *buf, size_t len)
{
	struct usb_ep		*ep;
	u32			tag;
	int			value, length = len;

	if (data->state != STATE_EP_READY) {
		value = -EL2HLT;
		goto fail;
	}

	value = len;
	if (len < USB_DT_ENDPOINT_SIZE + 4)
		goto fail0;

	/* we might need to change message format someday */
	memcpy(&tag, buf, 4);
	if (tag != 1) {
		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
		goto fail0;
	}
	buf += 4;
	len -= 4;

	/* NOTE:  audio endpoint extensions not accepted here;
	 * just don't include the extra bytes.
	 */

	/* full/low speed descriptor, then high speed */
	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
		goto fail0;
	if (len != USB_DT_ENDPOINT_SIZE) {
		if (len != 2 * USB_DT_ENDPOINT_SIZE)
			goto fail0;
		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
			USB_DT_ENDPOINT_SIZE);
		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
				|| data->hs_desc.bDescriptorType
					!= USB_DT_ENDPOINT) {
			DBG(data->dev, "config %s, bad hs length or type\n",
					data->name);
			goto fail0;
		}
	}

	spin_lock_irq (&data->dev->lock);
	if (data->dev->state == STATE_DEV_UNBOUND) {
		value = -ENOENT;
		goto gone;
	} else {
		ep = data->ep;
		if (ep == NULL) {
			value = -ENODEV;
			goto gone;
		}
	}
	switch (data->dev->gadget->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		ep->desc = &data->desc;
		break;
	case USB_SPEED_HIGH:
		/* fails if caller didn't provide that descriptor... */
		ep->desc = &data->hs_desc;
		break;
	default:
		DBG(data->dev, "unconnected, %s init abandoned\n",
				data->name);
		value = -EINVAL;
		goto gone;
	}
	value = usb_ep_enable(ep);
	if (value == 0) {
		data->state = STATE_EP_ENABLED;
		value = length;
	}
gone:
	spin_unlock_irq (&data->dev->lock);
	if (value < 0) {
fail:
		data->desc.bDescriptorType = 0;
		data->hs_desc.bDescriptorType = 0;
	}
	return value;
fail0:
	value = -EINVAL;
	goto fail;
}

static int
ep_open (struct inode *inode, struct file *fd)
{
	struct ep_data		*data = inode->i_private;
	int			value = -EBUSY;

	if (mutex_lock_interruptible(&data->lock) != 0)
		return -EINTR;
	spin_lock_irq (&data->dev->lock);
	if (data->dev->state == STATE_DEV_UNBOUND)
		value = -ENOENT;
	else if (data->state == STATE_EP_DISABLED) {
		value = 0;
		data->state = STATE_EP_READY;
		get_ep (data);
		fd->private_data = data;
		VDEBUG (data->dev, "%s ready\n", data->name);
	} else
		DBG (data->dev, "%s state %d\n",
			data->name, data->state);
	spin_unlock_irq (&data->dev->lock);
	mutex_unlock(&data->lock);
	return value;
}

/*----------------------------------------------------------------------*/

/* EP0 IMPLEMENTATION can be partly in userspace.
 *
 * Drivers that use this facility receive various events, including
 * control requests the kernel doesn't handle.  Drivers that don't
 * use this facility may be too simple-minded for real applications.
 */
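
/* Illustrative userspace handling of one delegated SETUP event read from
 * the ep0 file (the buffer size and the build_reply() helper are made up):
 *
 *	struct usb_ctrlrequest *setup = &event.u.setup;
 *	__u16 wLength = __le16_to_cpu(setup->wLength);
 *	char io[256];
 *
 *	if (setup->bRequestType & USB_DIR_IN)
 *		write(ep0, io, build_reply(setup, io));	// IN data stage
 *	else if (wLength)
 *		read(ep0, io, wLength);			// collect OUT data
 *	else
 *		read(ep0, io, 0);			// zero-length status ack
 *	// a "wrong direction" call instead performs a protocol stall
 */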

static inline void ep0_readable (struct dev_data *dev)
{
	wake_up (&dev->wait);
	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
}

static void clean_req (struct usb_ep *ep, struct usb_request *req)
{
	struct dev_data		*dev = ep->driver_data;

	if (req->buf != dev->rbuf) {
		kfree(req->buf);
		req->buf = dev->rbuf;
	}
	req->complete = epio_complete;
	dev->setup_out_ready = 0;
}

static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct dev_data		*dev = ep->driver_data;
	unsigned long		flags;
	int			free = 1;

	/* for control OUT, data must still get to userspace */
	spin_lock_irqsave(&dev->lock, flags);
	if (!dev->setup_in) {
		dev->setup_out_error = (req->status != 0);
		if (!dev->setup_out_error)
			free = 0;
		dev->setup_out_ready = 1;
		ep0_readable (dev);
	}

	/* clean up as appropriate */
	if (free && req->buf != &dev->rbuf)
		clean_req (ep, req);
	req->complete = epio_complete;
	spin_unlock_irqrestore(&dev->lock, flags);
}

static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
{
	struct dev_data	*dev = ep->driver_data;

	if (dev->setup_out_ready) {
		DBG (dev, "ep0 request busy!\n");
		return -EBUSY;
	}
	if (len > sizeof (dev->rbuf))
		req->buf = kmalloc(len, GFP_ATOMIC);
	if (req->buf == NULL) {
		req->buf = dev->rbuf;
		return -ENOMEM;
	}
	req->complete = ep0_complete;
	req->length = len;
	req->zero = 0;
	return 0;
}

static ssize_t
ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data			*dev = fd->private_data;
	ssize_t				retval;
	enum ep0_state			state;

	spin_lock_irq (&dev->lock);
	if (dev->state <= STATE_DEV_OPENED) {
		retval = -EINVAL;
		goto done;
	}

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		retval = -EIDRM;
		goto done;
	}

	/* control DATA stage */
	if ((state = dev->state) == STATE_DEV_SETUP) {

		if (dev->setup_in) {		/* stall IN */
			VDEBUG(dev, "ep0in stall\n");
			(void) usb_ep_set_halt (dev->gadget->ep0);
			retval = -EL2HLT;
			dev->state = STATE_DEV_CONNECTED;

		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
			struct usb_ep		*ep = dev->gadget->ep0;
			struct usb_request	*req = dev->req;

			if ((retval = setup_req (ep, req, 0)) == 0) {
				++dev->udc_usage;
				spin_unlock_irq (&dev->lock);
				retval = usb_ep_queue (ep, req, GFP_KERNEL);
				spin_lock_irq (&dev->lock);
				--dev->udc_usage;
			}
			dev->state = STATE_DEV_CONNECTED;

			/* assume that was SET_CONFIGURATION */
			if (dev->current_config) {
				unsigned power;

				if (gadget_is_dualspeed(dev->gadget)
						&& (dev->gadget->speed
							== USB_SPEED_HIGH))
					power = dev->hs_config->bMaxPower;
				else
					power = dev->config->bMaxPower;
				usb_gadget_vbus_draw(dev->gadget, 2 * power);
			}

		} else {			/* collect OUT data */
			if ((fd->f_flags & O_NONBLOCK) != 0
					&& !dev->setup_out_ready) {
				retval = -EAGAIN;
				goto done;
			}
			spin_unlock_irq (&dev->lock);
			retval = wait_event_interruptible (dev->wait,
					dev->setup_out_ready != 0);

			/* FIXME state could change from under us */
			spin_lock_irq (&dev->lock);
			if (retval)
				goto done;

			if (dev->state != STATE_DEV_SETUP) {
				retval = -ECANCELED;
				goto done;
			}
			dev->state = STATE_DEV_CONNECTED;

			if (dev->setup_out_error)
				retval = -EIO;
			else {
				len = min (len, (size_t)dev->req->actual);
				++dev->udc_usage;
				spin_unlock_irq(&dev->lock);
				if (copy_to_user (buf, dev->req->buf, len))
					retval = -EFAULT;
				else
					retval = len;
				spin_lock_irq(&dev->lock);
				--dev->udc_usage;
				clean_req (dev->gadget->ep0, dev->req);
				/* NOTE userspace can't yet choose to stall */
			}
		}
		goto done;
	}

	/* else normal: return event data */
	if (len < sizeof dev->event [0]) {
		retval = -EINVAL;
		goto done;
	}
	len -= len % sizeof (struct usb_gadgetfs_event);
	dev->usermode_setup = 1;

scan:
	/* return queued events right away */
	if (dev->ev_next != 0) {
		unsigned		i, n;

		n = len / sizeof (struct usb_gadgetfs_event);
		if (dev->ev_next < n)
			n = dev->ev_next;

		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
		for (i = 0; i < n; i++) {
			if (dev->event [i].type == GADGETFS_SETUP) {
				dev->state = STATE_DEV_SETUP;
				n = i + 1;
				break;
			}
		}
		spin_unlock_irq (&dev->lock);
		len = n * sizeof (struct usb_gadgetfs_event);
		if (copy_to_user (buf, &dev->event, len))
			retval = -EFAULT;
		else
			retval = len;
		if (len > 0) {
			/* NOTE this doesn't guard against broken drivers;
			 * concurrent ep0 readers may lose events.
			 */
			spin_lock_irq (&dev->lock);
			if (dev->ev_next > n) {
				memmove(&dev->event[0], &dev->event[n],
					sizeof (struct usb_gadgetfs_event)
						* (dev->ev_next - n));
			}
			dev->ev_next -= n;
			spin_unlock_irq (&dev->lock);
		}
		return retval;
	}
	if (fd->f_flags & O_NONBLOCK) {
		retval = -EAGAIN;
		goto done;
	}

	switch (state) {
	default:
		DBG (dev, "fail %s, state %d\n", __func__, state);
		retval = -ESRCH;
		break;
	case STATE_DEV_UNCONNECTED:
	case STATE_DEV_CONNECTED:
		spin_unlock_irq (&dev->lock);
		DBG (dev, "%s wait\n", __func__);

		/* wait for events */
		retval = wait_event_interruptible (dev->wait,
				dev->ev_next != 0);
		if (retval < 0)
			return retval;
		spin_lock_irq (&dev->lock);
		goto scan;
	}

done:
	spin_unlock_irq (&dev->lock);
	return retval;
}

static struct usb_gadgetfs_event *
next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
{
	struct usb_gadgetfs_event	*event;
	unsigned			i;

	switch (type) {
	/* these events purge the queue */
	case GADGETFS_DISCONNECT:
		if (dev->state == STATE_DEV_SETUP)
			dev->setup_abort = 1;
		fallthrough;
	case GADGETFS_CONNECT:
		dev->ev_next = 0;
		break;
	case GADGETFS_SETUP:		/* previous request timed out */
	case GADGETFS_SUSPEND:		/* same effect */
		/* these events can't be repeated */
		for (i = 0; i != dev->ev_next; i++) {
			if (dev->event [i].type != type)
				continue;
			DBG(dev, "discard old event[%d] %d\n", i, type);
			dev->ev_next--;
			if (i == dev->ev_next)
				break;
			/* indices start at zero, for simplicity */
			memmove (&dev->event [i], &dev->event [i + 1],
				sizeof (struct usb_gadgetfs_event)
					* (dev->ev_next - i));
		}
		break;
	default:
		BUG ();
	}
	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
	event = &dev->event [dev->ev_next++];
	BUG_ON (dev->ev_next > N_EVENT);
	memset (event, 0, sizeof *event);
	event->type = type;
	return event;
}

static ssize_t
ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data		*dev = fd->private_data;
	ssize_t			retval = -ESRCH;

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		retval = -EIDRM;

	/* data and/or status stage for control request */
	} else if (dev->state == STATE_DEV_SETUP) {

		len = min_t(size_t, len, dev->setup_wLength);
		if (dev->setup_in) {
			retval = setup_req (dev->gadget->ep0, dev->req, len);
			if (retval == 0) {
				dev->state = STATE_DEV_CONNECTED;
				++dev->udc_usage;
				spin_unlock_irq (&dev->lock);
				if (copy_from_user (dev->req->buf, buf, len))
					retval = -EFAULT;
				else {
					if (len < dev->setup_wLength)
						dev->req->zero = 1;
					retval = usb_ep_queue (
						dev->gadget->ep0, dev->req,
						GFP_KERNEL);
				}
				spin_lock_irq(&dev->lock);
				--dev->udc_usage;
				if (retval < 0) {
					clean_req (dev->gadget->ep0, dev->req);
				} else
					retval = len;

				return retval;
			}

		/* can stall some OUT transfers */
		} else if (dev->setup_can_stall) {
			VDEBUG(dev, "ep0out stall\n");
			(void) usb_ep_set_halt (dev->gadget->ep0);
			retval = -EL2HLT;
			dev->state = STATE_DEV_CONNECTED;
		} else {
			DBG(dev, "bogus ep0out stall!\n");
		}
	} else
		DBG (dev, "fail %s, state %d\n", __func__, dev->state);

	return retval;
}

static int
ep0_fasync (int f, struct file *fd, int on)
{
	struct dev_data		*dev = fd->private_data;
	// caller must F_SETOWN before signal delivery happens
	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
	return fasync_helper (f, fd, on, &dev->fasync);
}

static struct usb_gadget_driver gadgetfs_driver;

static int
dev_release (struct inode *inode, struct file *fd)
{
	struct dev_data		*dev = fd->private_data;

	/* closing ep0 === shutdown all */

	if (dev->gadget_registered) {
		usb_gadget_unregister_driver (&gadgetfs_driver);
		dev->gadget_registered = false;
	}

	/* at this point "good" hardware has disconnected the
	 * device from USB; the host won't see it any more.
	 * alternatively, all host requests will time out.
	 */

	kfree (dev->buf);
	dev->buf = NULL;

	/* other endpoints were all decoupled from this device */
	spin_lock_irq(&dev->lock);
	dev->state = STATE_DEV_DISABLED;
	spin_unlock_irq(&dev->lock);

	put_dev (dev);
	return 0;
}

static __poll_t
ep0_poll (struct file *fd, poll_table *wait)
{
	struct dev_data         *dev = fd->private_data;
	__poll_t                mask = 0;

	if (dev->state <= STATE_DEV_OPENED)
		return DEFAULT_POLLMASK;

	poll_wait(fd, &dev->wait, wait);

	spin_lock_irq(&dev->lock);

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		mask = EPOLLHUP;
		goto out;
	}

	if (dev->state == STATE_DEV_SETUP) {
		if (dev->setup_in || dev->setup_can_stall)
			mask = EPOLLOUT;
	} else {
		if (dev->ev_next != 0)
			mask = EPOLLIN;
	}
out:
	spin_unlock_irq(&dev->lock);
	return mask;
}

static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
{
	struct dev_data		*dev = fd->private_data;
	struct usb_gadget	*gadget = dev->gadget;
	long ret = -ENOTTY;

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_DEV_OPENED ||
			dev->state == STATE_DEV_UNBOUND) {
		/* Not bound to a UDC */
	} else if (gadget->ops->ioctl) {
		++dev->udc_usage;
		spin_unlock_irq(&dev->lock);

		ret = gadget->ops->ioctl (gadget, code, value);

		spin_lock_irq(&dev->lock);
		--dev->udc_usage;
	}
	spin_unlock_irq(&dev->lock);

	return ret;
}

/*----------------------------------------------------------------------*/

/* The in-kernel gadget driver handles most ep0 issues, in particular
 * enumerating the single configuration (as provided from user space).
 *
 * Unrecognized ep0 requests may be handled in user space.
 */

static void make_qualifier (struct dev_data *dev)
{
	struct usb_qualifier_descriptor		qual;
	struct usb_device_descriptor		*desc;

	qual.bLength = sizeof qual;
	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
	qual.bcdUSB = cpu_to_le16 (0x0200);

	desc = dev->dev;
	qual.bDeviceClass = desc->bDeviceClass;
	qual.bDeviceSubClass = desc->bDeviceSubClass;
	qual.bDeviceProtocol = desc->bDeviceProtocol;

	/* assumes ep0 uses the same value for both speeds ... */
	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;

	qual.bNumConfigurations = 1;
	qual.bRESERVED = 0;

	memcpy (dev->rbuf, &qual, sizeof qual);
}

static int
config_buf (struct dev_data *dev, u8 type, unsigned index)
{
	int		len;
	int		hs = 0;

	/* only one configuration */
	if (index > 0)
		return -EINVAL;

	if (gadget_is_dualspeed(dev->gadget)) {
		hs = (dev->gadget->speed == USB_SPEED_HIGH);
		if (type == USB_DT_OTHER_SPEED_CONFIG)
			hs = !hs;
	}
	if (hs) {
		dev->req->buf = dev->hs_config;
		len = le16_to_cpu(dev->hs_config->wTotalLength);
	} else {
		dev->req->buf = dev->config;
		len = le16_to_cpu(dev->config->wTotalLength);
	}
	((u8 *)dev->req->buf) [1] = type;
	return len;
}

static int
gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
	struct dev_data			*dev = get_gadget_data (gadget);
	struct usb_request		*req = dev->req;
	int				value = -EOPNOTSUPP;
	struct usb_gadgetfs_event	*event;
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);

	if (w_length > RBUF_SIZE) {
		if (ctrl->bRequestType & USB_DIR_IN) {
			/* Cast away the const, we are going to overwrite on purpose. */
			__le16 *temp = (__le16 *)&ctrl->wLength;

			*temp = cpu_to_le16(RBUF_SIZE);
			w_length = RBUF_SIZE;
		} else {
			return value;
		}
	}

	spin_lock (&dev->lock);
	dev->setup_abort = 0;
	if (dev->state == STATE_DEV_UNCONNECTED) {
		if (gadget_is_dualspeed(gadget)
				&& gadget->speed == USB_SPEED_HIGH
				&& dev->hs_config == NULL) {
			spin_unlock(&dev->lock);
			ERROR (dev, "no high speed config??\n");
			return -EINVAL;
		}

		dev->state = STATE_DEV_CONNECTED;

		INFO (dev, "connected\n");
		event = next_event (dev, GADGETFS_CONNECT);
		event->u.speed = gadget->speed;
		ep0_readable (dev);

	/* host may have given up waiting for response.  we can miss control
	 * requests handled lower down (device/endpoint status and features);
	 * then ep0_{read,write} will report the wrong status. controller
	 * driver will have aborted pending i/o.
	 */
	} else if (dev->state == STATE_DEV_SETUP)
		dev->setup_abort = 1;

	req->buf = dev->rbuf;
	req->context = NULL;
	switch (ctrl->bRequest) {

	case USB_REQ_GET_DESCRIPTOR:
		if (ctrl->bRequestType != USB_DIR_IN)
			goto unrecognized;
		switch (w_value >> 8) {

		case USB_DT_DEVICE:
			value = min (w_length, (u16) sizeof *dev->dev);
			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
			req->buf = dev->dev;
			break;
		case USB_DT_DEVICE_QUALIFIER:
			if (!dev->hs_config)
				break;
			value = min (w_length, (u16)
				sizeof (struct usb_qualifier_descriptor));
			make_qualifier (dev);
			break;
		case USB_DT_OTHER_SPEED_CONFIG:
		case USB_DT_CONFIG:
			value = config_buf (dev,
					w_value >> 8,
					w_value & 0xff);
			if (value >= 0)
				value = min (w_length, (u16) value);
			break;
		case USB_DT_STRING:
			goto unrecognized;

		default:		// all others are errors
			break;
		}
		break;

	/* currently one config, two speeds */
	case USB_REQ_SET_CONFIGURATION:
		if (ctrl->bRequestType != 0)
			goto unrecognized;
		if (0 == (u8) w_value) {
			value = 0;
			dev->current_config = 0;
			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
			// user mode expected to disable endpoints
		} else {
			u8	config, power;

			if (gadget_is_dualspeed(gadget)
					&& gadget->speed == USB_SPEED_HIGH) {
				config = dev->hs_config->bConfigurationValue;
				power = dev->hs_config->bMaxPower;
			} else {
				config = dev->config->bConfigurationValue;
				power = dev->config->bMaxPower;
			}

			if (config == (u8) w_value) {
				value = 0;
				dev->current_config = config;
				usb_gadget_vbus_draw(gadget, 2 * power);
			}
		}

		/* report SET_CONFIGURATION like any other control request,
		 * except that usermode may not stall this.  the next
		 * request mustn't be allowed start until this finishes:
		 * endpoints and threads set up, etc.
		 *
		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
		 * has bad/racey automagic that prevents synchronizing here.
		 * even kernel mode drivers often miss them.
		 */
		if (value == 0) {
			INFO (dev, "configuration #%d\n", dev->current_config);
			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
			if (dev->usermode_setup) {
				dev->setup_can_stall = 0;
				goto delegate;
			}
		}
		break;

#ifndef	CONFIG_USB_PXA25X
	/* PXA automagically handles this request too */
	case USB_REQ_GET_CONFIGURATION:
		if (ctrl->bRequestType != 0x80)
			goto unrecognized;
		*(u8 *)req->buf = dev->current_config;
		value = min (w_length, (u16) 1);
		break;
#endif

	default:
unrecognized:
		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
			dev->usermode_setup ? "delegate" : "fail",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, le16_to_cpu(ctrl->wIndex), w_length);

		/* if there's an ep0 reader, don't stall */
		if (dev->usermode_setup) {
			dev->setup_can_stall = 1;
delegate:
			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
						? 1 : 0;
			dev->setup_wLength = w_length;
			dev->setup_out_ready = 0;
			dev->setup_out_error = 0;

			/* read DATA stage for OUT right away */
			if (unlikely (!dev->setup_in && w_length)) {
				value = setup_req (gadget->ep0, dev->req,
							w_length);
				if (value < 0)
					break;

				++dev->udc_usage;
				spin_unlock (&dev->lock);
				value = usb_ep_queue (gadget->ep0, dev->req,
							GFP_KERNEL);
				spin_lock (&dev->lock);
				--dev->udc_usage;
				if (value < 0) {
					clean_req (gadget->ep0, dev->req);
					break;
				}

				/* we can't currently stall these */
				dev->setup_can_stall = 0;
			}

			/* state changes when reader collects event */
			event = next_event (dev, GADGETFS_SETUP);
			event->u.setup = *ctrl;
			ep0_readable (dev);
			spin_unlock (&dev->lock);
			return 0;
		}
	}

	/* proceed with data transfer and status phases? */
	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
		req->length = value;
		req->zero = value < w_length;

		++dev->udc_usage;
		spin_unlock (&dev->lock);
		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
		spin_lock(&dev->lock);
		--dev->udc_usage;
		spin_unlock(&dev->lock);
		if (value < 0) {
			DBG (dev, "ep_queue --> %d\n", value);
			req->status = 0;
		}
		return value;
	}

	/* device stalls when value < 0 */
	spin_unlock (&dev->lock);
	return value;
}

static void destroy_ep_files (struct dev_data *dev)
{
	DBG (dev, "%s %d\n", __func__, dev->state);

	/* dev->state must prevent interference */
	spin_lock_irq (&dev->lock);
	while (!list_empty(&dev->epfiles)) {
		struct ep_data	*ep;
		struct inode	*parent;
		struct dentry	*dentry;

		/* break link to FS */
		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
		list_del_init (&ep->epfiles);
		spin_unlock_irq (&dev->lock);

		dentry = ep->dentry;
		ep->dentry = NULL;
		parent = d_inode(dentry->d_parent);

		/* break link to controller */
		mutex_lock(&ep->lock);
		if (ep->state == STATE_EP_ENABLED)
			(void) usb_ep_disable (ep->ep);
		ep->state = STATE_EP_UNBOUND;
		usb_ep_free_request (ep->ep, ep->req);
		ep->ep = NULL;
		mutex_unlock(&ep->lock);

		wake_up (&ep->wait);
		put_ep (ep);

		/* break link to dcache */
		inode_lock(parent);
		d_delete (dentry);
		dput (dentry);
		inode_unlock(parent);

		spin_lock_irq (&dev->lock);
	}
	spin_unlock_irq (&dev->lock);
}


static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
		void *data, const struct file_operations *fops);

static int activate_ep_files (struct dev_data *dev)
{
	struct usb_ep	*ep;
	struct ep_data	*data;

	gadget_for_each_ep (ep, dev->gadget) {

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			goto enomem0;
		data->state = STATE_EP_DISABLED;
		mutex_init(&data->lock);
		init_waitqueue_head (&data->wait);

		strncpy (data->name, ep->name, sizeof (data->name) - 1);
		refcount_set (&data->count, 1);
		data->dev = dev;
		get_dev (dev);

		data->ep = ep;
		ep->driver_data = data;

		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
		if (!data->req)
			goto enomem1;

		data->dentry = gadgetfs_create_file (dev->sb, data->name,
				data, &ep_io_operations);
		if (!data->dentry)
			goto enomem2;
		list_add_tail (&data->epfiles, &dev->epfiles);
	}
	return 0;

enomem2:
	usb_ep_free_request (ep, data->req);
enomem1:
	put_dev (dev);
	kfree (data);
enomem0:
	DBG (dev, "%s enomem\n", __func__);
	destroy_ep_files (dev);
	return -ENOMEM;
}

static void
gadgetfs_unbind (struct usb_gadget *gadget)
{
	struct dev_data		*dev = get_gadget_data (gadget);

	DBG (dev, "%s\n", __func__);

	spin_lock_irq (&dev->lock);
	dev->state = STATE_DEV_UNBOUND;
	while (dev->udc_usage > 0) {
		spin_unlock_irq(&dev->lock);
		usleep_range(1000, 2000);
		spin_lock_irq(&dev->lock);
	}
	spin_unlock_irq (&dev->lock);

	destroy_ep_files (dev);
	gadget->ep0->driver_data = NULL;
	set_gadget_data (gadget, NULL);

	/* we've already been disconnected ... no i/o is active */
	if (dev->req)
		usb_ep_free_request (gadget->ep0, dev->req);
	DBG (dev, "%s done\n", __func__);
	put_dev (dev);
}

static struct dev_data		*the_device;

static int gadgetfs_bind(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct dev_data		*dev = the_device;

	if (!dev)
		return -ESRCH;
	if (0 != strcmp (CHIP, gadget->name)) {
		pr_err("%s expected %s controller not %s\n",
			shortname, CHIP, gadget->name);
		return -ENODEV;
	}

	set_gadget_data (gadget, dev);
	dev->gadget = gadget;
	gadget->ep0->driver_data = dev;

	/* preallocate control response and buffer */
	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
	if (!dev->req)
		goto enomem;
	dev->req->context = NULL;
	dev->req->complete = epio_complete;

	if (activate_ep_files (dev) < 0)
		goto enomem;

	INFO (dev, "bound to %s driver\n", gadget->name);
	spin_lock_irq(&dev->lock);
	dev->state = STATE_DEV_UNCONNECTED;
	spin_unlock_irq(&dev->lock);
	get_dev (dev);
	return 0;

enomem:
	gadgetfs_unbind (gadget);
	return -ENOMEM;
}

static void
gadgetfs_disconnect (struct usb_gadget *gadget)
{
	struct dev_data		*dev = get_gadget_data (gadget);
	unsigned long		flags;

	spin_lock_irqsave (&dev->lock, flags);
	if (dev->state == STATE_DEV_UNCONNECTED)
		goto exit;
	dev->state = STATE_DEV_UNCONNECTED;

	INFO (dev, "disconnected\n");
	next_event (dev, GADGETFS_DISCONNECT);
	ep0_readable (dev);
exit:
	spin_unlock_irqrestore (&dev->lock, flags);
}

static void
gadgetfs_suspend (struct usb_gadget *gadget)
{
	struct dev_data		*dev = get_gadget_data (gadget);
	unsigned long		flags;

	INFO (dev, "suspended from state %d\n", dev->state);
	spin_lock_irqsave(&dev->lock, flags);
	switch (dev->state) {
	case STATE_DEV_SETUP:		// VERY odd... host died??
	case STATE_DEV_CONNECTED:
	case STATE_DEV_UNCONNECTED:
		next_event (dev, GADGETFS_SUSPEND);
		ep0_readable (dev);
		fallthrough;
	default:
		break;
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

static struct usb_gadget_driver gadgetfs_driver = {
	.function	= (char *) driver_desc,
	.bind		= gadgetfs_bind,
	.unbind		= gadgetfs_unbind,
	.setup		= gadgetfs_setup,
	.reset		= gadgetfs_disconnect,
	.disconnect	= gadgetfs_disconnect,
	.suspend	= gadgetfs_suspend,

	.driver	= {
		.name		= shortname,
	},
};

/*----------------------------------------------------------------------*/
/* DEVICE INITIALIZATION
 *
 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
 *     status = write (fd, descriptors, sizeof descriptors)
 *
 * That write establishes the device configuration, so the kernel can
 * bind to the controller ... guaranteeing it can handle enumeration
 * at all necessary speeds.  Descriptor order is:
 *
 * . message tag (u32, host order) ... for now, must be zero; it
 *	would change to support features like multi-config devices
 * . full/low speed config ... all wTotalLength bytes (with interface,
 *	class, altsetting, endpoint, and other descriptors)
 * . high speed config ... all descriptors, for high speed operation;
 *	this one's optional except for high-speed hardware
 * . device descriptor
 *
 * Endpoints are not yet enabled. Drivers must wait until device
 * configuration and interface altsetting changes create
 * the need to configure (or unconfigure) them.
 *
 * After initialization, the device stays active for as long as that
 * $CHIP file is open.  Events must then be read from that descriptor,
 * such as configuration notifications.
 */
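
/* A sketch of assembling that initial write (the buffer size and the
 * descriptor contents are illustrative; see is_valid_config() below for
 * what actually gets checked):
 *
 *	__u32 tag = 0;
 *	char buf[4096], *cp = buf;
 *
 *	memcpy(cp, &tag, 4);			cp += 4;
 *	memcpy(cp, fs_config, fs_total);	cp += fs_total;	// wTotalLength bytes
 *	memcpy(cp, hs_config, hs_total);	cp += hs_total;	// optional
 *	memcpy(cp, &device_desc, USB_DT_DEVICE_SIZE);
 *	cp += USB_DT_DEVICE_SIZE;
 *
 *	int ep0 = open("/dev/gadget/$CHIP", O_RDWR);
 *	if (write(ep0, buf, cp - buf) < 0)
 *		perror("gadgetfs device config");
 */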
1784 
is_valid_config(struct usb_config_descriptor * config,unsigned int total)1785 static int is_valid_config(struct usb_config_descriptor *config,
1786 		unsigned int total)
1787 {
1788 	return config->bDescriptorType == USB_DT_CONFIG
1789 		&& config->bLength == USB_DT_CONFIG_SIZE
1790 		&& total >= USB_DT_CONFIG_SIZE
1791 		&& config->bConfigurationValue != 0
1792 		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1793 		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1794 	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1795 	/* FIXME check lengths: walk to end */
1796 }

static ssize_t
dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data		*dev = fd->private_data;
	ssize_t			value, length = len;
	unsigned		total;
	u32			tag;
	char			*kbuf;

	spin_lock_irq(&dev->lock);
	if (dev->state > STATE_DEV_OPENED) {
		value = ep0_write(fd, buf, len, ptr);
		spin_unlock_irq(&dev->lock);
		return value;
	}
	spin_unlock_irq(&dev->lock);

	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
	    (len > PAGE_SIZE * 4))
		return -EINVAL;

	/* we might need to change message format someday */
	if (copy_from_user (&tag, buf, 4))
		return -EFAULT;
	if (tag != 0)
		return -EINVAL;
	buf += 4;
	length -= 4;

	kbuf = memdup_user(buf, length);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	spin_lock_irq (&dev->lock);
	value = -EINVAL;
	if (dev->buf) {
		spin_unlock_irq(&dev->lock);
		kfree(kbuf);
		return value;
	}
	dev->buf = kbuf;

	/* full or low speed config */
	dev->config = (void *) kbuf;
	total = le16_to_cpu(dev->config->wTotalLength);
	if (!is_valid_config(dev->config, total) ||
			total > length - USB_DT_DEVICE_SIZE)
		goto fail;
	kbuf += total;
	length -= total;

	/* optional high speed config */
	if (kbuf [1] == USB_DT_CONFIG) {
		dev->hs_config = (void *) kbuf;
		total = le16_to_cpu(dev->hs_config->wTotalLength);
		if (!is_valid_config(dev->hs_config, total) ||
				total > length - USB_DT_DEVICE_SIZE)
			goto fail;
		kbuf += total;
		length -= total;
	} else {
		dev->hs_config = NULL;
	}

	/* could support multiple configs, using another encoding! */

	/* device descriptor (tweaked for paranoia) */
	if (length != USB_DT_DEVICE_SIZE)
		goto fail;
	dev->dev = (void *)kbuf;
	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
			|| dev->dev->bDescriptorType != USB_DT_DEVICE
			|| dev->dev->bNumConfigurations != 1)
		goto fail;
	dev->dev->bcdUSB = cpu_to_le16 (0x0200);

	/* triggers gadgetfs_bind(); then we can enumerate. */
	spin_unlock_irq (&dev->lock);
	if (dev->hs_config)
		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
	else
		gadgetfs_driver.max_speed = USB_SPEED_FULL;

	value = usb_gadget_probe_driver(&gadgetfs_driver);
	if (value != 0) {
		spin_lock_irq(&dev->lock);
		goto fail;
	} else {
		/* at this point "good" hardware has for the first time
		 * let the USB host see us.  alternatively, if users
		 * unplug/replug that will clear all the error state.
		 *
		 * note:  everything running before here was guaranteed
		 * to choke driver model style diagnostics.  from here
		 * on, they can work ... except in cleanup paths that
		 * kick in after the ep0 descriptor is closed.
		 */
		value = len;
		dev->gadget_registered = true;
	}
	return value;

fail:
	dev->config = NULL;
	dev->hs_config = NULL;
	dev->dev = NULL;
	spin_unlock_irq (&dev->lock);
	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
	kfree (dev->buf);
	dev->buf = NULL;
	return value;
}

static int
dev_open (struct inode *inode, struct file *fd)
{
	struct dev_data		*dev = inode->i_private;
	int			value = -EBUSY;

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_DEV_DISABLED) {
		dev->ev_next = 0;
		dev->state = STATE_DEV_OPENED;
		fd->private_data = dev;
		get_dev (dev);
		value = 0;
	}
	spin_unlock_irq(&dev->lock);
	return value;
}

static const struct file_operations ep0_operations = {
	.llseek =	no_llseek,

	.open =		dev_open,
	.read =		ep0_read,
	.write =	dev_config,
	.fasync =	ep0_fasync,
	.poll =		ep0_poll,
	.unlocked_ioctl = dev_ioctl,
	.release =	dev_release,
};
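
/* Once the device is configured, user space reads device events (connect,
 * disconnect, setup requests, and so on) from the same fd.  A rough sketch,
 * assuming "fd" is the open $CHIP file from the example above (the event
 * handling itself is illustrative):
 *
 *	struct usb_gadgetfs_event event;
 *
 *	while (read(fd, &event, sizeof event) == sizeof event) {
 *		switch (event.type) {
 *		case GADGETFS_CONNECT:
 *		case GADGETFS_DISCONNECT:
 *			// track connection state
 *			break;
 *		case GADGETFS_SETUP:
 *			// handle event.u.setup, e.g. SET_CONFIGURATION
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */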

/*----------------------------------------------------------------------*/

/* FILESYSTEM AND SUPERBLOCK OPERATIONS
 *
 * Mounting the filesystem creates a controller file, used first for
 * device configuration then later for event monitoring.
 */
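
/* The mount can be done from a shell ("mount -t gadgetfs none /dev/gadget")
 * or directly from user-space code; a minimal sketch, assuming the
 * /dev/gadget mount point already exists:
 *
 *	#include <sys/mount.h>
 *
 *	if (mount("none", "/dev/gadget", "gadgetfs", 0, NULL) < 0)
 *		perror("mount gadgetfs");
 */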


/* FIXME PAM etc could set this security policy without mount options
 * if epfiles inherited ownership and permissions from ep0 ...
 */

static unsigned default_uid;
static unsigned default_gid;
static unsigned default_perm = S_IRUSR | S_IWUSR;

module_param (default_uid, uint, 0644);
module_param (default_gid, uint, 0644);
module_param (default_perm, uint, 0644);
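
/* These defaults apply to the files gadgetfs creates.  For example
 * (a sketch; the uid/gid values are illustrative):
 *
 *	modprobe gadgetfs default_uid=1000 default_gid=1000 default_perm=0660
 *
 * or, with gadgetfs built in, the same parameters on the kernel command
 * line as gadgetfs.default_uid=1000 and so on.
 */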


static struct inode *
gadgetfs_make_inode (struct super_block *sb,
		void *data, const struct file_operations *fops,
		int mode)
{
	struct inode *inode = new_inode (sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = make_kuid(&init_user_ns, default_uid);
		inode->i_gid = make_kgid(&init_user_ns, default_gid);
		inode->i_atime = inode->i_mtime = inode->i_ctime
				= current_time(inode);
		inode->i_private = data;
		inode->i_fop = fops;
	}
	return inode;
}

/* Creates a file in the fs root directory, so it is non-renamable and
 * non-linkable; inode and dentry stay paired until device reconfig.
 */
static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
		void *data, const struct file_operations *fops)
{
	struct dentry	*dentry;
	struct inode	*inode;

	dentry = d_alloc_name(sb->s_root, name);
	if (!dentry)
		return NULL;

	inode = gadgetfs_make_inode (sb, data, fops,
			S_IFREG | (default_perm & S_IRWXUGO));
	if (!inode) {
		dput(dentry);
		return NULL;
	}
	d_add (dentry, inode);
	return dentry;
}

static const struct super_operations gadget_fs_operations = {
	.statfs =	simple_statfs,
	.drop_inode =	generic_delete_inode,
};

static int
gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
{
	struct inode	*inode;
	struct dev_data	*dev;
	int		rc;

	mutex_lock(&sb_mutex);

	if (the_device) {
		rc = -ESRCH;
		goto Done;
	}

	CHIP = usb_get_gadget_udc_name();
	if (!CHIP) {
		rc = -ENODEV;
		goto Done;
	}

	/* superblock */
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = GADGETFS_MAGIC;
	sb->s_op = &gadget_fs_operations;
	sb->s_time_gran = 1;

	/* root inode */
	inode = gadgetfs_make_inode (sb,
			NULL, &simple_dir_operations,
			S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto Enomem;
	inode->i_op = &simple_dir_inode_operations;
	if (!(sb->s_root = d_make_root (inode)))
		goto Enomem;

	/* the ep0 file is named after the controller we expect;
	 * user mode code can use it for sanity checks, like we do.
	 */
	dev = dev_new ();
	if (!dev)
		goto Enomem;

	dev->sb = sb;
	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
	if (!dev->dentry) {
		put_dev(dev);
		goto Enomem;
	}

	/* other endpoint files are available after hardware setup,
	 * from binding to a controller.
	 */
	the_device = dev;
	rc = 0;
	goto Done;

 Enomem:
	kfree(CHIP);
	CHIP = NULL;
	rc = -ENOMEM;

 Done:
	mutex_unlock(&sb_mutex);
	return rc;
}

/* "mount -t gadgetfs path /dev/gadget" ends up here */
static int gadgetfs_get_tree(struct fs_context *fc)
{
	return get_tree_single(fc, gadgetfs_fill_super);
}

static const struct fs_context_operations gadgetfs_context_ops = {
	.get_tree	= gadgetfs_get_tree,
};

static int gadgetfs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &gadgetfs_context_ops;
	return 0;
}

static void
gadgetfs_kill_sb (struct super_block *sb)
{
	mutex_lock(&sb_mutex);
	kill_litter_super (sb);
	if (the_device) {
		put_dev (the_device);
		the_device = NULL;
	}
	kfree(CHIP);
	CHIP = NULL;
	mutex_unlock(&sb_mutex);
}

/*----------------------------------------------------------------------*/

static struct file_system_type gadgetfs_type = {
	.owner		= THIS_MODULE,
	.name		= shortname,
	.init_fs_context = gadgetfs_init_fs_context,
	.kill_sb	= gadgetfs_kill_sb,
};
MODULE_ALIAS_FS("gadgetfs");

/*----------------------------------------------------------------------*/

static int __init init (void)
{
	int status;

	status = register_filesystem (&gadgetfs_type);
	if (status == 0)
		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
			shortname, driver_desc);
	return status;
}
module_init (init);

static void __exit cleanup (void)
{
	pr_debug ("unregister %s\n", shortname);
	unregister_filesystem (&gadgetfs_type);
}
module_exit (cleanup);