/*
 * Gadget Function Driver for MTP
 *
 * Copyright (C) 2010 Google, Inc.
 * Author: Mike Lockwood <lockwood@android.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* #define DEBUG */
/* #define VERBOSE_DEBUG */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/interrupt.h>

#include <linux/types.h>
#include <linux/file.h>
#include <linux/device.h>
#include <linux/miscdevice.h>

#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/usb/ch9.h>
#include <linux/usb/f_mtp.h>

#define MTP_BULK_BUFFER_SIZE       16384
#define INTR_BUFFER_SIZE           28

/* String IDs */
#define INTERFACE_STRING_INDEX	0

/* values for mtp_dev.state */
#define STATE_OFFLINE               0   /* initial state, disconnected */
#define STATE_READY                 1   /* ready for userspace calls */
#define STATE_BUSY                  2   /* processing userspace calls */
#define STATE_CANCELED              3   /* transaction canceled by host */
#define STATE_ERROR                 4   /* error from completion routine */

/* number of tx and rx requests to allocate */
#define TX_REQ_MAX 4
#define RX_REQ_MAX 2
#define INTR_REQ_MAX 5

/* ID for Microsoft MTP OS String */
#define MTP_OS_STRING_ID   0xEE

/* MTP class requests */
#define MTP_REQ_CANCEL              0x64
#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
#define MTP_REQ_RESET               0x66
#define MTP_REQ_GET_DEVICE_STATUS   0x67

/* constants for device status */
#define MTP_RESPONSE_OK             0x2001
#define MTP_RESPONSE_DEVICE_BUSY    0x2019

static const char mtp_shortname[] = "mtp_usb";

struct mtp_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	spinlock_t lock;

	struct usb_ep *ep_in;
	struct usb_ep *ep_out;
	struct usb_ep *ep_intr;

	int state;

	/* synchronize access to our device file */
	atomic_t open_excl;
	/* to enforce only one ioctl at a time */
	atomic_t ioctl_excl;

	struct list_head tx_idle;
	struct list_head intr_idle;

	wait_queue_head_t read_wq;
	wait_queue_head_t write_wq;
	wait_queue_head_t intr_wq;
	struct usb_request *rx_req[RX_REQ_MAX];
	int rx_done;

	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
	 */
	struct workqueue_struct *wq;
	struct work_struct send_file_work;
	struct work_struct receive_file_work;
	struct file *xfer_file;
	loff_t xfer_file_offset;
	int64_t xfer_file_length;
	unsigned int xfer_send_header;
	uint16_t xfer_command;
	uint32_t xfer_transaction_id;
	int xfer_result;
};

static struct usb_interface_descriptor mtp_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 3,
	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
	.bInterfaceProtocol     = 0,
};

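/* PTP mode reuses the same endpoints but advertises the standard
 * still-image class (6/1/1) so that stock host-side PTP drivers
 * will bind to the interface.
 */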
static struct usb_interface_descriptor ptp_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 3,
	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
	.bInterfaceSubClass     = 1,
	.bInterfaceProtocol     = 1,
};

static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_intr_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
	.bInterval              = 6,
};

static struct usb_descriptor_header *fs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *fs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_string mtp_string_defs[] = {
	/* Naming interface "MTP" so libmtp will recognize us */
	[INTERFACE_STRING_INDEX].s	= "MTP",
	{  },	/* end of list */
};

static struct usb_gadget_strings mtp_string_table = {
	.language		= 0x0409,	/* en-US */
	.strings		= mtp_string_defs,
};

static struct usb_gadget_strings *mtp_strings[] = {
	&mtp_string_table,
	NULL,
};

/* Microsoft MTP OS String */
static u8 mtp_os_string[] = {
	18, /* sizeof(mtp_os_string) */
	USB_DT_STRING,
	/* Signature field: "MSFT100" */
	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
	/* vendor code */
	1,
	/* padding */
	0
};

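/* A host that recognizes the "MSFT100" signature in string descriptor
 * 0xEE will issue a vendor-specific request, with bRequest set to the
 * vendor code above (1), to read the extended configuration descriptor
 * defined below; see mtp_ctrlrequest().
 */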
/* Microsoft Extended Configuration Descriptor Header Section */
struct mtp_ext_config_desc_header {
	__le32	dwLength;
	__u16	bcdVersion;
	__le16	wIndex;
	__u8	bCount;
	__u8	reserved[7];
};

/* Microsoft Extended Configuration Descriptor Function Section */
struct mtp_ext_config_desc_function {
	__u8	bFirstInterfaceNumber;
	__u8	bInterfaceCount;
	__u8	compatibleID[8];
	__u8	subCompatibleID[8];
	__u8	reserved[6];
};

/* MTP Extended Configuration Descriptor */
struct {
	struct mtp_ext_config_desc_header	header;
	struct mtp_ext_config_desc_function    function;
} mtp_ext_config_desc = {
	.header = {
		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
		.bcdVersion = __constant_cpu_to_le16(0x0100),
		.wIndex = __constant_cpu_to_le16(4),
		/* bCount is a single byte, so no byte-order conversion */
		.bCount = 1,
	},
	.function = {
		.bFirstInterfaceNumber = 0,
		.bInterfaceCount = 1,
		.compatibleID = { 'M', 'T', 'P' },
	},
};

struct mtp_device_status {
	__le16	wLength;
	__le16	wCode;
};

struct mtp_data_header {
	/* length of packet, including this header */
	__le32	length;
	/* container type (2 for data packet) */
	__le16	type;
	/* MTP command code */
	__le16	command;
	/* MTP transaction ID */
	__le32	transaction_id;
};

/* temporary variable used between mtp_open() and mtp_gadget_bind() */
static struct mtp_dev *_mtp_dev;

static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}

static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);

	if (!req)
		return NULL;

	/* now allocate buffers for the requests */
	req->buf = kmalloc(buffer_size, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return NULL;
	}

	return req;
}

static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
{
	if (req) {
		kfree(req->buf);
		usb_ep_free_request(ep, req);
	}
}

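/* Try-lock built on an atomic counter: returns 0 if the caller took
 * the lock, -1 if it was already held.  Backs the single-open and
 * one-ioctl-at-a-time exclusion below.
 */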
static inline int mtp_lock(atomic_t *excl)
{
	if (atomic_inc_return(excl) == 1) {
		return 0;
	} else {
		atomic_dec(excl);
		return -1;
	}
}

static inline void mtp_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}

/* add a request to the tail of a list */
static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
		struct usb_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&req->list, head);
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* remove a request from the head of a list */
static struct usb_request
*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
{
	unsigned long flags;
	struct usb_request *req;

	spin_lock_irqsave(&dev->lock, flags);
	if (list_empty(head)) {
		req = 0;
	} else {
		req = list_first_entry(head, struct usb_request, list);
		list_del(&req->list);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	return req;
}

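/* Request completion callbacks.  The UDC driver may invoke these in
 * interrupt context, so they only update state and wake up waiters.
 */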
static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->tx_idle, req);

	wake_up(&dev->write_wq);
}

static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	dev->rx_done = 1;
	if (req->status != 0)
		dev->state = STATE_ERROR;

	wake_up(&dev->read_wq);
}

static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->intr_idle, req);

	wake_up(&dev->intr_wq);
}

static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
				struct usb_endpoint_descriptor *in_desc,
				struct usb_endpoint_descriptor *out_desc,
				struct usb_endpoint_descriptor *intr_desc)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	struct usb_ep *ep;
	int i;

	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);

	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_in = ep;

	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_out = ep;

	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_intr = ep;

	/* now allocate requests for our endpoints */
	for (i = 0; i < TX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_in;
		mtp_req_put(dev, &dev->tx_idle, req);
	}
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_out;
		dev->rx_req[i] = req;
	}
	for (i = 0; i < INTR_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_intr;
		mtp_req_put(dev, &dev->intr_idle, req);
	}

	return 0;

fail:
	printk(KERN_ERR "mtp_bind() could not allocate requests\n");
	return -1;
}

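/* read() path: queue a single bulk OUT request, wait for it to
 * complete and copy the payload to userspace.  Used for the MTP
 * protocol traffic that userspace handles directly.
 */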
static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	ssize_t r = count;
	unsigned int xfer;
	int ret = 0;

	DBG(cdev, "mtp_read(%zu)\n", count);

	if (count > MTP_BULK_BUFFER_SIZE)
		return -EINVAL;

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = count;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %p queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
	if (ret < 0) {
		r = ret;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %p %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

done:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %zd\n", r);
	return r;
}

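/* write() path: copy userspace data into idle tx requests and queue
 * them on the bulk IN endpoint, ending with a zero-length packet when
 * the total length is a multiple of the endpoint's maxpacket size.
 */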
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	ssize_t r = count;
	unsigned int xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%zu)\n", count);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
				|| dev->state != STATE_BUSY));
		if (!req) {
			r = ret;
			break;
		}

		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %zd\n", r);
	return r;
}

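/* The two file-transfer work items below exchange their parameters
 * and result with mtp_ioctl() through the xfer_* fields of struct
 * mtp_dev; the smp_rmb()/smp_wmb() pairs order those accesses around
 * queue_work()/flush_workqueue().
 */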
/* read from a local file and write to USB */
static void send_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	struct mtp_data_header *header;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret, hdr_size;
	int r = 0;
	int sendZLP = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);

	if (dev->xfer_send_header) {
		hdr_size = sizeof(struct mtp_data_header);
		count += hdr_size;
	} else {
		hdr_size = 0;
	}

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			(req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		if (!req) {
			r = ret;
			break;
		}

		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;

		if (hdr_size) {
			/* prepend MTP data header */
			header = (struct mtp_data_header *)req->buf;
			header->length = __cpu_to_le32(count);
			header->type = __cpu_to_le16(2); /* data packet */
			header->command = __cpu_to_le16(dev->xfer_command);
			header->transaction_id =
					__cpu_to_le32(dev->xfer_transaction_id);
		}

		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
								&offset);
		if (ret < 0) {
			r = ret;
			break;
		}
		xfer = ret + hdr_size;
		hdr_size = 0;

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			dev->state = STATE_ERROR;
			r = -EIO;
			break;
		}

		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	DBG(cdev, "send_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}

/* read from USB and write to a local file */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);

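	/* Double-buffered pipeline: while one rx request is in flight on
	 * the bulk OUT endpoint, the previously completed one is written
	 * out to the file, alternating between the rx_req buffers.
	 */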
	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			read_req->length = (count > MTP_BULK_BUFFER_SIZE
					? MTP_BULK_BUFFER_SIZE : count);
			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				r = -EIO;
				dev->state = STATE_ERROR;
				break;
			}
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED) {
				r = -ECANCELED;
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/*
				 * short packet is used to signal EOF for
				 * sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			write_req = read_req;
			read_req = NULL;
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}

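/* Send an MTP event (e.g. an ObjectAdded notification built by
 * userspace) on the interrupt IN endpoint, waiting up to a second
 * for a free interrupt request.
 */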
static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req = NULL;
	int ret;
	int length = event->length;

	DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;

	ret = wait_event_interruptible_timeout(dev->intr_wq,
			(req = mtp_req_get(dev, &dev->intr_idle)),
			msecs_to_jiffies(1000));
	if (!req)
		return -ETIME;

	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
		mtp_req_put(dev, &dev->intr_idle, req);
		return -EFAULT;
	}
	req->length = length;
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	if (ret)
		mtp_req_put(dev, &dev->intr_idle, req);

	return ret;
}

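/* ioctl interface for /dev/mtp_usb.  Illustrative userspace usage of
 * the file-transfer ioctls (a sketch; data_fd and file_size are the
 * caller's):
 *
 *	struct mtp_file_range mfr = {
 *		.fd = data_fd,
 *		.offset = 0,
 *		.length = file_size,
 *	};
 *	ioctl(mtp_fd, MTP_SEND_FILE, &mfr);
 *
 * The call blocks until the work item has streamed the file to the
 * bulk IN endpoint, then returns the transfer result.
 */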
static long mtp_ioctl(struct file *fp, unsigned int code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	int ret = -EINVAL;

	if (mtp_lock(&dev->ioctl_excl))
		return -EBUSY;

	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	case MTP_SEND_FILE_WITH_HEADER:
	{
		struct mtp_file_range	mfr;
		struct work_struct *work;

		spin_lock_irq(&dev->lock);
		if (dev->state == STATE_CANCELED) {
			/* report cancelation to userspace */
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_OFFLINE) {
			spin_unlock_irq(&dev->lock);
			ret = -ENODEV;
			goto out;
		}
		dev->state = STATE_BUSY;
		spin_unlock_irq(&dev->lock);

		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		/* hold a reference to the file while we are working with it */
		filp = fget(mfr.fd);
		if (!filp) {
			ret = -EBADF;
			goto fail;
		}

		/* write the parameters */
		dev->xfer_file = filp;
		dev->xfer_file_offset = mfr.offset;
		dev->xfer_file_length = mfr.length;
		smp_wmb();

		if (code == MTP_SEND_FILE_WITH_HEADER) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 1;
			dev->xfer_command = mfr.command;
			dev->xfer_transaction_id = mfr.transaction_id;
		} else if (code == MTP_SEND_FILE) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 0;
		} else {
			work = &dev->receive_file_work;
		}

		/* We do the file transfer on a work queue so it will run
		 * in kernel context, which is necessary for vfs_read and
		 * vfs_write to use our buffers in the kernel address space.
		 */
		queue_work(dev->wq, work);
		/* wait for operation to complete */
		flush_workqueue(dev->wq);
		fput(filp);

		/* read the result */
		smp_rmb();
		ret = dev->xfer_result;
		break;
	}
	case MTP_SEND_EVENT:
	{
		struct mtp_event	event;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
			ret = -EFAULT;
		else
			ret = mtp_send_event(dev, &event);
		goto out;
	}
	}

fail:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d\n", ret);
	return ret;
}

static int mtp_open(struct inode *ip, struct file *fp)
{
	printk(KERN_INFO "mtp_open\n");
	if (mtp_lock(&_mtp_dev->open_excl))
		return -EBUSY;

	/* clear any error condition */
	if (_mtp_dev->state != STATE_OFFLINE)
		_mtp_dev->state = STATE_READY;

	fp->private_data = _mtp_dev;
	return 0;
}

static int mtp_release(struct inode *ip, struct file *fp)
{
	printk(KERN_INFO "mtp_release\n");

	mtp_unlock(&_mtp_dev->open_excl);
	return 0;
}

/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
	.open = mtp_open,
	.release = mtp_release,
};

static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = mtp_shortname,
	.fops = &mtp_fops,
};

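/* Handle the ep0 control requests that target the MTP function: the
 * Microsoft OS string descriptor, the vendor request for the extended
 * configuration descriptor, and the MTP class requests (cancel and
 * get-device-status).  Intended to be called from the composite
 * driver's setup handling.
 */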
static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = _mtp_dev;
	int	value = -EOPNOTSUPP;
	u16	w_index = le16_to_cpu(ctrl->wIndex);
	u16	w_value = le16_to_cpu(ctrl->wValue);
	u16	w_length = le16_to_cpu(ctrl->wLength);
	unsigned long	flags;

	VDBG(cdev, "mtp_ctrlrequest "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string */
	if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
		}
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;

			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;

		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s: response queue error\n", __func__);
	}
	return value;
}

static int
mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct mtp_dev	*dev = func_to_mtp(f);
	int			id;
	int			ret;

	dev->cdev = cdev;
	DBG(cdev, "mtp_function_bind dev: %p\n", dev);

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	mtp_interface_desc.bInterfaceNumber = id;

	/* allocate endpoints */
	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
			&mtp_fullspeed_out_desc, &mtp_intr_desc);
	if (ret)
		return ret;

	/* support high speed hardware */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		mtp_highspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_highspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}

	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			f->name, dev->ep_in->name, dev->ep_out->name);
	return 0;
}

static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct mtp_dev	*dev = func_to_mtp(f);
	struct usb_request *req;
	int i;

	while ((req = mtp_req_get(dev, &dev->tx_idle)))
		mtp_request_free(req, dev->ep_in);
	for (i = 0; i < RX_REQ_MAX; i++)
		mtp_request_free(dev->rx_req[i], dev->ep_out);
	while ((req = mtp_req_get(dev, &dev->intr_idle)))
		mtp_request_free(req, dev->ep_intr);
	dev->state = STATE_OFFLINE;
}

static int mtp_function_set_alt(struct usb_function *f,
		unsigned int intf, unsigned int alt)
{
	struct mtp_dev	*dev = func_to_mtp(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	int ret;

	DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);

	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
	if (ret)
		return ret;

	ret = usb_ep_enable(dev->ep_in);
	if (ret)
		return ret;

	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
	if (ret)
		return ret;

	ret = usb_ep_enable(dev->ep_out);
	if (ret) {
		usb_ep_disable(dev->ep_in);
		return ret;
	}

	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
	if (ret)
		return ret;

	ret = usb_ep_enable(dev->ep_intr);
	if (ret) {
		usb_ep_disable(dev->ep_out);
		usb_ep_disable(dev->ep_in);
		return ret;
	}
	dev->state = STATE_READY;

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);
	return 0;
}

static void mtp_function_disable(struct usb_function *f)
{
	struct mtp_dev	*dev = func_to_mtp(f);
	struct usb_composite_dev	*cdev = dev->cdev;

	DBG(cdev, "mtp_function_disable\n");
	dev->state = STATE_OFFLINE;
	usb_ep_disable(dev->ep_in);
	usb_ep_disable(dev->ep_out);
	usb_ep_disable(dev->ep_intr);

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);

	VDBG(cdev, "%s disabled\n", dev->function.name);
}

static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
{
	struct mtp_dev *dev = _mtp_dev;
	int ret = 0;

	printk(KERN_INFO "mtp_bind_config\n");

	/* allocate a string ID for our interface */
	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
		ret = usb_string_id(c->cdev);
		if (ret < 0)
			return ret;
		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
		mtp_interface_desc.iInterface = ret;
	}

	dev->cdev = c->cdev;
	dev->function.name = "mtp";
	dev->function.strings = mtp_strings;
	if (ptp_config) {
		dev->function.fs_descriptors = fs_ptp_descs;
		dev->function.hs_descriptors = hs_ptp_descs;
	} else {
		dev->function.fs_descriptors = fs_mtp_descs;
		dev->function.hs_descriptors = hs_mtp_descs;
	}
	dev->function.bind = mtp_function_bind;
	dev->function.unbind = mtp_function_unbind;
	dev->function.set_alt = mtp_function_set_alt;
	dev->function.disable = mtp_function_disable;

	return usb_add_function(c, &dev->function);
}

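/* One-time setup: allocate the singleton mtp_dev, create the transfer
 * workqueue and register the /dev/mtp_usb misc device.  Expected to be
 * called once by the composite gadget driver, before mtp_bind_config().
 */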
static int mtp_setup(void)
{
	struct mtp_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock);
	init_waitqueue_head(&dev->read_wq);
	init_waitqueue_head(&dev->write_wq);
	init_waitqueue_head(&dev->intr_wq);
	atomic_set(&dev->open_excl, 0);
	atomic_set(&dev->ioctl_excl, 0);
	INIT_LIST_HEAD(&dev->tx_idle);
	INIT_LIST_HEAD(&dev->intr_idle);

	dev->wq = create_singlethread_workqueue("f_mtp");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto err1;
	}
	INIT_WORK(&dev->send_file_work, send_file_work);
	INIT_WORK(&dev->receive_file_work, receive_file_work);

	_mtp_dev = dev;

	ret = misc_register(&mtp_device);
	if (ret)
		goto err2;

	return 0;

err2:
	destroy_workqueue(dev->wq);
err1:
	_mtp_dev = NULL;
	kfree(dev);
	printk(KERN_ERR "mtp gadget driver failed to initialize\n");
	return ret;
}

static void mtp_cleanup(void)
{
	struct mtp_dev *dev = _mtp_dev;

	if (!dev)
		return;

	misc_deregister(&mtp_device);
	destroy_workqueue(dev->wq);
	_mtp_dev = NULL;
	kfree(dev);
}