1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * xen-hcd.c
4  *
5  * Xen USB Virtual Host Controller driver
6  *
7  * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
8  * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
9  */
10 
11 #include <linux/module.h>
12 #include <linux/usb.h>
13 #include <linux/list.h>
14 #include <linux/usb/hcd.h>
15 #include <linux/io.h>
16 
17 #include <xen/xen.h>
18 #include <xen/xenbus.h>
19 #include <xen/grant_table.h>
20 #include <xen/events.h>
21 #include <xen/page.h>
22 
23 #include <xen/interface/io/usbif.h>
24 
25 /* Private per-URB data */
26 struct urb_priv {
27 	struct list_head list;
28 	struct urb *urb;
29 	int req_id;		/* RING_REQUEST id for submitting */
30 	int unlink_req_id;	/* RING_REQUEST id for unlinking */
31 	int status;
32 	bool unlinked;		/* dequeued marker */
33 };
34 
35 /* virtual roothub port status */
36 struct rhport_status {
37 	__u32 status;
38 	bool resuming;		/* resume signaling in progress */
39 	bool c_connection;	/* connection changed */
40 	unsigned long timeout;
41 };
42 
43 /* status of attached device */
44 struct vdevice_status {
45 	int devnum;
46 	enum usb_device_state status;
47 	enum usb_device_speed speed;
48 };
49 
50 /* RING request shadow */
51 struct usb_shadow {
52 	struct xenusb_urb_request req;
53 	struct urb *urb;
54 	bool in_flight;
55 };
56 
57 struct xenhcd_info {
58 	/* Virtual Host Controller has 4 urb queues */
59 	struct list_head pending_submit_list;
60 	struct list_head pending_unlink_list;
61 	struct list_head in_progress_list;
62 	struct list_head giveback_waiting_list;
63 
64 	spinlock_t lock;
65 
66 	/* timer that kicks pending and giveback-waiting urbs */
67 	struct timer_list watchdog;
68 	unsigned long actions;
69 
70 	/* virtual root hub */
71 	int rh_numports;
72 	struct rhport_status ports[XENUSB_MAX_PORTNR];
73 	struct vdevice_status devices[XENUSB_MAX_PORTNR];
74 
75 	/* Xen related stuff */
76 	struct xenbus_device *xbdev;
77 	int urb_ring_ref;
78 	int conn_ring_ref;
79 	struct xenusb_urb_front_ring urb_ring;
80 	struct xenusb_conn_front_ring conn_ring;
81 
82 	unsigned int evtchn;
83 	unsigned int irq;
84 	struct usb_shadow shadow[XENUSB_URB_RING_SIZE];
85 	unsigned int shadow_free;
86 
87 	bool error;
88 };
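
/*
 * URB flow through the four lists above: a newly enqueued URB is sent to
 * the backend and moved to in_progress_list when a ring slot is free,
 * otherwise it waits on pending_submit_list; dequeue requests that cannot
 * be sent immediately wait on pending_unlink_list; URBs dequeued before
 * they ever reached the ring are parked on giveback_waiting_list until
 * the watchdog gives them back.
 */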
89 
90 #define XENHCD_RING_JIFFIES (HZ/200)
91 #define XENHCD_SCAN_JIFFIES 1
92 
93 enum xenhcd_timer_action {
94 	TIMER_RING_WATCHDOG,
95 	TIMER_SCAN_PENDING_URBS,
96 };
97 
98 static struct kmem_cache *xenhcd_urbp_cachep;
99 
100 static inline struct xenhcd_info *xenhcd_hcd_to_info(struct usb_hcd *hcd)
101 {
102 	return (struct xenhcd_info *)hcd->hcd_priv;
103 }
104 
105 static inline struct usb_hcd *xenhcd_info_to_hcd(struct xenhcd_info *info)
106 {
107 	return container_of((void *)info, struct usb_hcd, hcd_priv);
108 }
109 
110 static void xenhcd_set_error(struct xenhcd_info *info, const char *msg)
111 {
112 	info->error = true;
113 
114 	pr_alert("xen-hcd: protocol error: %s!\n", msg);
115 }
116 
117 static inline void xenhcd_timer_action_done(struct xenhcd_info *info,
118 					    enum xenhcd_timer_action action)
119 {
120 	clear_bit(action, &info->actions);
121 }
122 
123 static void xenhcd_timer_action(struct xenhcd_info *info,
124 				enum xenhcd_timer_action action)
125 {
126 	if (timer_pending(&info->watchdog) &&
127 	    test_bit(TIMER_SCAN_PENDING_URBS, &info->actions))
128 		return;
129 
130 	if (!test_and_set_bit(action, &info->actions)) {
131 		unsigned long t;
132 
133 		switch (action) {
134 		case TIMER_RING_WATCHDOG:
135 			t = XENHCD_RING_JIFFIES;
136 			break;
137 		default:
138 			t = XENHCD_SCAN_JIFFIES;
139 			break;
140 		}
141 		mod_timer(&info->watchdog, t + jiffies);
142 	}
143 }
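
/*
 * Both actions are served by the single watchdog timer, and its handler
 * performs the whole scan (giving back unlinked URBs and kicking pending
 * ones), so a timer that is already armed for TIMER_SCAN_PENDING_URBS
 * makes re-arming for another action unnecessary.
 */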
144 
145 /*
146  * set virtual port connection status
147  */
148 static void xenhcd_set_connect_state(struct xenhcd_info *info, int portnum)
149 {
150 	int port;
151 
152 	port = portnum - 1;
153 	if (info->ports[port].status & USB_PORT_STAT_POWER) {
154 		switch (info->devices[port].speed) {
155 		case XENUSB_SPEED_NONE:
156 			info->ports[port].status &=
157 				~(USB_PORT_STAT_CONNECTION |
158 				  USB_PORT_STAT_ENABLE |
159 				  USB_PORT_STAT_LOW_SPEED |
160 				  USB_PORT_STAT_HIGH_SPEED |
161 				  USB_PORT_STAT_SUSPEND);
162 			break;
163 		case XENUSB_SPEED_LOW:
164 			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
165 			info->ports[port].status |= USB_PORT_STAT_LOW_SPEED;
166 			break;
167 		case XENUSB_SPEED_FULL:
168 			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
169 			break;
170 		case XENUSB_SPEED_HIGH:
171 			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
172 			info->ports[port].status |= USB_PORT_STAT_HIGH_SPEED;
173 			break;
174 		default: /* error */
175 			return;
176 		}
177 		info->ports[port].status |= (USB_PORT_STAT_C_CONNECTION << 16);
178 	}
179 }
180 
181 /*
182  * set virtual device connection status
183  */
184 static int xenhcd_rhport_connect(struct xenhcd_info *info, __u8 portnum,
185 				 __u8 speed)
186 {
187 	int port;
188 
189 	if (portnum < 1 || portnum > info->rh_numports)
190 		return -EINVAL; /* invalid port number */
191 
192 	port = portnum - 1;
193 	if (info->devices[port].speed != speed) {
194 		switch (speed) {
195 		case XENUSB_SPEED_NONE: /* disconnect */
196 			info->devices[port].status = USB_STATE_NOTATTACHED;
197 			break;
198 		case XENUSB_SPEED_LOW:
199 		case XENUSB_SPEED_FULL:
200 		case XENUSB_SPEED_HIGH:
201 			info->devices[port].status = USB_STATE_ATTACHED;
202 			break;
203 		default: /* error */
204 			return -EINVAL;
205 		}
206 		info->devices[port].speed = speed;
207 		info->ports[port].c_connection = true;
208 
209 		xenhcd_set_connect_state(info, portnum);
210 	}
211 
212 	return 0;
213 }
214 
215 /*
216  * SetPortFeature(PORT_SUSPENDED)
217  */
218 static void xenhcd_rhport_suspend(struct xenhcd_info *info, int portnum)
219 {
220 	int port;
221 
222 	port = portnum - 1;
223 	info->ports[port].status |= USB_PORT_STAT_SUSPEND;
224 	info->devices[port].status = USB_STATE_SUSPENDED;
225 }
226 
227 /*
228  * ClearPortFeature(PORT_SUSPENDED)
229  */
230 static void xenhcd_rhport_resume(struct xenhcd_info *info, int portnum)
231 {
232 	int port;
233 
234 	port = portnum - 1;
235 	if (info->ports[port].status & USB_PORT_STAT_SUSPEND) {
236 		info->ports[port].resuming = true;
237 		info->ports[port].timeout = jiffies + msecs_to_jiffies(20);
238 	}
239 }
240 
241 /*
242  * SetPortFeature(PORT_POWER)
243  */
244 static void xenhcd_rhport_power_on(struct xenhcd_info *info, int portnum)
245 {
246 	int port;
247 
248 	port = portnum - 1;
249 	if ((info->ports[port].status & USB_PORT_STAT_POWER) == 0) {
250 		info->ports[port].status |= USB_PORT_STAT_POWER;
251 		if (info->devices[port].status != USB_STATE_NOTATTACHED)
252 			info->devices[port].status = USB_STATE_POWERED;
253 		if (info->ports[port].c_connection)
254 			xenhcd_set_connect_state(info, portnum);
255 	}
256 }
257 
258 /*
259  * ClearPortFeature(PORT_POWER)
260  * SetConfiguration(non-zero)
261  * Power_Source_Off
262  * Over-current
263  */
264 static void xenhcd_rhport_power_off(struct xenhcd_info *info, int portnum)
265 {
266 	int port;
267 
268 	port = portnum - 1;
269 	if (info->ports[port].status & USB_PORT_STAT_POWER) {
270 		info->ports[port].status = 0;
271 		if (info->devices[port].status != USB_STATE_NOTATTACHED)
272 			info->devices[port].status = USB_STATE_ATTACHED;
273 	}
274 }
275 
276 /*
277  * ClearPortFeature(PORT_ENABLE)
278  */
279 static void xenhcd_rhport_disable(struct xenhcd_info *info, int portnum)
280 {
281 	int port;
282 
283 	port = portnum - 1;
284 	info->ports[port].status &= ~USB_PORT_STAT_ENABLE;
285 	info->ports[port].status &= ~USB_PORT_STAT_SUSPEND;
286 	info->ports[port].resuming = false;
287 	if (info->devices[port].status != USB_STATE_NOTATTACHED)
288 		info->devices[port].status = USB_STATE_POWERED;
289 }
290 
291 /*
292  * SetPortFeature(PORT_RESET)
293  */
294 static void xenhcd_rhport_reset(struct xenhcd_info *info, int portnum)
295 {
296 	int port;
297 
298 	port = portnum - 1;
299 	info->ports[port].status &= ~(USB_PORT_STAT_ENABLE |
300 				      USB_PORT_STAT_LOW_SPEED |
301 				      USB_PORT_STAT_HIGH_SPEED);
302 	info->ports[port].status |= USB_PORT_STAT_RESET;
303 
304 	if (info->devices[port].status != USB_STATE_NOTATTACHED)
305 		info->devices[port].status = USB_STATE_ATTACHED;
306 
307 	/* 10msec reset signaling */
308 	info->ports[port].timeout = jiffies + msecs_to_jiffies(10);
309 }
310 
311 #ifdef CONFIG_PM
312 static int xenhcd_bus_suspend(struct usb_hcd *hcd)
313 {
314 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
315 	int ret = 0;
316 	int i, ports;
317 
318 	ports = info->rh_numports;
319 
320 	spin_lock_irq(&info->lock);
321 	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
322 		ret = -ESHUTDOWN;
323 	} else {
324 		/* suspend any active ports */
325 		for (i = 1; i <= ports; i++)
326 			xenhcd_rhport_suspend(info, i);
327 	}
328 	spin_unlock_irq(&info->lock);
329 
330 	del_timer_sync(&info->watchdog);
331 
332 	return ret;
333 }
334 
335 static int xenhcd_bus_resume(struct usb_hcd *hcd)
336 {
337 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
338 	int ret = 0;
339 	int i, ports;
340 
341 	ports = info->rh_numports;
342 
343 	spin_lock_irq(&info->lock);
344 	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
345 		ret = -ESHUTDOWN;
346 	} else {
347 		/* resume any suspended ports */
348 		for (i = 1; i <= ports; i++)
349 			xenhcd_rhport_resume(info, i);
350 	}
351 	spin_unlock_irq(&info->lock);
352 
353 	return ret;
354 }
355 #endif
356 
357 static void xenhcd_hub_descriptor(struct xenhcd_info *info,
358 				  struct usb_hub_descriptor *desc)
359 {
360 	__u16 temp;
361 	int ports = info->rh_numports;
362 
363 	desc->bDescriptorType = 0x29;
364 	desc->bPwrOn2PwrGood = 10; /* EHCI says 20ms max */
365 	desc->bHubContrCurrent = 0;
366 	desc->bNbrPorts = ports;
367 
368 	/* size of DeviceRemovable and PortPwrCtrlMask fields */
369 	temp = 1 + (ports / 8);
370 	desc->bDescLength = 7 + 2 * temp;
371 
372 	/* bitmaps for DeviceRemovable and PortPwrCtrlMask */
373 	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
374 	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
375 
376 	/* per-port over current reporting and no power switching */
377 	temp = 0x000a;
378 	desc->wHubCharacteristics = cpu_to_le16(temp);
379 }
380 
381 /* port status change mask for hub_status_data */
382 #define PORT_C_MASK	((USB_PORT_STAT_C_CONNECTION |		\
383 			  USB_PORT_STAT_C_ENABLE |		\
384 			  USB_PORT_STAT_C_SUSPEND |		\
385 			  USB_PORT_STAT_C_OVERCURRENT |		\
386 			  USB_PORT_STAT_C_RESET) << 16)
387 
388 /*
389  * See USB 2.0 Spec, 11.12.4 Hub and Port Status Change Bitmap.
390  * If any port status changed, write the bitmap to buf and return
391  * its length (number of bytes).
392  * If nothing changed, return 0.
393  */
394 static int xenhcd_hub_status_data(struct usb_hcd *hcd, char *buf)
395 {
396 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
397 	int ports;
398 	int i;
399 	unsigned long flags;
400 	int ret;
401 	int changed = 0;
402 
403 	/* initialize the status to no-changes */
404 	ports = info->rh_numports;
405 	ret = 1 + (ports / 8);
406 	memset(buf, 0, ret);
407 
408 	spin_lock_irqsave(&info->lock, flags);
409 
410 	for (i = 0; i < ports; i++) {
411 		/* check status for each port */
412 		if (info->ports[i].status & PORT_C_MASK) {
413 			buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
414 			changed = 1;
415 		}
416 	}
417 
418 	if ((hcd->state == HC_STATE_SUSPENDED) && (changed == 1))
419 		usb_hcd_resume_root_hub(hcd);
420 
421 	spin_unlock_irqrestore(&info->lock, flags);
422 
423 	return changed ? ret : 0;
424 }
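
/*
 * In the hub status-change bitmap bit 0 is reserved for the hub itself and
 * port N uses bit N, hence the (i + 1) indexing above for port array slot i.
 */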
425 
426 static int xenhcd_hub_control(struct usb_hcd *hcd, __u16 typeReq, __u16 wValue,
427 			      __u16 wIndex, char *buf, __u16 wLength)
428 {
429 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
430 	int ports = info->rh_numports;
431 	unsigned long flags;
432 	int ret = 0;
433 	int i;
434 	int changed = 0;
435 
436 	spin_lock_irqsave(&info->lock, flags);
437 	switch (typeReq) {
438 	case ClearHubFeature:
439 		/* ignore this request */
440 		break;
441 	case ClearPortFeature:
442 		if (!wIndex || wIndex > ports)
443 			goto error;
444 
445 		switch (wValue) {
446 		case USB_PORT_FEAT_SUSPEND:
447 			xenhcd_rhport_resume(info, wIndex);
448 			break;
449 		case USB_PORT_FEAT_POWER:
450 			xenhcd_rhport_power_off(info, wIndex);
451 			break;
452 		case USB_PORT_FEAT_ENABLE:
453 			xenhcd_rhport_disable(info, wIndex);
454 			break;
455 		case USB_PORT_FEAT_C_CONNECTION:
456 			info->ports[wIndex - 1].c_connection = false;
457 			fallthrough;
458 		default:
459 			info->ports[wIndex - 1].status &= ~(1 << wValue);
460 			break;
461 		}
462 		break;
463 	case GetHubDescriptor:
464 		xenhcd_hub_descriptor(info, (struct usb_hub_descriptor *)buf);
465 		break;
466 	case GetHubStatus:
467 		/* local power supply is always good and no over-current exists */
468 		*(__le32 *)buf = cpu_to_le32(0);
469 		break;
470 	case GetPortStatus:
471 		if (!wIndex || wIndex > ports)
472 			goto error;
473 
474 		wIndex--;
475 
476 		/* resume completion */
477 		if (info->ports[wIndex].resuming &&
478 		    time_after_eq(jiffies, info->ports[wIndex].timeout)) {
479 			info->ports[wIndex].status |=
480 				USB_PORT_STAT_C_SUSPEND << 16;
481 			info->ports[wIndex].status &= ~USB_PORT_STAT_SUSPEND;
482 		}
483 
484 		/* reset completion */
485 		if ((info->ports[wIndex].status & USB_PORT_STAT_RESET) != 0 &&
486 		    time_after_eq(jiffies, info->ports[wIndex].timeout)) {
487 			info->ports[wIndex].status |=
488 				USB_PORT_STAT_C_RESET << 16;
489 			info->ports[wIndex].status &= ~USB_PORT_STAT_RESET;
490 
491 			if (info->devices[wIndex].status !=
492 			    USB_STATE_NOTATTACHED) {
493 				info->ports[wIndex].status |=
494 					USB_PORT_STAT_ENABLE;
495 				info->devices[wIndex].status =
496 					USB_STATE_DEFAULT;
497 			}
498 
499 			switch (info->devices[wIndex].speed) {
500 			case XENUSB_SPEED_LOW:
501 				info->ports[wIndex].status |=
502 					USB_PORT_STAT_LOW_SPEED;
503 				break;
504 			case XENUSB_SPEED_HIGH:
505 				info->ports[wIndex].status |=
506 					USB_PORT_STAT_HIGH_SPEED;
507 				break;
508 			default:
509 				break;
510 			}
511 		}
512 
513 		*(__le32 *)buf = cpu_to_le32(info->ports[wIndex].status);
514 		break;
515 	case SetPortFeature:
516 		if (!wIndex || wIndex > ports)
517 			goto error;
518 
519 		switch (wValue) {
520 		case USB_PORT_FEAT_POWER:
521 			xenhcd_rhport_power_on(info, wIndex);
522 			break;
523 		case USB_PORT_FEAT_RESET:
524 			xenhcd_rhport_reset(info, wIndex);
525 			break;
526 		case USB_PORT_FEAT_SUSPEND:
527 			xenhcd_rhport_suspend(info, wIndex);
528 			break;
529 		default:
530 			if (info->ports[wIndex-1].status & USB_PORT_STAT_POWER)
531 				info->ports[wIndex-1].status |= (1 << wValue);
532 		}
533 		break;
534 
535 	case SetHubFeature:
536 		/* not supported */
537 	default:
538 error:
539 		ret = -EPIPE;
540 	}
541 	spin_unlock_irqrestore(&info->lock, flags);
542 
543 	/* check status for each port */
544 	for (i = 0; i < ports; i++) {
545 		if (info->ports[i].status & PORT_C_MASK)
546 			changed = 1;
547 	}
548 	if (changed)
549 		usb_hcd_poll_rh_status(hcd);
550 
551 	return ret;
552 }
553 
554 static void xenhcd_free_urb_priv(struct urb_priv *urbp)
555 {
556 	urbp->urb->hcpriv = NULL;
557 	kmem_cache_free(xenhcd_urbp_cachep, urbp);
558 }
559 
560 static inline unsigned int xenhcd_get_id_from_freelist(struct xenhcd_info *info)
561 {
562 	unsigned int free;
563 
564 	free = info->shadow_free;
565 	info->shadow_free = info->shadow[free].req.id;
566 	info->shadow[free].req.id = 0x0fff; /* debug */
567 	return free;
568 }
569 
570 static inline void xenhcd_add_id_to_freelist(struct xenhcd_info *info,
571 					     unsigned int id)
572 {
573 	info->shadow[id].req.id	= info->shadow_free;
574 	info->shadow[id].urb = NULL;
575 	info->shadow_free = id;
576 }
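
/*
 * The shadow free list is threaded through otherwise unused req.id fields:
 * shadow_free names the first free slot and each free slot's req.id names
 * the next one, giving O(1) allocation and release. This mirrors the shadow
 * ring handling of other Xen frontends such as blkfront.
 */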
577 
578 static inline int xenhcd_count_pages(void *addr, int length)
579 {
580 	unsigned long vaddr = (unsigned long)addr;
581 
582 	return PFN_UP(vaddr + length) - PFN_DOWN(vaddr);
583 }
584 
585 static void xenhcd_gnttab_map(struct xenhcd_info *info, void *addr, int length,
586 			      grant_ref_t *gref_head,
587 			      struct xenusb_request_segment *seg,
588 			      int nr_pages, int flags)
589 {
590 	grant_ref_t ref;
591 	unsigned int offset;
592 	unsigned int len = length;
593 	unsigned int bytes;
594 	int i;
595 
596 	for (i = 0; i < nr_pages; i++) {
597 		offset = offset_in_page(addr);
598 
599 		bytes = PAGE_SIZE - offset;
600 		if (bytes > len)
601 			bytes = len;
602 
603 		ref = gnttab_claim_grant_reference(gref_head);
604 		gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
605 						virt_to_gfn(addr), flags);
606 		seg[i].gref = ref;
607 		seg[i].offset = (__u16)offset;
608 		seg[i].length = (__u16)bytes;
609 
610 		addr += bytes;
611 		len -= bytes;
612 	}
613 }
614 
615 static __u32 xenhcd_pipe_urb_to_xenusb(__u32 urb_pipe, __u8 port)
616 {
617 	__u32 pipe;
618 
619 	pipe = usb_pipedevice(urb_pipe) << XENUSB_PIPE_DEV_SHIFT;
620 	pipe |= usb_pipeendpoint(urb_pipe) << XENUSB_PIPE_EP_SHIFT;
621 	if (usb_pipein(urb_pipe))
622 		pipe |= XENUSB_PIPE_DIR;
623 	switch (usb_pipetype(urb_pipe)) {
624 	case PIPE_ISOCHRONOUS:
625 		pipe |= XENUSB_PIPE_TYPE_ISOC << XENUSB_PIPE_TYPE_SHIFT;
626 		break;
627 	case PIPE_INTERRUPT:
628 		pipe |= XENUSB_PIPE_TYPE_INT << XENUSB_PIPE_TYPE_SHIFT;
629 		break;
630 	case PIPE_CONTROL:
631 		pipe |= XENUSB_PIPE_TYPE_CTRL << XENUSB_PIPE_TYPE_SHIFT;
632 		break;
633 	case PIPE_BULK:
634 		pipe |= XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT;
635 		break;
636 	}
637 	pipe = xenusb_setportnum_pipe(pipe, port);
638 
639 	return pipe;
640 }
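
/*
 * The resulting "pipe" word packs device address, endpoint number, direction,
 * transfer type and (via xenusb_setportnum_pipe()) the root hub port number
 * into a single __u32 shared with the backend; the shift and flag macros come
 * from the xen/interface/io/usbif.h header included above.
 */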
641 
642 static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
643 				      struct xenusb_urb_request *req)
644 {
645 	grant_ref_t gref_head;
646 	int nr_buff_pages = 0;
647 	int nr_isodesc_pages = 0;
648 	int nr_grants = 0;
649 
650 	if (urb->transfer_buffer_length) {
651 		nr_buff_pages = xenhcd_count_pages(urb->transfer_buffer,
652 						urb->transfer_buffer_length);
653 
654 		if (usb_pipeisoc(urb->pipe))
655 			nr_isodesc_pages = xenhcd_count_pages(
656 				&urb->iso_frame_desc[0],
657 				sizeof(struct usb_iso_packet_descriptor) *
658 				urb->number_of_packets);
659 
660 		nr_grants = nr_buff_pages + nr_isodesc_pages;
661 		if (nr_grants > XENUSB_MAX_SEGMENTS_PER_REQUEST) {
662 			pr_err("xenhcd: error: %d grants\n", nr_grants);
663 			return -E2BIG;
664 		}
665 
666 		if (gnttab_alloc_grant_references(nr_grants, &gref_head)) {
667 			pr_err("xenhcd: gnttab_alloc_grant_references() error\n");
668 			return -ENOMEM;
669 		}
670 
671 		xenhcd_gnttab_map(info, urb->transfer_buffer,
672 				  urb->transfer_buffer_length, &gref_head,
673 				  &req->seg[0], nr_buff_pages,
674 				  usb_pipein(urb->pipe) ? 0 : GTF_readonly);
675 	}
676 
677 	req->pipe = xenhcd_pipe_urb_to_xenusb(urb->pipe, urb->dev->portnum);
678 	req->transfer_flags = 0;
679 	if (urb->transfer_flags & URB_SHORT_NOT_OK)
680 		req->transfer_flags |= XENUSB_SHORT_NOT_OK;
681 	req->buffer_length = urb->transfer_buffer_length;
682 	req->nr_buffer_segs = nr_buff_pages;
683 
684 	switch (usb_pipetype(urb->pipe)) {
685 	case PIPE_ISOCHRONOUS:
686 		req->u.isoc.interval = urb->interval;
687 		req->u.isoc.start_frame = urb->start_frame;
688 		req->u.isoc.number_of_packets = urb->number_of_packets;
689 		req->u.isoc.nr_frame_desc_segs = nr_isodesc_pages;
690 
691 		xenhcd_gnttab_map(info, &urb->iso_frame_desc[0],
692 				  sizeof(struct usb_iso_packet_descriptor) *
693 				  urb->number_of_packets,
694 				  &gref_head, &req->seg[nr_buff_pages],
695 				  nr_isodesc_pages, 0);
696 		break;
697 	case PIPE_INTERRUPT:
698 		req->u.intr.interval = urb->interval;
699 		break;
700 	case PIPE_CONTROL:
701 		if (urb->setup_packet)
702 			memcpy(req->u.ctrl, urb->setup_packet, 8);
703 		break;
704 	case PIPE_BULK:
705 		break;
706 	default:
707 		break;
708 	}
709 
710 	if (nr_grants)
711 		gnttab_free_grant_references(gref_head);
712 
713 	return 0;
714 }
715 
716 static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id)
717 {
718 	struct usb_shadow *shadow = info->shadow + id;
719 	int nr_segs = 0;
720 	int i;
721 
722 	if (!shadow->in_flight) {
723 		xenhcd_set_error(info, "Illegal request id");
724 		return;
725 	}
726 	shadow->in_flight = false;
727 
728 	nr_segs = shadow->req.nr_buffer_segs;
729 
730 	if (xenusb_pipeisoc(shadow->req.pipe))
731 		nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;
732 
733 	for (i = 0; i < nr_segs; i++) {
734 		if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref))
735 			xenhcd_set_error(info, "backend didn't release grant");
736 	}
737 
738 	shadow->req.nr_buffer_segs = 0;
739 	shadow->req.u.isoc.nr_frame_desc_segs = 0;
740 }
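
/*
 * gnttab_try_end_foreign_access() fails if the backend still has the grant
 * mapped; in that case the page cannot safely be reused, so the driver flags
 * a protocol error (which blocks further URB submission) rather than hand
 * back memory the backend may still access.
 */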
741 
742 static int xenhcd_translate_status(int status)
743 {
744 	switch (status) {
745 	case XENUSB_STATUS_OK:
746 		return 0;
747 	case XENUSB_STATUS_NODEV:
748 		return -ENODEV;
749 	case XENUSB_STATUS_INVAL:
750 		return -EINVAL;
751 	case XENUSB_STATUS_STALL:
752 		return -EPIPE;
753 	case XENUSB_STATUS_IOERROR:
754 		return -EPROTO;
755 	case XENUSB_STATUS_BABBLE:
756 		return -EOVERFLOW;
757 	default:
758 		return -ESHUTDOWN;
759 	}
760 }
761 
762 static void xenhcd_giveback_urb(struct xenhcd_info *info, struct urb *urb,
763 				int status)
764 {
765 	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
766 	int priv_status = urbp->status;
767 
768 	list_del_init(&urbp->list);
769 	xenhcd_free_urb_priv(urbp);
770 
771 	if (urb->status == -EINPROGRESS)
772 		urb->status = xenhcd_translate_status(status);
773 
774 	spin_unlock(&info->lock);
775 	usb_hcd_giveback_urb(xenhcd_info_to_hcd(info), urb,
776 			     priv_status <= 0 ? priv_status : urb->status);
777 	spin_lock(&info->lock);
778 }
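
/*
 * info->lock is dropped around usb_hcd_giveback_urb() because the URB's
 * completion handler may resubmit it and re-enter this driver's enqueue
 * path, which takes the same lock.
 */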
779 
780 static int xenhcd_do_request(struct xenhcd_info *info, struct urb_priv *urbp)
781 {
782 	struct xenusb_urb_request *req;
783 	struct urb *urb = urbp->urb;
784 	unsigned int id;
785 	int notify;
786 	int ret;
787 
788 	id = xenhcd_get_id_from_freelist(info);
789 	req = &info->shadow[id].req;
790 	req->id = id;
791 
792 	if (unlikely(urbp->unlinked)) {
793 		req->u.unlink.unlink_id = urbp->req_id;
794 		req->pipe = xenusb_setunlink_pipe(xenhcd_pipe_urb_to_xenusb(
795 						 urb->pipe, urb->dev->portnum));
796 		urbp->unlink_req_id = id;
797 	} else {
798 		ret = xenhcd_map_urb_for_request(info, urb, req);
799 		if (ret) {
800 			xenhcd_add_id_to_freelist(info, id);
801 			return ret;
802 		}
803 		urbp->req_id = id;
804 	}
805 
806 	req = RING_GET_REQUEST(&info->urb_ring, info->urb_ring.req_prod_pvt);
807 	*req = info->shadow[id].req;
808 
809 	info->urb_ring.req_prod_pvt++;
810 	info->shadow[id].urb = urb;
811 	info->shadow[id].in_flight = true;
812 
813 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->urb_ring, notify);
814 	if (notify)
815 		notify_remote_via_irq(info->irq);
816 
817 	return 0;
818 }
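
/*
 * Requests are composed in the local shadow entry first and only then copied
 * into the shared ring. The shadow copy is what completion handling trusts
 * later on, since the backend could modify the shared ring contents at any
 * time.
 */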
819 
820 static void xenhcd_kick_pending_urbs(struct xenhcd_info *info)
821 {
822 	struct urb_priv *urbp;
823 
824 	while (!list_empty(&info->pending_submit_list)) {
825 		if (RING_FULL(&info->urb_ring)) {
826 			xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
827 			return;
828 		}
829 
830 		urbp = list_entry(info->pending_submit_list.next,
831 				  struct urb_priv, list);
832 		if (!xenhcd_do_request(info, urbp))
833 			list_move_tail(&urbp->list, &info->in_progress_list);
834 		else
835 			xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
836 	}
837 	xenhcd_timer_action_done(info, TIMER_SCAN_PENDING_URBS);
838 }
839 
840 /*
841  * caller must hold info->lock
842  */
843 static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
844 {
845 	struct urb_priv *urbp, *tmp;
846 	int req_id;
847 
848 	list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
849 		req_id = urbp->req_id;
850 		if (!urbp->unlinked) {
851 			xenhcd_gnttab_done(info, req_id);
852 			if (info->error)
853 				return;
854 			if (urbp->urb->status == -EINPROGRESS)
855 				/* not dequeued */
856 				xenhcd_giveback_urb(info, urbp->urb,
857 						    -ESHUTDOWN);
858 			else	/* dequeued */
859 				xenhcd_giveback_urb(info, urbp->urb,
860 						    urbp->urb->status);
861 		}
862 		info->shadow[req_id].urb = NULL;
863 	}
864 
865 	list_for_each_entry_safe(urbp, tmp, &info->pending_submit_list, list)
866 		xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
867 }
868 
869 /*
870  * caller must hold info->lock
871  */
872 static void xenhcd_giveback_unlinked_urbs(struct xenhcd_info *info)
873 {
874 	struct urb_priv *urbp, *tmp;
875 
876 	list_for_each_entry_safe(urbp, tmp, &info->giveback_waiting_list, list)
877 		xenhcd_giveback_urb(info, urbp->urb, urbp->urb->status);
878 }
879 
880 static int xenhcd_submit_urb(struct xenhcd_info *info, struct urb_priv *urbp)
881 {
882 	int ret;
883 
884 	if (RING_FULL(&info->urb_ring)) {
885 		list_add_tail(&urbp->list, &info->pending_submit_list);
886 		xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
887 		return 0;
888 	}
889 
890 	if (!list_empty(&info->pending_submit_list)) {
891 		list_add_tail(&urbp->list, &info->pending_submit_list);
892 		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
893 		return 0;
894 	}
895 
896 	ret = xenhcd_do_request(info, urbp);
897 	if (ret == 0)
898 		list_add_tail(&urbp->list, &info->in_progress_list);
899 
900 	return ret;
901 }
902 
903 static int xenhcd_unlink_urb(struct xenhcd_info *info, struct urb_priv *urbp)
904 {
905 	int ret;
906 
907 	/* already unlinked? */
908 	if (urbp->unlinked)
909 		return -EBUSY;
910 
911 	urbp->unlinked = true;
912 
913 	/* the urb is still in pending_submit queue */
914 	if (urbp->req_id == ~0) {
915 		list_move_tail(&urbp->list, &info->giveback_waiting_list);
916 		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
917 		return 0;
918 	}
919 
920 	/* send unlink request to backend */
921 	if (RING_FULL(&info->urb_ring)) {
922 		list_move_tail(&urbp->list, &info->pending_unlink_list);
923 		xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
924 		return 0;
925 	}
926 
927 	if (!list_empty(&info->pending_unlink_list)) {
928 		list_move_tail(&urbp->list, &info->pending_unlink_list);
929 		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
930 		return 0;
931 	}
932 
933 	ret = xenhcd_do_request(info, urbp);
934 	if (ret == 0)
935 		list_move_tail(&urbp->list, &info->in_progress_list);
936 
937 	return ret;
938 }
939 
940 static void xenhcd_res_to_urb(struct xenhcd_info *info,
941 			      struct xenusb_urb_response *res, struct urb *urb)
942 {
943 	if (unlikely(!urb))
944 		return;
945 
946 	if (res->actual_length > urb->transfer_buffer_length)
947 		urb->actual_length = urb->transfer_buffer_length;
948 	else if (res->actual_length < 0)
949 		urb->actual_length = 0;
950 	else
951 		urb->actual_length = res->actual_length;
952 	urb->error_count = res->error_count;
953 	urb->start_frame = res->start_frame;
954 	xenhcd_giveback_urb(info, urb, res->status);
955 }
956 
957 static int xenhcd_urb_request_done(struct xenhcd_info *info,
958 				   unsigned int *eoiflag)
959 {
960 	struct xenusb_urb_response res;
961 	RING_IDX i, rp;
962 	__u16 id;
963 	int more_to_do = 0;
964 	unsigned long flags;
965 
966 	spin_lock_irqsave(&info->lock, flags);
967 
968 	rp = info->urb_ring.sring->rsp_prod;
969 	if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
970 		xenhcd_set_error(info, "Illegal index on urb-ring");
971 		goto err;
972 	}
973 	rmb(); /* ensure we see queued responses up to "rp" */
974 
975 	for (i = info->urb_ring.rsp_cons; i != rp; i++) {
976 		RING_COPY_RESPONSE(&info->urb_ring, i, &res);
977 		id = res.id;
978 		if (id >= XENUSB_URB_RING_SIZE) {
979 			xenhcd_set_error(info, "Illegal data on urb-ring");
980 			goto err;
981 		}
982 
983 		if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
984 			xenhcd_gnttab_done(info, id);
985 			if (info->error)
986 				goto err;
987 			xenhcd_res_to_urb(info, &res, info->shadow[id].urb);
988 		}
989 
990 		xenhcd_add_id_to_freelist(info, id);
991 
992 		*eoiflag = 0;
993 	}
994 	info->urb_ring.rsp_cons = i;
995 
996 	if (i != info->urb_ring.req_prod_pvt)
997 		RING_FINAL_CHECK_FOR_RESPONSES(&info->urb_ring, more_to_do);
998 	else
999 		info->urb_ring.sring->rsp_event = i + 1;
1000 
1001 	spin_unlock_irqrestore(&info->lock, flags);
1002 
1003 	return more_to_do;
1004 
1005  err:
1006 	spin_unlock_irqrestore(&info->lock, flags);
1007 	return 0;
1008 }
1009 
1010 static int xenhcd_conn_notify(struct xenhcd_info *info, unsigned int *eoiflag)
1011 {
1012 	struct xenusb_conn_response res;
1013 	struct xenusb_conn_request *req;
1014 	RING_IDX rc, rp;
1015 	__u16 id;
1016 	__u8 portnum, speed;
1017 	int more_to_do = 0;
1018 	int notify;
1019 	int port_changed = 0;
1020 	unsigned long flags;
1021 
1022 	spin_lock_irqsave(&info->lock, flags);
1023 
1024 	rc = info->conn_ring.rsp_cons;
1025 	rp = info->conn_ring.sring->rsp_prod;
1026 	if (RING_RESPONSE_PROD_OVERFLOW(&info->conn_ring, rp)) {
1027 		xenhcd_set_error(info, "Illegal index on conn-ring");
1028 		spin_unlock_irqrestore(&info->lock, flags);
1029 		return 0;
1030 	}
1031 	rmb(); /* ensure we see queued responses up to "rp" */
1032 
1033 	while (rc != rp) {
1034 		RING_COPY_RESPONSE(&info->conn_ring, rc, &res);
1035 		id = res.id;
1036 		portnum = res.portnum;
1037 		speed = res.speed;
1038 		info->conn_ring.rsp_cons = ++rc;
1039 
1040 		if (xenhcd_rhport_connect(info, portnum, speed)) {
1041 			xenhcd_set_error(info, "Illegal data on conn-ring");
1042 			spin_unlock_irqrestore(&info->lock, flags);
1043 			return 0;
1044 		}
1045 
1046 		if (info->ports[portnum - 1].c_connection)
1047 			port_changed = 1;
1048 
1049 		barrier();
1050 
1051 		req = RING_GET_REQUEST(&info->conn_ring,
1052 				       info->conn_ring.req_prod_pvt);
1053 		req->id = id;
1054 		info->conn_ring.req_prod_pvt++;
1055 
1056 		*eoiflag = 0;
1057 	}
1058 
1059 	if (rc != info->conn_ring.req_prod_pvt)
1060 		RING_FINAL_CHECK_FOR_RESPONSES(&info->conn_ring, more_to_do);
1061 	else
1062 		info->conn_ring.sring->rsp_event = rc + 1;
1063 
1064 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
1065 	if (notify)
1066 		notify_remote_via_irq(info->irq);
1067 
1068 	spin_unlock_irqrestore(&info->lock, flags);
1069 
1070 	if (port_changed)
1071 		usb_hcd_poll_rh_status(xenhcd_info_to_hcd(info));
1072 
1073 	return more_to_do;
1074 }
1075 
1076 static irqreturn_t xenhcd_int(int irq, void *dev_id)
1077 {
1078 	struct xenhcd_info *info = (struct xenhcd_info *)dev_id;
1079 	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1080 
1081 	if (unlikely(info->error)) {
1082 		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
1083 		return IRQ_HANDLED;
1084 	}
1085 
1086 	while (xenhcd_urb_request_done(info, &eoiflag) |
1087 	       xenhcd_conn_notify(info, &eoiflag))
1088 		/* Yield point for this unbounded loop. */
1089 		cond_resched();
1090 
1091 	xen_irq_lateeoi(irq, eoiflag);
1092 	return IRQ_HANDLED;
1093 }
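
/*
 * The event channel uses the lateeoi model: the EOI is only signalled after
 * the handler has drained both rings, and XEN_EOI_FLAG_SPURIOUS is reported
 * when no response was consumed, which lets Xen throttle an interrupt storm
 * from a misbehaving backend.
 */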
1094 
1095 static void xenhcd_destroy_rings(struct xenhcd_info *info)
1096 {
1097 	if (info->irq)
1098 		unbind_from_irqhandler(info->irq, info);
1099 	info->irq = 0;
1100 
1101 	xenbus_teardown_ring((void **)&info->urb_ring.sring, 1,
1102 			     &info->urb_ring_ref);
1103 	xenbus_teardown_ring((void **)&info->conn_ring.sring, 1,
1104 			     &info->conn_ring_ref);
1105 }
1106 
1107 static int xenhcd_setup_rings(struct xenbus_device *dev,
1108 			      struct xenhcd_info *info)
1109 {
1110 	struct xenusb_urb_sring *urb_sring;
1111 	struct xenusb_conn_sring *conn_sring;
1112 	int err;
1113 
1114 	info->conn_ring_ref = INVALID_GRANT_REF;
1115 	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH,
1116 				(void **)&urb_sring, 1, &info->urb_ring_ref);
1117 	if (err) {
1118 		xenbus_dev_fatal(dev, err, "allocating urb ring");
1119 		return err;
1120 	}
1121 	XEN_FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE);
1122 
1123 	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH,
1124 				(void **)&conn_sring, 1, &info->conn_ring_ref);
1125 	if (err) {
1126 		xenbus_dev_fatal(dev, err, "allocating conn ring");
1127 		goto fail;
1128 	}
1129 	XEN_FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE);
1130 
1131 	err = xenbus_alloc_evtchn(dev, &info->evtchn);
1132 	if (err) {
1133 		xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
1134 		goto fail;
1135 	}
1136 
1137 	err = bind_evtchn_to_irq_lateeoi(info->evtchn);
1138 	if (err <= 0) {
1139 		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq_lateeoi");
1140 		goto fail;
1141 	}
1142 
1143 	info->irq = err;
1144 
1145 	err = request_threaded_irq(info->irq, NULL, xenhcd_int,
1146 				   IRQF_ONESHOT, "xenhcd", info);
1147 	if (err) {
1148 		xenbus_dev_fatal(dev, err, "request_threaded_irq");
1149 		goto free_irq;
1150 	}
1151 
1152 	return 0;
1153 
1154 free_irq:
1155 	unbind_from_irqhandler(info->irq, info);
1156 fail:
1157 	xenhcd_destroy_rings(info);
1158 	return err;
1159 }
1160 
1161 static int xenhcd_talk_to_backend(struct xenbus_device *dev,
1162 				  struct xenhcd_info *info)
1163 {
1164 	const char *message;
1165 	struct xenbus_transaction xbt;
1166 	int err;
1167 
1168 	err = xenhcd_setup_rings(dev, info);
1169 	if (err)
1170 		return err;
1171 
1172 again:
1173 	err = xenbus_transaction_start(&xbt);
1174 	if (err) {
1175 		xenbus_dev_fatal(dev, err, "starting transaction");
1176 		goto destroy_ring;
1177 	}
1178 
1179 	err = xenbus_printf(xbt, dev->nodename, "urb-ring-ref", "%u",
1180 			    info->urb_ring_ref);
1181 	if (err) {
1182 		message = "writing urb-ring-ref";
1183 		goto abort_transaction;
1184 	}
1185 
1186 	err = xenbus_printf(xbt, dev->nodename, "conn-ring-ref", "%u",
1187 			    info->conn_ring_ref);
1188 	if (err) {
1189 		message = "writing conn-ring-ref";
1190 		goto abort_transaction;
1191 	}
1192 
1193 	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
1194 			    info->evtchn);
1195 	if (err) {
1196 		message = "writing event-channel";
1197 		goto abort_transaction;
1198 	}
1199 
1200 	err = xenbus_transaction_end(xbt, 0);
1201 	if (err) {
1202 		if (err == -EAGAIN)
1203 			goto again;
1204 		xenbus_dev_fatal(dev, err, "completing transaction");
1205 		goto destroy_ring;
1206 	}
1207 
1208 	return 0;
1209 
1210 abort_transaction:
1211 	xenbus_transaction_end(xbt, 1);
1212 	xenbus_dev_fatal(dev, err, "%s", message);
1213 
1214 destroy_ring:
1215 	xenhcd_destroy_rings(info);
1216 
1217 	return err;
1218 }
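
/*
 * A rough sketch of the frontend xenstore nodes written above (the exact
 * path depends on the domain and device id, e.g. device/vusb/<id>/):
 *
 *   urb-ring-ref   = "<grant reference of the URB ring page>"
 *   conn-ring-ref  = "<grant reference of the connection ring page>"
 *   event-channel  = "<local event channel port>"
 *
 * The backend reads these during its own connect phase.
 */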
1219 
1220 static int xenhcd_connect(struct xenbus_device *dev)
1221 {
1222 	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
1223 	struct xenusb_conn_request *req;
1224 	int idx, err;
1225 	int notify;
1226 	char name[TASK_COMM_LEN];
1227 	struct usb_hcd *hcd;
1228 
1229 	hcd = xenhcd_info_to_hcd(info);
1230 	snprintf(name, TASK_COMM_LEN, "xenhcd.%d", hcd->self.busnum);
1231 
1232 	err = xenhcd_talk_to_backend(dev, info);
1233 	if (err)
1234 		return err;
1235 
1236 	/* prepare ring for hotplug notification */
1237 	for (idx = 0; idx < XENUSB_CONN_RING_SIZE; idx++) {
1238 		req = RING_GET_REQUEST(&info->conn_ring, idx);
1239 		req->id = idx;
1240 	}
1241 	info->conn_ring.req_prod_pvt = idx;
1242 
1243 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
1244 	if (notify)
1245 		notify_remote_via_irq(info->irq);
1246 
1247 	return 0;
1248 }
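
/*
 * All XENUSB_CONN_RING_SIZE request slots are pre-posted here so the backend
 * always has a request to answer when it wants to report a port connection
 * change; xenhcd_conn_notify() re-posts each id after consuming the response.
 */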
1249 
1250 static void xenhcd_disconnect(struct xenbus_device *dev)
1251 {
1252 	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
1253 	struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
1254 
1255 	usb_remove_hcd(hcd);
1256 	xenbus_frontend_closed(dev);
1257 }
1258 
1259 static void xenhcd_watchdog(struct timer_list *timer)
1260 {
1261 	struct xenhcd_info *info = from_timer(info, timer, watchdog);
1262 	unsigned long flags;
1263 
1264 	spin_lock_irqsave(&info->lock, flags);
1265 	if (likely(HC_IS_RUNNING(xenhcd_info_to_hcd(info)->state))) {
1266 		xenhcd_timer_action_done(info, TIMER_RING_WATCHDOG);
1267 		xenhcd_giveback_unlinked_urbs(info);
1268 		xenhcd_kick_pending_urbs(info);
1269 	}
1270 	spin_unlock_irqrestore(&info->lock, flags);
1271 }
1272 
1273 /*
1274  * one-time HC init
1275  */
1276 static int xenhcd_setup(struct usb_hcd *hcd)
1277 {
1278 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1279 
1280 	spin_lock_init(&info->lock);
1281 	INIT_LIST_HEAD(&info->pending_submit_list);
1282 	INIT_LIST_HEAD(&info->pending_unlink_list);
1283 	INIT_LIST_HEAD(&info->in_progress_list);
1284 	INIT_LIST_HEAD(&info->giveback_waiting_list);
1285 	timer_setup(&info->watchdog, xenhcd_watchdog, 0);
1286 
1287 	hcd->has_tt = (hcd->driver->flags & HCD_MASK) != HCD_USB11;
1288 
1289 	return 0;
1290 }
1291 
1292 /*
1293  * start HC running
1294  */
1295 static int xenhcd_run(struct usb_hcd *hcd)
1296 {
1297 	hcd->uses_new_polling = 1;
1298 	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1299 	hcd->state = HC_STATE_RUNNING;
1300 	return 0;
1301 }
1302 
1303 /*
1304  * stop running HC
1305  */
1306 static void xenhcd_stop(struct usb_hcd *hcd)
1307 {
1308 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1309 
1310 	del_timer_sync(&info->watchdog);
1311 	spin_lock_irq(&info->lock);
1312 	/* cancel all urbs */
1313 	hcd->state = HC_STATE_HALT;
1314 	xenhcd_cancel_all_enqueued_urbs(info);
1315 	xenhcd_giveback_unlinked_urbs(info);
1316 	spin_unlock_irq(&info->lock);
1317 }
1318 
1319 /*
1320  * called as .urb_enqueue()
1321  * non-error returns are a promise to give back the urb later
1322  */
1323 static int xenhcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1324 			      gfp_t mem_flags)
1325 {
1326 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1327 	struct urb_priv *urbp;
1328 	unsigned long flags;
1329 	int ret;
1330 
1331 	if (unlikely(info->error))
1332 		return -ESHUTDOWN;
1333 
1334 	urbp = kmem_cache_zalloc(xenhcd_urbp_cachep, mem_flags);
1335 	if (!urbp)
1336 		return -ENOMEM;
1337 
1338 	spin_lock_irqsave(&info->lock, flags);
1339 
1340 	urbp->urb = urb;
1341 	urb->hcpriv = urbp;
1342 	urbp->req_id = ~0;
1343 	urbp->unlink_req_id = ~0;
1344 	INIT_LIST_HEAD(&urbp->list);
1345 	urbp->status = 1;
1346 	urb->unlinked = false;
1347 
1348 	ret = xenhcd_submit_urb(info, urbp);
1349 
1350 	if (ret)
1351 		xenhcd_free_urb_priv(urbp);
1352 
1353 	spin_unlock_irqrestore(&info->lock, flags);
1354 
1355 	return ret;
1356 }
1357 
1358 /*
1359  * called as .urb_dequeue()
1360  */
1361 static int xenhcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1362 {
1363 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1364 	struct urb_priv *urbp;
1365 	unsigned long flags;
1366 	int ret = 0;
1367 
1368 	spin_lock_irqsave(&info->lock, flags);
1369 
1370 	urbp = urb->hcpriv;
1371 	if (urbp) {
1372 		urbp->status = status;
1373 		ret = xenhcd_unlink_urb(info, urbp);
1374 	}
1375 
1376 	spin_unlock_irqrestore(&info->lock, flags);
1377 
1378 	return ret;
1379 }
1380 
1381 /*
1382  * called from usb_get_current_frame_number(),
1383  * but almost no drivers use this function.
1384  */
1385 static int xenhcd_get_frame(struct usb_hcd *hcd)
1386 {
1387 	/* it means error, but probably no problem :-) */
1388 	return 0;
1389 }
1390 
1391 static struct hc_driver xenhcd_usb20_hc_driver = {
1392 	.description = "xen-hcd",
1393 	.product_desc = "Xen USB2.0 Virtual Host Controller",
1394 	.hcd_priv_size = sizeof(struct xenhcd_info),
1395 	.flags = HCD_USB2,
1396 
1397 	/* basic HC lifecycle operations */
1398 	.reset = xenhcd_setup,
1399 	.start = xenhcd_run,
1400 	.stop = xenhcd_stop,
1401 
1402 	/* managing urb I/O */
1403 	.urb_enqueue = xenhcd_urb_enqueue,
1404 	.urb_dequeue = xenhcd_urb_dequeue,
1405 	.get_frame_number = xenhcd_get_frame,
1406 
1407 	/* root hub operations */
1408 	.hub_status_data = xenhcd_hub_status_data,
1409 	.hub_control = xenhcd_hub_control,
1410 #ifdef CONFIG_PM
1411 	.bus_suspend = xenhcd_bus_suspend,
1412 	.bus_resume = xenhcd_bus_resume,
1413 #endif
1414 };
1415 
1416 static struct hc_driver xenhcd_usb11_hc_driver = {
1417 	.description = "xen-hcd",
1418 	.product_desc = "Xen USB1.1 Virtual Host Controller",
1419 	.hcd_priv_size = sizeof(struct xenhcd_info),
1420 	.flags = HCD_USB11,
1421 
1422 	/* basic HC lifecycle operations */
1423 	.reset = xenhcd_setup,
1424 	.start = xenhcd_run,
1425 	.stop = xenhcd_stop,
1426 
1427 	/* managing urb I/O */
1428 	.urb_enqueue = xenhcd_urb_enqueue,
1429 	.urb_dequeue = xenhcd_urb_dequeue,
1430 	.get_frame_number = xenhcd_get_frame,
1431 
1432 	/* root hub operations */
1433 	.hub_status_data = xenhcd_hub_status_data,
1434 	.hub_control = xenhcd_hub_control,
1435 #ifdef CONFIG_PM
1436 	.bus_suspend = xenhcd_bus_suspend,
1437 	.bus_resume = xenhcd_bus_resume,
1438 #endif
1439 };
1440 
1441 static struct usb_hcd *xenhcd_create_hcd(struct xenbus_device *dev)
1442 {
1443 	int i;
1444 	int err = 0;
1445 	int num_ports;
1446 	int usb_ver;
1447 	struct usb_hcd *hcd = NULL;
1448 	struct xenhcd_info *info;
1449 
1450 	err = xenbus_scanf(XBT_NIL, dev->otherend, "num-ports", "%d",
1451 			   &num_ports);
1452 	if (err != 1) {
1453 		xenbus_dev_fatal(dev, err, "reading num-ports");
1454 		return ERR_PTR(-EINVAL);
1455 	}
1456 	if (num_ports < 1 || num_ports > XENUSB_MAX_PORTNR) {
1457 		xenbus_dev_fatal(dev, err, "invalid num-ports");
1458 		return ERR_PTR(-EINVAL);
1459 	}
1460 
1461 	err = xenbus_scanf(XBT_NIL, dev->otherend, "usb-ver", "%d", &usb_ver);
1462 	if (err != 1) {
1463 		xenbus_dev_fatal(dev, err, "reading usb-ver");
1464 		return ERR_PTR(-EINVAL);
1465 	}
1466 	switch (usb_ver) {
1467 	case XENUSB_VER_USB11:
1468 		hcd = usb_create_hcd(&xenhcd_usb11_hc_driver, &dev->dev,
1469 				     dev_name(&dev->dev));
1470 		break;
1471 	case XENUSB_VER_USB20:
1472 		hcd = usb_create_hcd(&xenhcd_usb20_hc_driver, &dev->dev,
1473 				     dev_name(&dev->dev));
1474 		break;
1475 	default:
1476 		xenbus_dev_fatal(dev, err, "invalid usb-ver");
1477 		return ERR_PTR(-EINVAL);
1478 	}
1479 	if (!hcd) {
1480 		xenbus_dev_fatal(dev, err,
1481 				 "fail to allocate USB host controller");
1482 		return ERR_PTR(-ENOMEM);
1483 	}
1484 
1485 	info = xenhcd_hcd_to_info(hcd);
1486 	info->xbdev = dev;
1487 	info->rh_numports = num_ports;
1488 
1489 	for (i = 0; i < XENUSB_URB_RING_SIZE; i++) {
1490 		info->shadow[i].req.id = i + 1;
1491 		info->shadow[i].urb = NULL;
1492 		info->shadow[i].in_flight = false;
1493 	}
1494 	info->shadow[XENUSB_URB_RING_SIZE - 1].req.id = 0x0fff;
1495 
1496 	return hcd;
1497 }
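
/*
 * The loop above seeds the shadow free list: entry i points to i + 1 and the
 * last entry carries the 0x0fff sentinel, matching the "debug" marker used in
 * xenhcd_get_id_from_freelist(). shadow_free itself starts at 0 because the
 * hcd_priv area is allocated zeroed by usb_create_hcd().
 */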
1498 
1499 static void xenhcd_backend_changed(struct xenbus_device *dev,
1500 				   enum xenbus_state backend_state)
1501 {
1502 	switch (backend_state) {
1503 	case XenbusStateInitialising:
1504 	case XenbusStateReconfiguring:
1505 	case XenbusStateReconfigured:
1506 	case XenbusStateUnknown:
1507 		break;
1508 
1509 	case XenbusStateInitWait:
1510 	case XenbusStateInitialised:
1511 	case XenbusStateConnected:
1512 		if (dev->state != XenbusStateInitialising)
1513 			break;
1514 		if (!xenhcd_connect(dev))
1515 			xenbus_switch_state(dev, XenbusStateConnected);
1516 		break;
1517 
1518 	case XenbusStateClosed:
1519 		if (dev->state == XenbusStateClosed)
1520 			break;
1521 		fallthrough;	/* Missed the backend's Closing state. */
1522 	case XenbusStateClosing:
1523 		xenhcd_disconnect(dev);
1524 		break;
1525 
1526 	default:
1527 		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
1528 				 backend_state);
1529 		break;
1530 	}
1531 }
1532 
1533 static int xenhcd_remove(struct xenbus_device *dev)
1534 {
1535 	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
1536 	struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
1537 
1538 	xenhcd_destroy_rings(info);
1539 	usb_put_hcd(hcd);
1540 
1541 	return 0;
1542 }
1543 
1544 static int xenhcd_probe(struct xenbus_device *dev,
1545 			const struct xenbus_device_id *id)
1546 {
1547 	int err;
1548 	struct usb_hcd *hcd;
1549 	struct xenhcd_info *info;
1550 
1551 	if (usb_disabled())
1552 		return -ENODEV;
1553 
1554 	hcd = xenhcd_create_hcd(dev);
1555 	if (IS_ERR(hcd)) {
1556 		err = PTR_ERR(hcd);
1557 		xenbus_dev_fatal(dev, err,
1558 				 "fail to create usb host controller");
1559 		return err;
1560 	}
1561 
1562 	info = xenhcd_hcd_to_info(hcd);
1563 	dev_set_drvdata(&dev->dev, info);
1564 
1565 	err = usb_add_hcd(hcd, 0, 0);
1566 	if (err) {
1567 		xenbus_dev_fatal(dev, err, "fail to add USB host controller");
1568 		usb_put_hcd(hcd);
1569 		dev_set_drvdata(&dev->dev, NULL);
1570 	}
1571 
1572 	return err;
1573 }
1574 
1575 static const struct xenbus_device_id xenhcd_ids[] = {
1576 	{ "vusb" },
1577 	{ "" },
1578 };
1579 
1580 static struct xenbus_driver xenhcd_driver = {
1581 	.ids			= xenhcd_ids,
1582 	.probe			= xenhcd_probe,
1583 	.otherend_changed	= xenhcd_backend_changed,
1584 	.remove			= xenhcd_remove,
1585 };
1586 
1587 static int __init xenhcd_init(void)
1588 {
1589 	if (!xen_domain())
1590 		return -ENODEV;
1591 
1592 	xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv",
1593 					sizeof(struct urb_priv), 0, 0, NULL);
1594 	if (!xenhcd_urbp_cachep) {
1595 		pr_err("xenhcd failed to create kmem cache\n");
1596 		return -ENOMEM;
1597 	}
1598 
1599 	return xenbus_register_frontend(&xenhcd_driver);
1600 }
1601 module_init(xenhcd_init);
1602 
1603 static void __exit xenhcd_exit(void)
1604 {
1605 	kmem_cache_destroy(xenhcd_urbp_cachep);
1606 	xenbus_unregister_driver(&xenhcd_driver);
1607 }
1608 module_exit(xenhcd_exit);
1609 
1610 MODULE_ALIAS("xen:vusb");
1611 MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
1612 MODULE_DESCRIPTION("Xen USB Virtual Host Controller driver (xen-hcd)");
1613 MODULE_LICENSE("Dual BSD/GPL");
1614