/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

#include "usbip_common.h"
#include "vhci.h"

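/*
 * Fill in a USBIP_CMD_SUBMIT header for an outgoing URB: command code,
 * sequence number, remote device id, transfer direction, and endpoint,
 * followed by the transfer-specific fields packed by usbip_pack_pdu().
 * For control transfers the 8-byte setup packet is copied as well.
 */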
static void setup_cmd_submit_pdu(struct usbip_header *pdup, struct urb *urb)
{
	struct vhci_priv *priv = ((struct vhci_priv *)urb->hcpriv);
	struct vhci_device *vdev = priv->vdev;

	usbip_dbg_vhci_tx("URB, local devnum %u, remote devid %u\n",
			  usb_pipedevice(urb->pipe), vdev->devid);

	pdup->base.command   = USBIP_CMD_SUBMIT;
	pdup->base.seqnum    = priv->seqnum;
	pdup->base.devid     = vdev->devid;
	pdup->base.direction = usb_pipein(urb->pipe) ?
		USBIP_DIR_IN : USBIP_DIR_OUT;
	pdup->base.ep	     = usb_pipeendpoint(urb->pipe);

	usbip_pack_pdu(pdup, urb, USBIP_CMD_SUBMIT, 1);

	if (urb->setup_packet)
		memcpy(pdup->u.cmd_submit.setup, urb->setup_packet, 8);
}

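/*
 * Pop the first URB private data queued on priv_tx and move it to priv_rx,
 * where it waits for the matching RET_SUBMIT reply.  Returns NULL when no
 * transmission is pending.
 */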
static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
{
	struct vhci_priv *priv, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&vdev->priv_lock, flags);

	list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) {
		list_move_tail(&priv->list, &vdev->priv_rx);
		spin_unlock_irqrestore(&vdev->priv_lock, flags);
		return priv;
	}

	spin_unlock_irqrestore(&vdev->priv_lock, flags);

	return NULL;
}

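/*
 * Transmit every queued CMD_SUBMIT request: build the PDU header, append
 * the OUT transfer buffer (scatter-gather aware) and, for isochronous
 * transfers, the iso packet descriptors, then send the whole vector over
 * the device's TCP socket.  Returns the number of bytes sent, or a
 * negative error on allocation or socket failure.
 */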
static int vhci_send_cmd_submit(struct vhci_device *vdev)
{
	struct usbip_iso_packet_descriptor *iso_buffer = NULL;
	struct vhci_priv *priv = NULL;
	struct scatterlist *sg;

	struct msghdr msg;
	struct kvec *iov;
	size_t txsize;

	size_t total_size = 0;
	int iovnum;
	int err = -ENOMEM;
	int i;

	while ((priv = dequeue_from_priv_tx(vdev)) != NULL) {
		int ret;
		struct urb *urb = priv->urb;
		struct usbip_header pdu_header;

		txsize = 0;
		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));
		memset(&iov, 0, sizeof(iov));

		usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
				  priv->seqnum);

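		/*
		 * Size the iov array: one slot for the PDU header, one per
		 * scatter-gather element for OUT SG transfers (otherwise one
		 * for the linear transfer buffer), and one for the iso
		 * packet descriptors.
		 */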
		if (urb->num_sgs && usb_pipeout(urb->pipe))
			iovnum = 2 + urb->num_sgs;
		else
			iovnum = 3;

		iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
		if (!iov) {
			usbip_event_add(&vdev->ud, SDEV_EVENT_ERROR_MALLOC);
			return -ENOMEM;
		}

		if (urb->num_sgs)
			urb->transfer_flags |= URB_DMA_MAP_SG;

		/* 1. setup usbip_header */
		setup_cmd_submit_pdu(&pdu_header, urb);
		usbip_header_correct_endian(&pdu_header, 1);
		iovnum = 0;

		iov[iovnum].iov_base = &pdu_header;
		iov[iovnum].iov_len  = sizeof(pdu_header);
		txsize += sizeof(pdu_header);
		iovnum++;

		/* 2. setup transfer buffer */
		if (!usb_pipein(urb->pipe) && urb->transfer_buffer_length > 0) {
			if (urb->num_sgs &&
				      !usb_endpoint_xfer_isoc(&urb->ep->desc)) {
				for_each_sg(urb->sg, sg, urb->num_sgs, i) {
					iov[iovnum].iov_base = sg_virt(sg);
					iov[iovnum].iov_len = sg->length;
					iovnum++;
				}
			} else {
				iov[iovnum].iov_base = urb->transfer_buffer;
				iov[iovnum].iov_len  =
						urb->transfer_buffer_length;
				iovnum++;
			}
			txsize += urb->transfer_buffer_length;
		}

		/* 3. setup iso_packet_descriptor */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			ssize_t len = 0;

			iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
			if (!iso_buffer) {
				usbip_event_add(&vdev->ud,
						SDEV_EVENT_ERROR_MALLOC);
				goto err_iso_buffer;
			}

			iov[iovnum].iov_base = iso_buffer;
			iov[iovnum].iov_len  = len;
			iovnum++;
			txsize += len;
		}

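		/*
		 * Send header, transfer data, and iso descriptors in one
		 * kernel_sendmsg() call; anything short of a full write is
		 * treated as a fatal TCP error on this connection.
		 */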
		ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, iovnum,
				     txsize);
		if (ret != txsize) {
			pr_err("sendmsg failed!, ret=%d for %zd\n", ret,
			       txsize);
			usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_TCP);
			err = -EPIPE;
			goto err_tx;
		}

		kfree(iov);
		/* This is only for isochronous case */
		kfree(iso_buffer);
		iso_buffer = NULL;

		usbip_dbg_vhci_tx("send txdata\n");

		total_size += txsize;
	}

	return total_size;

err_tx:
	kfree(iso_buffer);
err_iso_buffer:
	kfree(iov);

	return err;
}

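/*
 * Pop the first unlink request queued on unlink_tx and move it to
 * unlink_rx, where it waits for the RET_UNLINK reply.  Returns NULL when
 * no unlink is pending.
 */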
static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
{
	struct vhci_unlink *unlink, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&vdev->priv_lock, flags);

	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
		list_move_tail(&unlink->list, &vdev->unlink_rx);
		spin_unlock_irqrestore(&vdev->priv_lock, flags);
		return unlink;
	}

	spin_unlock_irqrestore(&vdev->priv_lock, flags);

	return NULL;
}

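/*
 * Transmit a CMD_UNLINK PDU for every queued unlink request.  Each PDU
 * carries the sequence number of the URB to be cancelled on the remote
 * host device.
 */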
static int vhci_send_cmd_unlink(struct vhci_device *vdev)
{
	struct vhci_unlink *unlink = NULL;

	struct msghdr msg;
	struct kvec iov[3];
	size_t txsize;

	size_t total_size = 0;

	while ((unlink = dequeue_from_unlink_tx(vdev)) != NULL) {
		int ret;
		struct usbip_header pdu_header;

		txsize = 0;
		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));
		memset(&iov, 0, sizeof(iov));

		usbip_dbg_vhci_tx("setup cmd unlink, %lu\n", unlink->seqnum);

		/* 1. setup usbip_header */
		pdu_header.base.command = USBIP_CMD_UNLINK;
		pdu_header.base.seqnum  = unlink->seqnum;
		pdu_header.base.devid	= vdev->devid;
		pdu_header.base.ep	= 0;
		pdu_header.u.cmd_unlink.seqnum = unlink->unlink_seqnum;

		usbip_header_correct_endian(&pdu_header, 1);

		iov[0].iov_base = &pdu_header;
		iov[0].iov_len  = sizeof(pdu_header);
		txsize += sizeof(pdu_header);
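
		/* CMD_UNLINK has no data payload; only the header is sent. */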
		ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, 1, txsize);
		if (ret != txsize) {
			pr_err("sendmsg failed!, ret=%d for %zd\n", ret,
			       txsize);
			usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_TCP);
			return -1;
		}

		usbip_dbg_vhci_tx("send txdata\n");

		total_size += txsize;
	}

	return total_size;
}

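/*
 * Per-device tx thread: flush pending CMD_SUBMIT and CMD_UNLINK PDUs,
 * then sleep on waitq_tx until new work is queued or the thread is asked
 * to stop.
 */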
int vhci_tx_loop(void *data)
{
	struct usbip_device *ud = data;
	struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);

	while (!kthread_should_stop()) {
		if (vhci_send_cmd_submit(vdev) < 0)
			break;

		if (vhci_send_cmd_unlink(vdev) < 0)
			break;

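		/*
		 * Sleep until more submit or unlink work is queued, or until
		 * kthread_stop() is called.
		 */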
		wait_event_interruptible(vdev->waitq_tx,
					 (!list_empty(&vdev->priv_tx) ||
					  !list_empty(&vdev->unlink_tx) ||
					  kthread_should_stop()));

		usbip_dbg_vhci_tx("pending urbs ?, now wake up\n");
	}

	return 0;
}