/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <net/sock.h>
#include <linux/list.h>
#include <linux/kthread.h>

#include "usbip_common.h"
#include "vudc.h"

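/* Fill the usbip_header_basic fields common to every reply PDU. */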
static inline void setup_base_pdu(struct usbip_header_basic *base,
				  __u32 command, __u32 seqnum)
{
	base->command	= command;
	base->seqnum	= seqnum;
	base->devid	= 0;
	base->ep	= 0;
	base->direction = 0;
}

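/* Build a RET_SUBMIT header for a completed URB. */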
static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urbp *urb_p)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, urb_p->seqnum);
	usbip_pack_pdu(rpdu, urb_p->urb, USBIP_RET_SUBMIT, 1);
}

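/* Build a RET_UNLINK header carrying the unlink status. */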
static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
				 struct v_unlink *unlink)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
	rpdu->u.ret_unlink.status = unlink->status;
}

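/*
 * Send a RET_UNLINK PDU over the connection socket and free the
 * unlink request.  Returns the number of bytes sent or a negative
 * errno; a short send raises a TCP error event and is reported
 * as -EPIPE.
 */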
static int v_send_ret_unlink(struct vudc *udc, struct v_unlink *unlink)
{
	struct msghdr msg;
	struct kvec iov[1];
	size_t txsize;
	int ret;
	struct usbip_header pdu_header;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));
	memset(&iov, 0, sizeof(iov));

	/* 1. setup usbip_header */
	setup_ret_unlink_pdu(&pdu_header, unlink);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[0].iov_base = &pdu_header;
	iov[0].iov_len  = sizeof(pdu_header);
	txsize += sizeof(pdu_header);

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov,
			     1, txsize);
	/* free the request on both paths so an error cannot leak it */
	kfree(unlink);
	if (ret != txsize) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			return -EPIPE;
		return ret;
	}

	return txsize;
}

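/*
 * Send a RET_SUBMIT PDU for a completed URB: the header, then the
 * payload for IN transfers, then the ISO packet descriptors for
 * isochronous endpoints.  Consumes urb_p whether or not the send
 * succeeds.
 */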
static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct urb *urb = urb_p->urb;
	struct usbip_header pdu_header;
	struct usbip_iso_packet_descriptor *iso_buffer = NULL;
	struct kvec *iov = NULL;
	int iovnum = 0;
	int ret = 0;
	size_t txsize;
	struct msghdr msg;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));

	if (urb->actual_length > 0 && !urb->transfer_buffer) {
		dev_err(&udc->gadget.dev,
			"urb: actual_length %d transfer_buffer null\n",
			urb->actual_length);
		/* fail through 'out' so urb_p is still released */
		ret = -EINVAL;
		goto out;
	}

	if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
		iovnum = 2 + urb->number_of_packets;
	else
		iovnum = 2;

	iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		ret = -ENOMEM;
		goto out;
	}
	iovnum = 0;

	/* 1. setup usbip_header */
	setup_ret_submit_pdu(&pdu_header, urb_p);
	usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
			  pdu_header.base.seqnum);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[iovnum].iov_base = &pdu_header;
	iov[iovnum].iov_len  = sizeof(pdu_header);
	iovnum++;
	txsize += sizeof(pdu_header);

	/* 2. setup transfer buffer */
	if (urb_p->type != USB_ENDPOINT_XFER_ISOC &&
	    usb_pipein(urb->pipe) && urb->actual_length > 0) {
		iov[iovnum].iov_base = urb->transfer_buffer;
		iov[iovnum].iov_len  = urb->actual_length;
		iovnum++;
		txsize += urb->actual_length;
	} else if (urb_p->type == USB_ENDPOINT_XFER_ISOC &&
		usb_pipein(urb->pipe)) {
		/* FIXME - copypasted from stub_tx, refactor */
		int i;

		for (i = 0; i < urb->number_of_packets; i++) {
			iov[iovnum].iov_base = urb->transfer_buffer +
				urb->iso_frame_desc[i].offset;
			iov[iovnum].iov_len =
				urb->iso_frame_desc[i].actual_length;
			iovnum++;
			txsize += urb->iso_frame_desc[i].actual_length;
		}

		if (txsize != sizeof(pdu_header) + urb->actual_length) {
			usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
			ret = -EPIPE;
			goto out;
		}
	}
	/* else - no buffer to send */

	/* 3. setup iso_packet_descriptor */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC) {
		ssize_t len = 0;

		iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
		if (!iso_buffer) {
			usbip_event_add(&udc->ud,
					VUDC_EVENT_ERROR_MALLOC);
			ret = -ENOMEM;
			goto out;
		}

		iov[iovnum].iov_base = iso_buffer;
		iov[iovnum].iov_len  = len;
		txsize += len;
		iovnum++;
	}

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg,
			     iov, iovnum, txsize);
	if (ret != txsize) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			ret = -EPIPE;
		goto out;
	}

out:
	kfree(iov);
	kfree(iso_buffer);
	free_urbp_and_urb(urb_p);
	if (ret < 0)
		return ret;
	return txsize;
}

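/*
 * Drain udc->tx_queue, sending one reply per queued tx_item.  The
 * queue lock is dropped around each send.  Returns the total number
 * of bytes sent, or the first negative error.
 */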
static int v_send_ret(struct vudc *udc)
{
	unsigned long flags;
	struct tx_item *txi;
	size_t total_size = 0;
	int ret = 0;

	spin_lock_irqsave(&udc->lock_tx, flags);
	while (!list_empty(&udc->tx_queue)) {
		txi = list_first_entry(&udc->tx_queue, struct tx_item,
				       tx_entry);
		list_del(&txi->tx_entry);
		spin_unlock_irqrestore(&udc->lock_tx, flags);

		switch (txi->type) {
		case TX_SUBMIT:
			ret = v_send_ret_submit(udc, txi->s);
			break;
		case TX_UNLINK:
			ret = v_send_ret_unlink(udc, txi->u);
			break;
		}
		kfree(txi);

		if (ret < 0)
			return ret;

		total_size += ret;

		spin_lock_irqsave(&udc->lock_tx, flags);
	}

	spin_unlock_irqrestore(&udc->lock_tx, flags);
	return total_size;
}

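/*
 * Transmitter thread: waits until tx_queue has work or the thread is
 * told to stop, then flushes the queue.  Also exits early on a usbip
 * event or a send error.
 */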
int v_tx_loop(void *data)
{
	struct usbip_device *ud = (struct usbip_device *) data;
	struct vudc *udc = container_of(ud, struct vudc, ud);
	int ret;

	while (!kthread_should_stop()) {
		if (usbip_event_happened(&udc->ud))
			break;
		ret = v_send_ret(udc);
		if (ret < 0) {
			pr_warn("v_tx exit with error %d\n", ret);
			break;
		}
		wait_event_interruptible(udc->tx_waitq,
					 (!list_empty(&udc->tx_queue) ||
					 kthread_should_stop()));
	}

	return 0;
}

/* called with spinlocks held; queues a RET_UNLINK for the tx thread */
void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status)
{
	struct tx_item *txi;
	struct v_unlink *unlink;

	txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
	if (!txi) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		return;
	}
	unlink = kzalloc(sizeof(*unlink), GFP_ATOMIC);
	if (!unlink) {
		kfree(txi);
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		return;
	}

	unlink->seqnum = seqnum;
	unlink->status = status;
	txi->type = TX_UNLINK;
	txi->u = unlink;

	list_add_tail(&txi->tx_entry, &udc->tx_queue);
}

/* called with spinlocks held; queues a RET_SUBMIT for the tx thread */
void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct tx_item *txi;

	txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
	if (!txi) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		return;
	}

	txi->type = TX_SUBMIT;
	txi->s = urb_p;

	list_add_tail(&txi->tx_entry, &udc->tx_queue);
}