/*
 * WUSB Wire Adapter: WLP interface
 * Driver for the Linux Network stack.
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * i1480u's RX handling is simple. i1480u will send the received
 * network packets broken up into fragments; 1 to N fragments make a
 * packet, we assemble them together and deliver the packet with netif_rx().
 *
 * Because each USB transfer is a *single* fragment (except when the
 * transfer contains a first fragment), each URB thus called
 * back contains one or two fragments. So we queue N URBs, each with its own
 * fragment buffer. When a URB is done, we process it (adding to the
 * current skb from the fragment buffer until complete). Once
 * processed, we requeue the URB. There is always a bunch of URBs
 * ready to take data, so the gap between packets should be minimal.
 *
 * An URB's transfer buffer is the data field of a socket buffer. This
 * reduces copying, as data can be passed directly to the network layer.
 * If a complete packet or a 1st fragment is received, the URB's transfer
 * buffer is taken away from it and used to send data to the network
 * layer. In this case a new transfer buffer is allocated to the URB
 * before it is requeued. If a "NEXT" or "LAST" fragment is received,
 * the fragment's contents are appended to the RX packet under
 * construction and the transfer buffer is reused. To be able to use
 * this buffer to assemble complete packets, we set each buffer's size
 * to that of the maximum Ethernet packet that can be received. There
 * is thus room for improvement in memory usage.
 *
 * When the max tx fragment size increases, we should be able to read
 * data into the skbs directly with very simple code.
 *
 * ROADMAP:
 *
 *   ENTRY POINTS:
 *
 *     i1480u_rx_setup(): setup RX context [from i1480u_open()]
 *
 *     i1480u_rx_release(): release RX context [from i1480u_stop()]
 *
 *     i1480u_rx_cb(): called when the RX USB URB receives a
 *                     packet. It removes the header and pushes it up
 *                     the Linux netdev stack with netif_rx().
 *
 *       i1480u_rx_buffer()
 *         i1480u_drop() and i1480u_fix()
 *         i1480u_skb_deliver()
 *
 */
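
/*
 * Illustrative transfer layouts (a sketch inferred from the parsing code
 * below, not a normative spec; field widths are whatever the untd_hdr_*
 * structures in i1480u-wlp.h define):
 *
 *   Complete packet in a single transfer:
 *     [untd_hdr_cmp][wlp_rx_hdr][network packet]
 *
 *   Packet split into fragments (a 1st fragment shares its transfer
 *   with the following NXT or LST fragment; the others arrive alone):
 *     transfer 1: [untd_hdr_1st][wlp_rx_hdr][frag 0][untd_hdr_rst][frag 1]
 *     transfer 2: [untd_hdr_rst][frag 2]          (NXT)
 *     transfer N: [untd_hdr_rst][frag N-1]        (LST, completes the packet)
 */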

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "i1480u-wlp.h"

/*
 * Setup the RX context
 *
 * Each URB is provided with a transfer_buffer that is the data field
 * of a new socket buffer.
 */
int i1480u_rx_setup(struct i1480u *i1480u)
{
	int result, cnt;
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct usb_endpoint_descriptor *epd;
	struct sk_buff *skb;

	/* Alloc RX stuff */
	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
	result = -ENOMEM;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
		rx_buf->i1480u = i1480u;
		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!skb) {
			dev_err(dev,
				"RX: cannot allocate RX buffer %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		skb->dev = net_dev;
		skb->ip_summed = CHECKSUM_NONE;
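		/*
		 * Reserve 2 bytes of headroom so the 14-byte Ethernet
		 * header leaves the IP header 32-bit aligned (the usual
		 * NET_IP_ALIGN trick); the bulk URB below transfers at
		 * most i1480u_MAX_RX_PKT_SIZE - 2 bytes to match.
		 */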
		skb_reserve(skb, 2);
		rx_buf->data = skb;
		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (unlikely(rx_buf->urb == NULL)) {
			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
			  usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
			  rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
			  i1480u_rx_cb, rx_buf);
		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
		if (unlikely(result < 0)) {
			dev_err(dev, "RX: cannot submit URB %d: %d\n",
				cnt, result);
			goto error;
		}
	}
	return 0;

error:
	i1480u_rx_release(i1480u);
	return result;
}


/* Release resources associated with the RX context */
void i1480u_rx_release(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		/* Kill the URB before freeing the skb whose data
		 * field it may still be using as transfer buffer. */
		if (i1480u->rx_buf[cnt].urb) {
			usb_kill_urb(i1480u->rx_buf[cnt].urb);
			usb_free_urb(i1480u->rx_buf[cnt].urb);
		}
		if (i1480u->rx_buf[cnt].data)
			dev_kfree_skb(i1480u->rx_buf[cnt].data);
	}
	if (i1480u->rx_skb != NULL)
		dev_kfree_skb(i1480u->rx_skb);
}

static
void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].urb)
			usb_unlink_urb(i1480u->rx_buf[cnt].urb);
	}
}

/* Fix an out-of-sequence packet */
#define i1480u_fix(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	dev_kfree_skb_irq(i1480u->rx_skb);		\
	i1480u->rx_skb = NULL;				\
	i1480u->rx_untd_pkt_size = 0;			\
} while (0)


/* Drop an out-of-sequence packet */
#define i1480u_drop(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	i1480u->net_dev->stats.rx_dropped++;		\
} while (0)
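
/*
 * Note the different recovery semantics (both are used in
 * i1480u_rx_buffer() below): i1480u_fix() throws away a partially
 * assembled rx_skb so reassembly can restart cleanly at the current
 * fragment, while i1480u_drop() only accounts for a packet that
 * cannot be recovered at all.
 */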



/* Finalizes setting up the SKB and delivers it
 *
 * We first pass the incoming frame to the WLP substack for verification. It
 * may also be a WLP association frame, in which case WLP will take over the
 * processing. If WLP does not take it over it will still verify it; if the
 * frame is invalid the skb will be freed by WLP and we will not continue
 * parsing.
 */
static
void i1480u_skb_deliver(struct i1480u *i1480u)
{
	int should_parse;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;

	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
					 &i1480u->rx_srcaddr);
	if (!should_parse)
		goto out;
	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += i1480u->rx_untd_pkt_size;

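	/*
	 * URB completion runs in interrupt context, hence netif_rx()
	 * here (and dev_kfree_skb_irq() in the macros above).
	 */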
	netif_rx(i1480u->rx_skb);		/* deliver */
out:
	i1480u->rx_skb = NULL;
	i1480u->rx_untd_pkt_size = 0;
}


/*
 * Process a buffer of data received from the USB RX endpoint
 *
 * A first fragment arrives together with a next or last fragment; all
 * other fragments arrive alone.
 *
 * /me hates long functions.
 */
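/*
 * Per-fragment actions, summarized (a sketch of the switch below):
 *
 *   1ST: steal this URB's skb as the new rx_skb (zero copy), strip the
 *        UNTD and WLP headers, record source address and link stats
 *   NXT: memmove() the payload onto the tail of rx_skb
 *   LST: as NXT, then mark the packet complete
 *   CMP: whole packet in one fragment; steal the skb, mark complete
 */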
static
void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
{
	unsigned pkt_completed = 0;	/* !0 when we got all pkt fragments */
	size_t untd_hdr_size, untd_frg_size;
	size_t i1480u_hdr_size;
	struct wlp_rx_hdr *i1480u_hdr = NULL;

	struct i1480u *i1480u = rx_buf->i1480u;
	struct sk_buff *skb = rx_buf->data;
	int size_left = rx_buf->urb->actual_length;
	void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
	struct untd_hdr *untd_hdr;

	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	struct sk_buff *new_skb;

#if 0
	dev_fnstart(dev,
		    "(i1480u %p ptr %p size_left %d)\n", i1480u, ptr, size_left);
	dev_err(dev, "RX packet, %d bytes\n", size_left);
	dump_bytes(dev, ptr, size_left);
#endif
	i1480u_hdr_size = sizeof(struct wlp_rx_hdr);

	while (size_left > 0) {
		if (pkt_completed) {
			i1480u_drop(i1480u, "RX: fragment follows completed "
					 "packet in same buffer. Dropping\n");
			break;
		}
		untd_hdr = ptr;
		if (size_left < sizeof(*untd_hdr)) {	/* Check the UNTD header */
			i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
			goto out;
		}
		if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) {	/* Paranoia: TX set? */
			i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
			goto out;
		}
		switch (untd_hdr_type(untd_hdr)) {	/* Check the UNTD header type */
		case i1480u_PKT_FRAG_1ST: {
			struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
			dev_dbg(dev, "1st fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_1st);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: 1st fragment out of "
					"sequence! Fixing\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short 1st fragment! "
					"Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
						 - i1480u_hdr_size;
			untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
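			/*
			 * The -7 and +18 offsets below map the radio's raw
			 * LQI and RSSI readings onto the scale the stats
			 * code expects (device-specific constants).
			 */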
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			rx_buf->data = NULL; /* need to create new buffer */
			break;
		}
		case i1480u_PKT_FRAG_NXT: {
			dev_dbg(dev, "nxt fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: next fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short NXT fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
					ptr + untd_hdr_size, untd_frg_size);
			break;
		}
		case i1480u_PKT_FRAG_LST: {
			dev_dbg(dev, "lst fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: last fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short LST fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_frg_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
					ptr + untd_hdr_size, untd_frg_size);
			pkt_completed = 1;
			break;
		}
		case i1480u_PKT_FRAG_CMP: {
			dev_dbg(dev, "cmp fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_cmp);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
					   " fragment!\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short CMP fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
			untd_frg_size = i1480u->rx_untd_pkt_size;
			if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			rx_buf->data = NULL;	/* skb handed off to network stack */
			pkt_completed = 1;
			i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
			break;
		}
		default:
			i1480u_drop(i1480u, "RX: unknown packet type %u! "
				    "Dropping\n", untd_hdr_type(untd_hdr));
			goto out;
		}
		size_left -= untd_hdr_size + untd_frg_size;
		if (size_left > 0)
			ptr += untd_hdr_size + untd_frg_size;
	}
	if (pkt_completed)
		i1480u_skb_deliver(i1480u);
out:
	/* recreate needed RX buffers */
	if (rx_buf->data == NULL) {
		/* buffer is being used to receive packet, create new */
		new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!new_skb) {
			if (printk_ratelimit())
				dev_err(dev,
					"RX: cannot allocate RX buffer\n");
		} else {
			new_skb->dev = net_dev;
			new_skb->ip_summed = CHECKSUM_NONE;
			skb_reserve(new_skb, 2);
			rx_buf->data = new_skb;
		}
	}
}


/*
 * Called when an RX URB has finished receiving or has found some kind
 * of error condition.
 *
 * LIMITATIONS:
 *
 *  - We read USB transfers; each transfer contains a SINGLE fragment
 *    (it can contain a complete packet, or a 1st, next, or last
 *    fragment of a packet).
 *    Looks like a transfer can contain more than one fragment (07/18/06)
 *
 *  - Each transfer buffer is the size of the maximum packet that can
 *    be received, minus the 2 bytes of headroom: i1480u_MAX_RX_PKT_SIZE - 2
 *
 *  - We always read the full USB transfer, no partials.
 *
 *  - Each transfer is read directly into a skb. This skb will be used to
 *    send data to the upper layers if it is the first fragment or a complete
 *    packet. In the other cases the data will be copied from the skb to
 *    another skb that is being prepared for the upper layers from a previous
 *    first fragment.
 *
 * It is simply too much of a pain. Gosh, there should be a unified
 * SG infrastructure for *everything* [so that I could declare a SG
 * buffer, pass it to USB for receiving, append some space to it if
 * I wish, receive more until I have the whole chunk, adapt
 * pointers on each fragment to remove hardware headers and then
 * attach that to an skbuff and netif_rx()].
 */
void i1480u_rx_cb(struct urb *urb)
{
	int result;
	int do_parse_buffer = 1;
	struct i1480u_rx_buf *rx_buf = urb->context;
	struct i1480u *i1480u = rx_buf->i1480u;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;
	u8 rx_buf_idx = rx_buf - i1480u->rx_buf;

	switch (urb->status) {
	case 0:
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_err(dev, "RX URB[%u]: going down %d\n",
			rx_buf_idx, urb->status);
		goto error;
	default:
		dev_err(dev, "RX URB[%u]: unknown status %d\n",
			rx_buf_idx, urb->status);
		if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
					EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "RX: max acceptable errors exceeded,"
					" resetting device.\n");
			i1480u_rx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
			goto error;
		}
		do_parse_buffer = 0;
		break;
	}
	spin_lock_irqsave(&i1480u->lock, flags);
	/* chew the data fragments, extract network packets */
	if (do_parse_buffer) {
		i1480u_rx_buffer(rx_buf);
		if (rx_buf->data) {
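			/*
			 * Re-point the URB at the current skb's data:
			 * i1480u_rx_buffer() may have handed the old
			 * buffer to the network stack and allocated a
			 * fresh one.
			 */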
			rx_buf->urb->transfer_buffer = rx_buf->data->data;
			result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
			if (result < 0) {
				dev_err(dev, "RX URB[%u]: cannot submit %d\n",
					rx_buf_idx, result);
			}
		}
	}
	spin_unlock_irqrestore(&i1480u->lock, flags);
error:
	return;
}