/*********************************************************************
 *
 * Filename:      wrapper.c
 * Version:       1.2
 * Description:   IrDA SIR async wrapper layer
 * Status:        Stable
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Mon Aug  4 20:40:53 1997
 * Modified at:   Fri Jan 28 13:21:09 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 * Modified at:   Fri May 28  3:11 CST 1999
 * Modified by:   Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
 *
 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
 *     All Rights Reserved.
 *     Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/crc.h>
#include <net/irda/irlap.h>
#include <net/irda/irlap_frame.h>
#include <net/irda/irda_device.h>

/************************** FRAME WRAPPING **************************/
/*
 * Wrap and stuff SIR frames
 *
 * Note : at FIR and MIR, HDLC framing is used and usually handled
 * by the controller, so we come here only for SIR... Jean II
 */
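
/*
 * On the wire, a frame produced by async_wrap_skb() below looks like
 * (payload and FCS bytes are stuffed by stuff_byte() where needed) :
 *
 *	XBOF ... XBOF | BOF | IrLAP payload | FCS (16 bits, LSB first) | EOF
 */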

/*
 * Function stuff_byte (byte, buf)
 *
 *    Byte stuff one single byte and put the result in the buffer pointed
 *    to by buf. The buffer must at all times have room for two more bytes
 *    to be inserted.
 *
 * This is used in a tight loop, so it is better inlined; it therefore
 * needs to be defined before its callers.
 * (2000 bytes on P6 200MHz, non-inlined ~370us, inlined ~170us) - Jean II
 */
static inline int stuff_byte(__u8 byte, __u8 *buf)
{
	switch (byte) {
	case BOF: /* FALLTHROUGH */
	case EOF: /* FALLTHROUGH */
	case CE:
		/* Insert transparently coded */
		buf[0] = CE;			/* Send link escape */
		buf[1] = byte ^ IRDA_TRANS;	/* Complement bit 5 */
		return 2;
		/* break; */
	default:
		/* Non-special value, no transparency required */
		buf[0] = byte;
		return 1;
		/* break; */
	}
}
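
/*
 * Example (assuming the usual IrLAP SIR values BOF = 0xc0, CE = 0x7d and
 * IRDA_TRANS = 0x20) : stuff_byte(0xc0, buf) writes { 0x7d, 0xe0 } and
 * returns 2, while stuff_byte(0x42, buf) writes { 0x42 } and returns 1.
 */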

/*
 * Function async_wrap_skb (skb, tx_buff, buffsize)
 *
 *    Make a new buffer with wrapping and stuffing, and make sure we don't
 *    overflow the tx buffer.
 */
int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
{
	struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
	int xbofs;
	int i;
	int n;
	union {
		__u16 value;
		__u8 bytes[2];
	} fcs;

	/* Initialize variables */
	fcs.value = INIT_FCS;
	n = 0;

	/*
	 * Send XBOF's for required min. turn time and for the negotiated
	 * additional XBOFS
	 */

	if (cb->magic != LAP_MAGIC) {
		/*
		 * This will happen for all frames sent from user-space.
		 * Nothing to worry about, but we set the default number of
		 * BOF's
		 */
		IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __func__);
		xbofs = 10;
	} else
		xbofs = cb->xbofs + cb->xbofs_delay;

	IRDA_DEBUG(4, "%s(), xbofs=%d\n", __func__, xbofs);

	/* Check that we never use more than 115 + 48 xbofs */
	if (xbofs > 163) {
		IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __func__,
			   xbofs);
		xbofs = 163;
	}

	memset(tx_buff + n, XBOF, xbofs);
	n += xbofs;

	/* Start of packet character BOF */
	tx_buff[n++] = BOF;

	/* Insert frame and calc CRC */
	for (i = 0; i < skb->len; i++) {
		/*
		 * Check for the possibility of tx buffer overflow. We use
		 * buffsize-5 since the maximum number of bytes that can be
		 * transmitted after this point is 5.
		 */
		if (n >= (buffsize - 5)) {
			IRDA_ERROR("%s(), tx buffer overflow (n=%d)\n",
				   __func__, n);
			return n;
		}

		n += stuff_byte(skb->data[i], tx_buff + n);
		fcs.value = irda_fcs(fcs.value, skb->data[i]);
	}

	/* Insert CRC in little endian format (LSB first) */
	fcs.value = ~fcs.value;
#ifdef __LITTLE_ENDIAN
	n += stuff_byte(fcs.bytes[0], tx_buff + n);
	n += stuff_byte(fcs.bytes[1], tx_buff + n);
#else /* ifdef __BIG_ENDIAN */
	n += stuff_byte(fcs.bytes[1], tx_buff + n);
	n += stuff_byte(fcs.bytes[0], tx_buff + n);
#endif
	tx_buff[n++] = EOF;

	return n;
}
EXPORT_SYMBOL(async_wrap_skb);
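
/*
 * A minimal sketch of the typical transmit-side usage, assuming a SIR
 * driver that keeps an iobuff_t tx_buff in its private data. The names
 * hypothetical_dev, hypothetical_dev_xmit() and start_tx() are made up
 * for illustration only :
 *
 *	static netdev_tx_t hypothetical_dev_xmit(struct sk_buff *skb,
 *						 struct net_device *dev)
 *	{
 *		struct hypothetical_dev *self = netdev_priv(dev);
 *
 *		// Wrap and stuff the IrLAP frame into the driver tx buffer
 *		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
 *						   self->tx_buff.truesize);
 *		start_tx(self);		// push tx_buff.data out the UART
 *
 *		dev_kfree_skb(skb);
 *		return NETDEV_TX_OK;
 *	}
 */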

/************************* FRAME UNWRAPPING *************************/
/*
 * Unwrap and unstuff SIR frames
 *
 * Complete rewrite by Jean II :
 * More inline, faster, more compact, more logical. Jean II
 * (16 bytes on P6 200MHz, old 5 to 7 us, new 4 to 6 us)
 * (24 bytes on P6 200MHz, old 9 to 10 us, new 7 to 8 us)
 * (for reference, 115200 b/s is 1 byte every 69 us)
 * And it reduced wrapper.o by ~900B in the process ;-)
 *
 * Then, we have the addition of ZeroCopy, which is optional
 * (i.e. the driver must initiate it) and improves final processing.
 * (2005 B frame + EOF on P6 200MHz, without 30 to 50 us, with 10 to 25 us)
 *
 * Note : at FIR and MIR, HDLC framing is used and usually handled
 * by the controller, so we come here only for SIR... Jean II
 */

/*
 * We can also choose where we want to do the CRC calculation. We can
 * do it "inline", as we receive the bytes, or "postponed", when
 * receiving the End-Of-Frame.
 * (16 bytes on P6 200MHz, inlined 4 to 6 us, postponed 4 to 5 us)
 * (24 bytes on P6 200MHz, inlined 7 to 8 us, postponed 5 to 7 us)
 * With ZeroCopy :
 * (2005 B frame on P6 200MHz, inlined 10 to 25 us, postponed 140 to 180 us)
 * Without ZeroCopy :
 * (2005 B frame on P6 200MHz, inlined 30 to 50 us, postponed 150 to 180 us)
 * (Note : numbers taken with irq disabled)
 *
 * From those numbers, it's not clear which is the best strategy, because
 * we end up running through a lot of data one way or another (i.e. cache
 * misses). I personally prefer to avoid the huge latency spike of the
 * "postponed" solution, because it comes just at the time when we have
 * lots of protocol processing to do and it will hurt our ability to
 * reach low link turnaround times... Jean II
 */
//#define POSTPONE_RX_CRC

/*
 * Function async_bump (dev, stats, rx_buff)
 *
 *    Got a frame, make a copy of it, and pass it up the stack! We can try
 *    to inline it since it's only called from async_unwrap_eof().
 */
static inline void
async_bump(struct net_device *dev,
	   struct net_device_stats *stats,
	   iobuff_t *rx_buff)
{
	struct sk_buff *newskb;
	struct sk_buff *dataskb;
	int docopy;

	/* Check if we need to copy the data to a new skb or not.
	 * If the driver doesn't use ZeroCopy Rx, we have to do it.
	 * With ZeroCopy Rx, the rx_buff already points to a valid
	 * skb. But, if the frame is small, it is more efficient to
	 * copy it to save memory (copy will be fast anyway - that's
	 * called Rx-copy-break). Jean II */
	docopy = ((rx_buff->skb == NULL) ||
		  (rx_buff->len < IRDA_RX_COPY_THRESHOLD));

	/* Allocate a new skb */
	newskb = dev_alloc_skb(docopy ? rx_buff->len + 1 : rx_buff->truesize);
	if (!newskb) {
		stats->rx_dropped++;
		/* We could deliver the current skb if doing ZeroCopy Rx,
		 * but this would stall the Rx path. Better drop the
		 * packet... Jean II */
		return;
	}

	/* Align IP header to 20 bytes (i.e. increase skb->data)
	 * Note this is only useful with IrLAN, as PPP has a variable
	 * header size (2 or 1 bytes) - Jean II */
	skb_reserve(newskb, 1);

	if (docopy) {
		/* Copy data without CRC (length already checked) */
		skb_copy_to_linear_data(newskb, rx_buff->data,
					rx_buff->len - 2);
		/* Deliver this skb */
		dataskb = newskb;
	} else {
		/* We are using ZeroCopy. Deliver old skb */
		dataskb = rx_buff->skb;
		/* And hook the new skb to the rx_buff */
		rx_buff->skb = newskb;
		rx_buff->head = newskb->data;	/* NOT newskb->head */
		//printk(KERN_DEBUG "ZeroCopy : len = %d, dataskb = %p, newskb = %p\n", rx_buff->len, dataskb, newskb);
	}

	/* Set proper length on skb (without CRC) */
	skb_put(dataskb, rx_buff->len - 2);

	/* Feed it to IrLAP layer */
	dataskb->dev = dev;
	skb_reset_mac_header(dataskb);
	dataskb->protocol = htons(ETH_P_IRDA);

	netif_rx(dataskb);

	stats->rx_packets++;
	stats->rx_bytes += rx_buff->len;

	/* Clean up rx_buff (redundant with async_unwrap_bof() ???) */
	rx_buff->data = rx_buff->head;
	rx_buff->len = 0;
}

/*
 * Function async_unwrap_bof(dev, byte)
 *
 *    Handle Beginning Of Frame character received within a frame
 *
 */
static inline void
async_unwrap_bof(struct net_device *dev,
		 struct net_device_stats *stats,
		 iobuff_t *rx_buff, __u8 byte)
{
	switch (rx_buff->state) {
	case LINK_ESCAPE:
	case INSIDE_FRAME:
		/* Not supposed to happen, the previous frame is not
		 * finished - Jean II */
		IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n",
			   __func__);
		stats->rx_errors++;
		stats->rx_missed_errors++;
		irda_device_set_media_busy(dev, TRUE);
		break;

	case OUTSIDE_FRAME:
	case BEGIN_FRAME:
	default:
		/* We may receive multiple BOF at the start of frame */
		break;
	}

	/* Now receiving frame */
	rx_buff->state = BEGIN_FRAME;
	rx_buff->in_frame = TRUE;

	/* Time to initialize receive buffer */
	rx_buff->data = rx_buff->head;
	rx_buff->len = 0;
	rx_buff->fcs = INIT_FCS;
}

/*
 * Function async_unwrap_eof(dev, byte)
 *
 *    Handle End Of Frame character received within a frame
 *
 */
static inline void
async_unwrap_eof(struct net_device *dev,
		 struct net_device_stats *stats,
		 iobuff_t *rx_buff, __u8 byte)
{
#ifdef POSTPONE_RX_CRC
	int i;
#endif

	switch (rx_buff->state) {
	case OUTSIDE_FRAME:
		/* Probably missed the BOF */
		stats->rx_errors++;
		stats->rx_missed_errors++;
		irda_device_set_media_busy(dev, TRUE);
		break;

	case BEGIN_FRAME:
	case LINK_ESCAPE:
	case INSIDE_FRAME:
	default:
		/* Note : in the case of BEGIN_FRAME and LINK_ESCAPE,
		 * the fcs will most likely not match and generate an
		 * error, as expected - Jean II */
		rx_buff->state = OUTSIDE_FRAME;
		rx_buff->in_frame = FALSE;

#ifdef POSTPONE_RX_CRC
		/* If we haven't done the CRC as we receive bytes, we
		 * must do it now... Jean II */
		for (i = 0; i < rx_buff->len; i++)
			rx_buff->fcs = irda_fcs(rx_buff->fcs,
						rx_buff->data[i]);
#endif

		/* Test FCS and signal success if the frame is good */
		if (rx_buff->fcs == GOOD_FCS) {
			/* Deliver frame */
			async_bump(dev, stats, rx_buff);
			break;
		} else {
			/* Wrong CRC, discard frame! */
			irda_device_set_media_busy(dev, TRUE);

			IRDA_DEBUG(1, "%s(), crc error\n", __func__);
			stats->rx_errors++;
			stats->rx_crc_errors++;
		}
		break;
	}
}

/*
 * Function async_unwrap_ce(dev, byte)
 *
 *    Handle Character Escape character received within a frame
 *
 */
static inline void
async_unwrap_ce(struct net_device *dev,
		struct net_device_stats *stats,
		iobuff_t *rx_buff, __u8 byte)
{
	switch (rx_buff->state) {
	case OUTSIDE_FRAME:
		/* Activate carrier sense */
		irda_device_set_media_busy(dev, TRUE);
		break;

	case LINK_ESCAPE:
		IRDA_WARNING("%s: state not defined\n", __func__);
		break;

	case BEGIN_FRAME:
	case INSIDE_FRAME:
	default:
		/* Stuffed byte coming */
		rx_buff->state = LINK_ESCAPE;
		break;
	}
}

/*
 * Function async_unwrap_other(dev, byte)
 *
 *    Handle other characters received within a frame
 *
 */
static inline void
async_unwrap_other(struct net_device *dev,
		   struct net_device_stats *stats,
		   iobuff_t *rx_buff, __u8 byte)
{
	switch (rx_buff->state) {
		/* This is on the critical path, cases are ordered by
		 * probability (most frequent first) - Jean II */
	case INSIDE_FRAME:
		/* Must be the next byte of the frame */
		if (rx_buff->len < rx_buff->truesize) {
			rx_buff->data[rx_buff->len++] = byte;
#ifndef POSTPONE_RX_CRC
			rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
#endif
		} else {
			IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n",
				   __func__);
			rx_buff->state = OUTSIDE_FRAME;
		}
		break;

	case LINK_ESCAPE:
		/*
		 * Stuffed char, complement bit 5 of byte
		 * following CE, IrLAP p.114
		 */
		byte ^= IRDA_TRANS;
		if (rx_buff->len < rx_buff->truesize) {
			rx_buff->data[rx_buff->len++] = byte;
#ifndef POSTPONE_RX_CRC
			rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
#endif
			rx_buff->state = INSIDE_FRAME;
		} else {
			IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n",
				   __func__);
			rx_buff->state = OUTSIDE_FRAME;
		}
		break;

	case OUTSIDE_FRAME:
		/* Activate carrier sense */
		if (byte != XBOF)
			irda_device_set_media_busy(dev, TRUE);
		break;

	case BEGIN_FRAME:
	default:
		rx_buff->data[rx_buff->len++] = byte;
#ifndef POSTPONE_RX_CRC
		rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
#endif
		rx_buff->state = INSIDE_FRAME;
		break;
	}
}

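/*
 * Summary of the receive state machine driven by async_unwrap_char()
 * below, derived from the helpers above :
 *
 *	OUTSIDE_FRAME --BOF--> BEGIN_FRAME --data byte--> INSIDE_FRAME
 *	BEGIN_FRAME/INSIDE_FRAME --CE--> LINK_ESCAPE --byte ^ IRDA_TRANS--> INSIDE_FRAME
 *	EOF returns to OUTSIDE_FRAME, delivering the frame via async_bump()
 *	when the FCS checks out.
 */
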
/*
 * Function async_unwrap_char (dev, rx_buff, byte)
 *
 *    Parse and de-stuff frames received from the IrDA port
 *
 * This is the main entry point for SIR drivers.
 */
void async_unwrap_char(struct net_device *dev,
		       struct net_device_stats *stats,
		       iobuff_t *rx_buff, __u8 byte)
{
	switch (byte) {
	case CE:
		async_unwrap_ce(dev, stats, rx_buff, byte);
		break;
	case BOF:
		async_unwrap_bof(dev, stats, rx_buff, byte);
		break;
	case EOF:
		async_unwrap_eof(dev, stats, rx_buff, byte);
		break;
	default:
		async_unwrap_other(dev, stats, rx_buff, byte);
		break;
	}
}
EXPORT_SYMBOL(async_unwrap_char);
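
/*
 * A minimal sketch of the typical receive-side usage : a SIR driver feeds
 * this entry point one byte at a time from its UART receive interrupt.
 * The names hypothetical_dev, rx_ready() and read_rx_byte() are made up
 * for illustration only :
 *
 *	static void hypothetical_dev_receive(struct hypothetical_dev *self)
 *	{
 *		while (rx_ready(self)) {
 *			__u8 byte = read_rx_byte(self);
 *
 *			async_unwrap_char(self->netdev, &self->netdev->stats,
 *					  &self->rx_buff, byte);
 *		}
 *	}
 */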