• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*********************************************************************
2  *
3  * Filename:      irttp.c
4  * Version:       1.2
5  * Description:   Tiny Transport Protocol (TTP) implementation
6  * Status:        Stable
7  * Author:        Dag Brattli <dagb@cs.uit.no>
8  * Created at:    Sun Aug 31 20:14:31 1997
9  * Modified at:   Wed Jan  5 11:31:27 2000
10  * Modified by:   Dag Brattli <dagb@cs.uit.no>
11  *
12  *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
13  *     All Rights Reserved.
14  *     Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
15  *
16  *     This program is free software; you can redistribute it and/or
17  *     modify it under the terms of the GNU General Public License as
18  *     published by the Free Software Foundation; either version 2 of
19  *     the License, or (at your option) any later version.
20  *
21  *     Neither Dag Brattli nor University of Tromsø admit liability nor
22  *     provide warranty for any of this software. This material is
23  *     provided "AS-IS" and at no charge.
24  *
25  ********************************************************************/
26 
27 #include <linux/skbuff.h>
28 #include <linux/init.h>
29 #include <linux/fs.h>
30 #include <linux/seq_file.h>
31 #include <linux/slab.h>
32 #include <linux/export.h>
33 
34 #include <asm/byteorder.h>
35 #include <asm/unaligned.h>
36 
37 #include <net/irda/irda.h>
38 #include <net/irda/irlap.h>
39 #include <net/irda/irlmp.h>
40 #include <net/irda/parameters.h>
41 #include <net/irda/irttp.h>
42 
43 static struct irttp_cb *irttp;
44 
45 static void __irttp_close_tsap(struct tsap_cb *self);
46 
47 static int irttp_data_indication(void *instance, void *sap,
48 				 struct sk_buff *skb);
49 static int irttp_udata_indication(void *instance, void *sap,
50 				  struct sk_buff *skb);
51 static void irttp_disconnect_indication(void *instance, void *sap,
52 					LM_REASON reason, struct sk_buff *);
53 static void irttp_connect_indication(void *instance, void *sap,
54 				     struct qos_info *qos, __u32 max_sdu_size,
55 				     __u8 header_size, struct sk_buff *skb);
56 static void irttp_connect_confirm(void *instance, void *sap,
57 				  struct qos_info *qos, __u32 max_sdu_size,
58 				  __u8 header_size, struct sk_buff *skb);
59 static void irttp_run_tx_queue(struct tsap_cb *self);
60 static void irttp_run_rx_queue(struct tsap_cb *self);
61 
62 static void irttp_flush_queues(struct tsap_cb *self);
63 static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
64 static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
65 static void irttp_todo_expired(unsigned long data);
66 static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
67 				    int get);
68 
69 static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow);
70 static void irttp_status_indication(void *instance,
71 				    LINK_STATUS link, LOCK_STATUS lock);
72 
/* Information for parsing parameters in IrTTP */
/* Minor table: PI 0x00 is unused, PI 0x01 is MaxSduSize (big-endian int) */
static const pi_minor_info_t pi_minor_call_table[] = {
	{ NULL, 0 },                                             /* 0x00 */
	{ irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
};
/* Single major table with the 2 minor entries above */
static const pi_major_info_t pi_major_call_table[] = {
	{ pi_minor_call_table, 2 }
};
static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
82 
83 /************************ GLOBAL PROCEDURES ************************/
84 
85 /*
86  * Function irttp_init (void)
87  *
88  *    Initialize the IrTTP layer. Called by module initialization code
89  *
90  */
irttp_init(void)91 int __init irttp_init(void)
92 {
93 	irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL);
94 	if (irttp == NULL)
95 		return -ENOMEM;
96 
97 	irttp->magic = TTP_MAGIC;
98 
99 	irttp->tsaps = hashbin_new(HB_LOCK);
100 	if (!irttp->tsaps) {
101 		net_err_ratelimited("%s: can't allocate IrTTP hashbin!\n",
102 				    __func__);
103 		kfree(irttp);
104 		return -ENOMEM;
105 	}
106 
107 	return 0;
108 }
109 
/*
 * Function irttp_cleanup (void)
 *
 *    Called by module destruction/cleanup code
 *
 *    Counterpart of irttp_init(): closes every TSAP still registered in
 *    the hashbin, then frees the global control block.
 */
void irttp_cleanup(void)
{
	/* Check for main structure */
	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);

	/*
	 *  Delete hashbin and close all TSAP instances in it
	 */
	hashbin_delete(irttp->tsaps, (FREE_FUNC) __irttp_close_tsap);

	/* Clear the magic before freeing so stale users trip the asserts */
	irttp->magic = 0;

	/* De-allocate main structure */
	kfree(irttp);

	irttp = NULL;
}
133 
134 /*************************** SUBROUTINES ***************************/
135 
136 /*
137  * Function irttp_start_todo_timer (self, timeout)
138  *
139  *    Start todo timer.
140  *
141  * Made it more effient and unsensitive to race conditions - Jean II
142  */
irttp_start_todo_timer(struct tsap_cb * self,int timeout)143 static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
144 {
145 	/* Set new value for timer */
146 	mod_timer(&self->todo_timer, jiffies + timeout);
147 }
148 
/*
 * Function irttp_todo_expired (data)
 *
 *    Todo timer has expired!
 *
 *    Deferred-work handler: pump the Rx/Tx queues, then complete any
 *    pending disconnect and/or close that could not be done earlier.
 *
 * One of the restriction of the timer is that it is run only on the timer
 * interrupt which run every 10ms. This mean that even if you set the timer
 * with a delay of 0, it may take up to 10ms before it's run.
 * So, to minimise latency and keep cache fresh, we try to avoid using
 * it as much as possible.
 * Note : we can't use tasklets, because they can't be asynchronously
 * killed (need user context), and we can't guarantee that here...
 * Jean II
 */
static void irttp_todo_expired(unsigned long data)
{
	struct tsap_cb *self = (struct tsap_cb *) data;

	/* Check that we still exist */
	if (!self || self->magic != TTP_TSAP_MAGIC)
		return;

	pr_debug("%s(instance=%p)\n", __func__, self);

	/* Try to make some progress, especially on Tx side - Jean II */
	irttp_run_rx_queue(self);
	irttp_run_tx_queue(self);

	/* Check if time for disconnect */
	if (test_bit(0, &self->disconnect_pend)) {
		/* Check if it's possible to disconnect yet */
		if (skb_queue_empty(&self->tx_queue)) {
			/* Make sure disconnect is not pending anymore */
			clear_bit(0, &self->disconnect_pend);	/* FALSE */

			/* Note : self->disconnect_skb may be NULL */
			irttp_disconnect_request(self, self->disconnect_skb,
						 P_NORMAL);
			self->disconnect_skb = NULL;
		} else {
			/* Tx queue not drained yet: try again later */
			irttp_start_todo_timer(self, HZ/10);

			/* No reason to try and close now */
			return;
		}
	}

	/* Check if it's closing time */
	if (self->close_pend)
		/* Finish cleanup - note: this frees 'self' */
		irttp_close_tsap(self);
}
202 
203 /*
204  * Function irttp_flush_queues (self)
205  *
206  *     Flushes (removes all frames) in transitt-buffer (tx_list)
207  */
irttp_flush_queues(struct tsap_cb * self)208 static void irttp_flush_queues(struct tsap_cb *self)
209 {
210 	struct sk_buff *skb;
211 
212 	IRDA_ASSERT(self != NULL, return;);
213 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
214 
215 	/* Deallocate frames waiting to be sent */
216 	while ((skb = skb_dequeue(&self->tx_queue)) != NULL)
217 		dev_kfree_skb(skb);
218 
219 	/* Deallocate received frames */
220 	while ((skb = skb_dequeue(&self->rx_queue)) != NULL)
221 		dev_kfree_skb(skb);
222 
223 	/* Deallocate received fragments */
224 	while ((skb = skb_dequeue(&self->rx_fragments)) != NULL)
225 		dev_kfree_skb(skb);
226 }
227 
/*
 * Function irttp_reassemble (self)
 *
 *    Makes a new (continuous) skb of all the fragments in the fragment
 *    queue
 *
 *    Consumes self->rx_fragments and resets self->rx_sdu_size.
 *    Returns the reassembled skb, or NULL if allocation fails (in which
 *    case the fragment queue is left untouched for a later retry).
 */
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
{
	struct sk_buff *skb, *frag;
	int n = 0;  /* Fragment index */

	IRDA_ASSERT(self != NULL, return NULL;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);

	pr_debug("%s(), self->rx_sdu_size=%d\n", __func__,
		 self->rx_sdu_size);

	skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
	if (!skb)
		return NULL;

	/*
	 * Need to reserve space for TTP header in case this skb needs to
	 * be requeued in case delivery failes
	 */
	skb_reserve(skb, TTP_HEADER);
	skb_put(skb, self->rx_sdu_size);

	/*
	 *  Copy all fragments to a new buffer
	 */
	while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
		/* 'n' tracks the write offset into the linear buffer */
		skb_copy_to_linear_data_offset(skb, n, frag->data, frag->len);
		n += frag->len;

		dev_kfree_skb(frag);
	}

	pr_debug("%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
		 __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
	/* Note : irttp_run_rx_queue() calculate self->rx_sdu_size
	 * by summing the size of all fragments, so we should always
	 * have n == self->rx_sdu_size, except in cases where we
	 * droped the last fragment (when self->rx_sdu_size exceed
	 * self->rx_max_sdu_size), where n < self->rx_sdu_size.
	 * Jean II */
	IRDA_ASSERT(n <= self->rx_sdu_size, n = self->rx_sdu_size;);

	/* Set the new length */
	skb_trim(skb, n);

	self->rx_sdu_size = 0;

	return skb;
}
284 
285 /*
286  * Function irttp_fragment_skb (skb)
287  *
288  *    Fragments a frame and queues all the fragments for transmission
289  *
290  */
irttp_fragment_skb(struct tsap_cb * self,struct sk_buff * skb)291 static inline void irttp_fragment_skb(struct tsap_cb *self,
292 				      struct sk_buff *skb)
293 {
294 	struct sk_buff *frag;
295 	__u8 *frame;
296 
297 	IRDA_ASSERT(self != NULL, return;);
298 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
299 	IRDA_ASSERT(skb != NULL, return;);
300 
301 	/*
302 	 *  Split frame into a number of segments
303 	 */
304 	while (skb->len > self->max_seg_size) {
305 		pr_debug("%s(), fragmenting ...\n", __func__);
306 
307 		/* Make new segment */
308 		frag = alloc_skb(self->max_seg_size+self->max_header_size,
309 				 GFP_ATOMIC);
310 		if (!frag)
311 			return;
312 
313 		skb_reserve(frag, self->max_header_size);
314 
315 		/* Copy data from the original skb into this fragment. */
316 		skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size),
317 			      self->max_seg_size);
318 
319 		/* Insert TTP header, with the more bit set */
320 		frame = skb_push(frag, TTP_HEADER);
321 		frame[0] = TTP_MORE;
322 
323 		/* Hide the copied data from the original skb */
324 		skb_pull(skb, self->max_seg_size);
325 
326 		/* Queue fragment */
327 		skb_queue_tail(&self->tx_queue, frag);
328 	}
329 	/* Queue what is left of the original skb */
330 	pr_debug("%s(), queuing last segment\n", __func__);
331 
332 	frame = skb_push(skb, TTP_HEADER);
333 	frame[0] = 0x00; /* Clear more bit */
334 
335 	/* Queue fragment */
336 	skb_queue_tail(&self->tx_queue, skb);
337 }
338 
339 /*
340  * Function irttp_param_max_sdu_size (self, param)
341  *
342  *    Handle the MaxSduSize parameter in the connect frames, this function
343  *    will be called both when this parameter needs to be inserted into, and
344  *    extracted from the connect frames
345  */
irttp_param_max_sdu_size(void * instance,irda_param_t * param,int get)346 static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
347 				    int get)
348 {
349 	struct tsap_cb *self;
350 
351 	self = instance;
352 
353 	IRDA_ASSERT(self != NULL, return -1;);
354 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
355 
356 	if (get)
357 		param->pv.i = self->tx_max_sdu_size;
358 	else
359 		self->tx_max_sdu_size = param->pv.i;
360 
361 	pr_debug("%s(), MaxSduSize=%d\n", __func__, param->pv.i);
362 
363 	return 0;
364 }
365 
366 /*************************** CLIENT CALLS ***************************/
367 /************************** LMP CALLBACKS **************************/
368 /* Everything is happily mixed up. Waiting for next clean up - Jean II */
369 
370 /*
371  * Initialization, that has to be done on new tsap
372  * instance allocation and on duplication
373  */
irttp_init_tsap(struct tsap_cb * tsap)374 static void irttp_init_tsap(struct tsap_cb *tsap)
375 {
376 	spin_lock_init(&tsap->lock);
377 	init_timer(&tsap->todo_timer);
378 
379 	skb_queue_head_init(&tsap->rx_queue);
380 	skb_queue_head_init(&tsap->tx_queue);
381 	skb_queue_head_init(&tsap->rx_fragments);
382 }
383 
/*
 * Function irttp_open_tsap (stsap, notify)
 *
 *    Create TSAP connection endpoint,
 *
 *    stsap_sel: requested source TSAP selector, or LSAP_ANY to let IrLMP
 *               pick a free one (valid explicit range is 0x01-0x6F).
 *    credit:    initial receive credit to advertise; capped at
 *               TTP_RX_MAX_CREDIT.
 *    notify:    client callbacks, copied into the new TSAP.
 *
 *    Returns the new TSAP control block, or NULL on invalid selector,
 *    allocation failure, or LSAP creation failure.
 */
struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
{
	struct tsap_cb *self;
	struct lsap_cb *lsap;
	notify_t ttp_notify;

	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;);

	/* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
	 * use only 0x01-0x6F. Of course, we can use LSAP_ANY as well.
	 * JeanII */
	if ((stsap_sel != LSAP_ANY) &&
	   ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
		pr_debug("%s(), invalid tsap!\n", __func__);
		return NULL;
	}

	self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
	if (self == NULL)
		return NULL;

	/* Initialize internal objects */
	irttp_init_tsap(self);

	/* Initialise todo timer */
	self->todo_timer.data     = (unsigned long) self;
	self->todo_timer.function = &irttp_todo_expired;

	/* Initialize callbacks for IrLMP to use */
	irda_notify_init(&ttp_notify);
	ttp_notify.connect_confirm = irttp_connect_confirm;
	ttp_notify.connect_indication = irttp_connect_indication;
	ttp_notify.disconnect_indication = irttp_disconnect_indication;
	ttp_notify.data_indication = irttp_data_indication;
	ttp_notify.udata_indication = irttp_udata_indication;
	ttp_notify.flow_indication = irttp_flow_indication;
	/* Only interpose on status events if the client wants them */
	if (notify->status_indication != NULL)
		ttp_notify.status_indication = irttp_status_indication;
	ttp_notify.instance = self;
	/* NOTE(review): strncpy() does not NUL-terminate when the source is
	 * exactly NOTIFY_MAX_NAME long - presumably irda_notify_init()
	 * zeroes the buffer; verify. */
	strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);

	self->magic = TTP_TSAP_MAGIC;
	self->connected = FALSE;

	/*
	 *  Create LSAP at IrLMP layer
	 */
	lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
	if (lsap == NULL) {
		pr_debug("%s: unable to allocate LSAP!!\n", __func__);
		__irttp_close_tsap(self);
		return NULL;
	}

	/*
	 *  If user specified LSAP_ANY as source TSAP selector, then IrLMP
	 *  will replace it with whatever source selector which is free, so
	 *  the stsap_sel we have might not be valid anymore
	 */
	self->stsap_sel = lsap->slsap_sel;
	pr_debug("%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);

	self->notify = *notify;
	self->lsap = lsap;

	hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (long) self, NULL);

	/* Clamp the advertised receive credit to the protocol maximum */
	if (credit > TTP_RX_MAX_CREDIT)
		self->initial_credit = TTP_RX_MAX_CREDIT;
	else
		self->initial_credit = credit;

	return self;
}
EXPORT_SYMBOL(irttp_open_tsap);
464 
465 /*
466  * Function irttp_close (handle)
467  *
468  *    Remove an instance of a TSAP. This function should only deal with the
469  *    deallocation of the TSAP, and resetting of the TSAPs values;
470  *
471  */
__irttp_close_tsap(struct tsap_cb * self)472 static void __irttp_close_tsap(struct tsap_cb *self)
473 {
474 	/* First make sure we're connected. */
475 	IRDA_ASSERT(self != NULL, return;);
476 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
477 
478 	irttp_flush_queues(self);
479 
480 	del_timer(&self->todo_timer);
481 
482 	/* This one won't be cleaned up if we are disconnect_pend + close_pend
483 	 * and we receive a disconnect_indication */
484 	if (self->disconnect_skb)
485 		dev_kfree_skb(self->disconnect_skb);
486 
487 	self->connected = FALSE;
488 	self->magic = ~TTP_TSAP_MAGIC;
489 
490 	kfree(self);
491 }
492 
/*
 * Function irttp_close (self)
 *
 *    Remove TSAP from list of all TSAPs and then deallocate all resources
 *    associated with this TSAP
 *
 *    Returns 0 in all non-assert paths.  If the TSAP is still connected,
 *    the close is deferred: a disconnect is requested (if not already
 *    pending) and the todo timer will call us back to finish the job.
 *
 * Note : because we *free* the tsap structure, it is the responsibility
 * of the caller to make sure we are called only once and to deal with
 * possible race conditions. - Jean II
 */
int irttp_close_tsap(struct tsap_cb *self)
{
	struct tsap_cb *tsap;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	/* Make sure tsap has been disconnected */
	if (self->connected) {
		/* Check if disconnect is not pending */
		if (!test_bit(0, &self->disconnect_pend)) {
			net_warn_ratelimited("%s: TSAP still connected!\n",
					     __func__);
			irttp_disconnect_request(self, NULL, P_NORMAL);
		}
		/* Defer the actual close to the todo timer */
		self->close_pend = TRUE;
		irttp_start_todo_timer(self, HZ/10);

		return 0; /* Will be back! */
	}

	tsap = hashbin_remove(irttp->tsaps, (long) self, NULL);

	IRDA_ASSERT(tsap == self, return -1;);

	/* Close corresponding LSAP */
	if (self->lsap) {
		irlmp_close_lsap(self->lsap);
		self->lsap = NULL;
	}

	/* Frees 'self' - do not touch it afterwards */
	__irttp_close_tsap(self);

	return 0;
}
EXPORT_SYMBOL(irttp_close_tsap);
539 
540 /*
541  * Function irttp_udata_request (self, skb)
542  *
543  *    Send unreliable data on this TSAP
544  *
545  */
irttp_udata_request(struct tsap_cb * self,struct sk_buff * skb)546 int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
547 {
548 	int ret;
549 
550 	IRDA_ASSERT(self != NULL, return -1;);
551 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
552 	IRDA_ASSERT(skb != NULL, return -1;);
553 
554 	/* Take shortcut on zero byte packets */
555 	if (skb->len == 0) {
556 		ret = 0;
557 		goto err;
558 	}
559 
560 	/* Check that nothing bad happens */
561 	if (!self->connected) {
562 		net_warn_ratelimited("%s(), Not connected\n", __func__);
563 		ret = -ENOTCONN;
564 		goto err;
565 	}
566 
567 	if (skb->len > self->max_seg_size) {
568 		net_err_ratelimited("%s(), UData is too large for IrLAP!\n",
569 				    __func__);
570 		ret = -EMSGSIZE;
571 		goto err;
572 	}
573 
574 	irlmp_udata_request(self->lsap, skb);
575 	self->stats.tx_packets++;
576 
577 	return 0;
578 
579 err:
580 	dev_kfree_skb(skb);
581 	return ret;
582 }
583 EXPORT_SYMBOL(irttp_udata_request);
584 
585 
/*
 * Function irttp_data_request (handle, skb)
 *
 *    Queue frame for transmission. If SAR is enabled, fragement the frame
 *    and queue the fragments for transmission
 *
 *    Consumes 'skb' in every path.  Returns 0 on success, -ENOTCONN,
 *    -EMSGSIZE (frame too large for the negotiated limits) or -ENOBUFS
 *    (tx queue full - caller should retry later).
 */
int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
{
	__u8 *frame;
	int ret;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	pr_debug("%s() : queue len = %d\n", __func__,
		 skb_queue_len(&self->tx_queue));

	/* Take shortcut on zero byte packets */
	if (skb->len == 0) {
		ret = 0;
		goto err;
	}

	/* Check that nothing bad happens */
	if (!self->connected) {
		net_warn_ratelimited("%s: Not connected\n", __func__);
		ret = -ENOTCONN;
		goto err;
	}

	/*
	 *  Check if SAR is disabled, and the frame is larger than what fits
	 *  inside an IrLAP frame
	 */
	if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
		net_err_ratelimited("%s: SAR disabled, and data is too large for IrLAP!\n",
				    __func__);
		ret = -EMSGSIZE;
		goto err;
	}

	/*
	 *  Check if SAR is enabled, and the frame is larger than the
	 *  TxMaxSduSize
	 */
	if ((self->tx_max_sdu_size != 0) &&
	    (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
	    (skb->len > self->tx_max_sdu_size)) {
		net_err_ratelimited("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
				    __func__);
		ret = -EMSGSIZE;
		goto err;
	}
	/*
	 *  Check if transmit queue is full
	 */
	if (skb_queue_len(&self->tx_queue) >= TTP_TX_MAX_QUEUE) {
		/*
		 *  Give it a chance to empty itself
		 */
		irttp_run_tx_queue(self);

		/* Drop packet. This error code should trigger the caller
		 * to resend the data in the client code - Jean II */
		ret = -ENOBUFS;
		goto err;
	}

	/* Queue frame, or queue frame segments */
	if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
		/* Queue frame - the TTP header carries the more-bit and the
		 * per-frame credits filled in by irttp_run_tx_queue() */
		IRDA_ASSERT(skb_headroom(skb) >= TTP_HEADER, return -1;);
		frame = skb_push(skb, TTP_HEADER);
		frame[0] = 0x00; /* Clear more bit */

		skb_queue_tail(&self->tx_queue, skb);
	} else {
		/*
		 *  Fragment the frame, this function will also queue the
		 *  fragments, we don't care about the fact the transmit
		 *  queue may be overfilled by all the segments for a little
		 *  while
		 */
		irttp_fragment_skb(self, skb);
	}

	/* Check if we can accept more data from client */
	if ((!self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) > TTP_TX_HIGH_THRESHOLD)) {
		/* Tx queue filling up, so stop client. */
		if (self->notify.flow_indication) {
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_STOP);
		}
		/* self->tx_sdu_busy is the state of the client.
		 * Update state after notifying client to avoid
		 * race condition with irttp_flow_indication().
		 * If the queue empty itself after our test but before
		 * we set the flag, we will fix ourselves below in
		 * irttp_run_tx_queue().
		 * Jean II */
		self->tx_sdu_busy = TRUE;
	}

	/* Try to make some progress */
	irttp_run_tx_queue(self);

	return 0;

err:
	dev_kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(irttp_data_request);
701 
/*
 * Function irttp_run_tx_queue (self)
 *
 *    Transmit packets queued for transmission (if possible)
 *
 *    Sends while we hold send credits and LAP has room, piggybacking up
 *    to 127 of our available receive credits on each frame.  Re-entrant
 *    calls are rejected via the tx_queue_lock flag.
 */
static void irttp_run_tx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	unsigned long flags;
	int n;

	pr_debug("%s() : send_credit = %d, queue_len = %d\n",
		 __func__,
		 self->send_credit, skb_queue_len(&self->tx_queue));

	/* Get exclusive access to the tx queue, otherwise don't touch it */
	if (irda_lock(&self->tx_queue_lock) == FALSE)
		return;

	/* Try to send out frames as long as we have credits
	 * and as long as LAP is not full. If LAP is full, it will
	 * poll us through irttp_flow_indication() - Jean II */
	while ((self->send_credit > 0) &&
	       (!irlmp_lap_tx_queue_full(self->lsap)) &&
	       (skb = skb_dequeue(&self->tx_queue))) {
		/*
		 *  Since we can transmit and receive frames concurrently,
		 *  the code below is a critical region and we must assure that
		 *  nobody messes with the credits while we update them.
		 */
		spin_lock_irqsave(&self->lock, flags);

		n = self->avail_credit;
		self->avail_credit = 0;

		/* Only room for 127 credits in frame */
		if (n > 127) {
			self->avail_credit = n-127;
			n = 127;
		}
		self->remote_credit += n;
		self->send_credit--;

		spin_unlock_irqrestore(&self->lock, flags);

		/*
		 *  More bit must be set by the data_request() or fragment()
		 *  functions; here we only OR in the credit count.
		 */
		skb->data[0] |= (n & 0x7f);

		/* Detach from socket.
		 * The current skb has a reference to the socket that sent
		 * it (skb->sk). When we pass it to IrLMP, the skb will be
		 * stored in in IrLAP (self->wx_list). When we are within
		 * IrLAP, we lose the notion of socket, so we should not
		 * have a reference to a socket. So, we drop it here.
		 *
		 * Why does it matter ?
		 * When the skb is freed (kfree_skb), if it is associated
		 * with a socket, it release buffer space on the socket
		 * (through sock_wfree() and sock_def_write_space()).
		 * If the socket no longer exist, we may crash. Hard.
		 * When we close a socket, we make sure that associated packets
		 * in IrTTP are freed. However, we have no way to cancel
		 * the packet that we have passed to IrLAP. So, if a packet
		 * remains in IrLAP (retry on the link or else) after we
		 * close the socket, we are dead !
		 * Jean II */
		if (skb->sk != NULL) {
			/* IrSOCK application, IrOBEX, ... */
			skb_orphan(skb);
		}
			/* IrCOMM over IrTTP, IrLAN, ... */

		/* Pass the skb to IrLMP - done */
		irlmp_data_request(self->lsap, skb);
		self->stats.tx_packets++;
	}

	/* Check if we can accept more frames from client.
	 * We don't want to wait until the todo timer to do that, and we
	 * can't use tasklets (grr...), so we are obliged to give control
	 * to client. That's ok, this test will be true not too often
	 * (max once per LAP window) and we are called from places
	 * where we can spend a bit of time doing stuff. - Jean II */
	if ((self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
	    (!self->close_pend)) {
		if (self->notify.flow_indication)
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_START);

		/* self->tx_sdu_busy is the state of the client.
		 * We don't really have a race here, but it's always safer
		 * to update our state after the client - Jean II */
		self->tx_sdu_busy = FALSE;
	}

	/* Reset lock */
	self->tx_queue_lock = 0;
}
805 
/*
 * Function irttp_give_credit (self)
 *
 *    Send a dataless flowdata TTP-PDU and give available credit to peer
 *    TSAP
 *
 *    Used when we have credits to grant but no data frame to piggyback
 *    them on.  Silently does nothing if the skb allocation fails (the
 *    credits stay in avail_credit for a later attempt).
 */
static inline void irttp_give_credit(struct tsap_cb *self)
{
	struct sk_buff *tx_skb = NULL;
	unsigned long flags;
	int n;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	pr_debug("%s() send=%d,avail=%d,remote=%d\n",
		 __func__,
		 self->send_credit, self->avail_credit, self->remote_credit);

	/* Give credit to peer */
	tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
	if (!tx_skb)
		return;

	/* Reserve space for LMP, and LAP header */
	skb_reserve(tx_skb, LMP_MAX_HEADER);

	/*
	 *  Since we can transmit and receive frames concurrently,
	 *  the code below is a critical region and we must assure that
	 *  nobody messes with the credits while we update them.
	 */
	spin_lock_irqsave(&self->lock, flags);

	n = self->avail_credit;
	self->avail_credit = 0;

	/* Only space for 127 credits in frame */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}
	self->remote_credit += n;

	spin_unlock_irqrestore(&self->lock, flags);

	/* One-byte TTP header: more-bit clear, credit count in low 7 bits */
	skb_put(tx_skb, 1);
	tx_skb->data[0] = (__u8) (n & 0x7f);

	irlmp_data_request(self->lsap, tx_skb);
	self->stats.tx_packets++;
}
858 
859 /*
860  * Function irttp_udata_indication (instance, sap, skb)
861  *
862  *    Received some unit-data (unreliable)
863  *
864  */
irttp_udata_indication(void * instance,void * sap,struct sk_buff * skb)865 static int irttp_udata_indication(void *instance, void *sap,
866 				  struct sk_buff *skb)
867 {
868 	struct tsap_cb *self;
869 	int err;
870 
871 	self = instance;
872 
873 	IRDA_ASSERT(self != NULL, return -1;);
874 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
875 	IRDA_ASSERT(skb != NULL, return -1;);
876 
877 	self->stats.rx_packets++;
878 
879 	/* Just pass data to layer above */
880 	if (self->notify.udata_indication) {
881 		err = self->notify.udata_indication(self->notify.instance,
882 						    self, skb);
883 		/* Same comment as in irttp_do_data_indication() */
884 		if (!err)
885 			return 0;
886 	}
887 	/* Either no handler, or handler returns an error */
888 	dev_kfree_skb(skb);
889 
890 	return 0;
891 }
892 
/*
 * Function irttp_data_indication (instance, sap, skb)
 *
 *    Receive segment from IrLMP.
 *
 *    Extracts the piggybacked credits from the TTP header, queues data
 *    frames for reassembly/delivery, frees dataless flowdata PDUs, and
 *    kicks the Tx queue if the peer just unblocked us.
 */
static int irttp_data_indication(void *instance, void *sap,
				 struct sk_buff *skb)
{
	struct tsap_cb *self;
	unsigned long flags;
	int n;

	self = instance;

	n = skb->data[0] & 0x7f;     /* Extract the credits */

	self->stats.rx_packets++;

	/*  Deal with inbound credit
	 *  Since we can transmit and receive frames concurrently,
	 *  the code below is a critical region and we must assure that
	 *  nobody messes with the credits while we update them.
	 */
	spin_lock_irqsave(&self->lock, flags);
	self->send_credit += n;
	/* A frame with payload consumes one of the credits we granted */
	if (skb->len > 1)
		self->remote_credit--;
	spin_unlock_irqrestore(&self->lock, flags);

	/*
	 *  Data or dataless packet? Dataless frames contains only the
	 *  TTP_HEADER.
	 */
	if (skb->len > 1) {
		/*
		 *  We don't remove the TTP header, since we must preserve the
		 *  more bit, so the defragment routing knows what to do
		 */
		skb_queue_tail(&self->rx_queue, skb);
	} else {
		/* Dataless flowdata TTP-PDU */
		dev_kfree_skb(skb);
	}


	/* Push data to the higher layer.
	 * We do it synchronously because running the todo timer for each
	 * receive packet would be too much overhead and latency.
	 * By passing control to the higher layer, we run the risk that
	 * it may take time or grab a lock. Most often, the higher layer
	 * will only put packet in a queue.
	 * Anyway, packets are only dripping through the IrDA, so we can
	 * have time before the next packet.
	 * Further, we are run from NET_BH, so the worse that can happen is
	 * us missing the optimal time to send back the PF bit in LAP.
	 * Jean II */
	irttp_run_rx_queue(self);

	/* We now give credits to peer in irttp_run_rx_queue().
	 * We need to send credit *NOW*, otherwise we are going
	 * to miss the next Tx window. The todo timer may take
	 * a while before it's run... - Jean II */

	/*
	 * If the peer device has given us some credits and we didn't have
	 * anyone from before, then we need to shedule the tx queue.
	 * We need to do that because our Tx have stopped (so we may not
	 * get any LAP flow indication) and the user may be stopped as
	 * well. - Jean II
	 */
	if (self->send_credit == n) {
		/* send_credit == n means it was zero before this frame:
		 * restart pushing stuff to LAP */
		irttp_run_tx_queue(self);
		/* Note : we don't want to schedule the todo timer
		 * because it has horrible latency. No tasklets
		 * because the tasklet API is broken. - Jean II */
	}

	return 0;
}
974 
975 /*
976  * Function irttp_status_indication (self, reason)
977  *
978  *    Status_indication, just pass to the higher layer...
979  *
980  */
static void irttp_status_indication(void *instance,
				    LINK_STATUS link, LOCK_STATUS lock)
{
	struct tsap_cb *self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* The client may already have closed the TSAP and gone away */
	if (self->close_pend)
		return;

	/* Nobody listening? Then there is nothing to relay */
	if (self->notify.status_indication == NULL) {
		pr_debug("%s(), no handler\n", __func__);
		return;
	}

	/* Pass the status change straight up to the service user */
	self->notify.status_indication(self->notify.instance, link, lock);
}
1004 
1005 /*
1006  * Function irttp_flow_indication (self, reason)
1007  *
1008  *    Flow_indication : IrLAP tells us to send more data.
1009  *
1010  */
static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
{
	struct tsap_cb *self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	pr_debug("%s(instance=%p)\n", __func__, self);

	/* LAP polls us directly when it wants to fill its Tx window.
	 * We do our best to feed it data (to maximise the window) while
	 * keeping the amount of work bounded, so LAP is never left
	 * hanging. LAP calls us once per free slot, so pushing a single
	 * packet per call keeps the scheduler's round robin intact.
	 * - Jean II */
	irttp_run_tx_queue(self);

	/* Interaction with the higher layer: irttp_run_tx_queue() may
	 * call the client back via notify.flow_indication() when its
	 * queue starts to empty. Tasklets are not safe here and the todo
	 * timer is far too slow, so the callback runs inline. It happens
	 * at most once per LAP window, usually around the third packet
	 * while LAP is still busy with mtt and the first frame, so this
	 * is acceptable. - Jean II */

	/* A disconnect may have been waiting for the queue to drain */
	if (self->disconnect_pend)
		irttp_start_todo_timer(self, 0);
}
1048 
1049 /*
1050  * Function irttp_flow_request (self, command)
1051  *
1052  *    This function could be used by the upper layers to tell IrTTP to stop
1053  *    delivering frames if the receive queues are starting to get full, or
1054  *    to tell IrTTP to start delivering frames again.
1055  */
void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	if (flow == FLOW_STOP) {
		/* Upper layer is congested: stop delivering frames */
		pr_debug("%s(), flow stop\n", __func__);
		self->rx_sdu_busy = TRUE;
	} else if (flow == FLOW_START) {
		/* Upper layer can accept data again */
		pr_debug("%s(), flow start\n", __func__);
		self->rx_sdu_busy = FALSE;

		/* Client says it can take more data; drain our rx
		 * queues ASAP - Jean II */
		irttp_run_rx_queue(self);
	} else {
		pr_debug("%s(), Unknown flow command!\n", __func__);
	}
}
EXPORT_SYMBOL(irttp_flow_request);
1080 
1081 /*
1082  * Function irttp_connect_request (self, dtsap_sel, daddr, qos)
1083  *
1084  *    Try to connect to remote destination TSAP selector
1085  *
1086  */
int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
			  __u32 saddr, __u32 daddr,
			  struct qos_info *qos, __u32 max_sdu_size,
			  struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	__u8 n;

	/* max_sdu_size is __u32, so use an unsigned format specifier */
	pr_debug("%s(), max_sdu_size=%u\n", __func__, max_sdu_size);

	IRDA_ASSERT(self != NULL, return -EBADR;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);

	/* A TSAP carries at most one connection at a time */
	if (self->connected) {
		if (userdata)
			dev_kfree_skb(userdata);
		return -EISCONN;
	}

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 *  Check that the client has reserved enough space for
		 *  headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			{ dev_kfree_skb(userdata); return -1; });
	}

	/* Initialize connection parameters */
	self->connected = FALSE;
	self->avail_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;
	self->dtsap_sel = dtsap_sel;

	n = self->initial_credit;

	self->remote_credit = 0;
	self->send_credit = 0;

	/*
	 *  Give away max 127 credits for now (the TTP header has only
	 *  7 bits for the credit field); keep the surplus locally
	 */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}

	self->remote_credit = n;

	/* SAR enabled? */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			{ dev_kfree_skb(tx_skb); return -1; });

		/* Insert SAR parameters */
		frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;
		frame[1] = 0x04; /* Length */
		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert plain TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		/* Insert initial credit in frame */
		frame[0] = n & 0x7f;
	}

	/* Connect with IrLMP. No QoS parameters for now */
	return irlmp_connect_request(self->lsap, dtsap_sel, saddr, daddr, qos,
				     tx_skb);
}
EXPORT_SYMBOL(irttp_connect_request);
1177 
1178 /*
1179  * Function irttp_connect_confirm (handle, qos, skb)
1180  *
1181  *    Service user confirms TSAP connection with peer.
1182  *
1183  */
static void irttp_connect_confirm(void *instance, void *sap,
				  struct qos_info *qos, __u32 max_seg_size,
				  __u8 max_header_size, struct sk_buff *skb)
{
	struct tsap_cb *self;
	int parameters;
	int ret;
	__u8 plen;
	__u8 n;

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/* Validate the length *before* dereferencing skb->data below;
	 * previously this was checked only after the reads, allowing an
	 * out-of-bounds read on an empty frame. */
	IRDA_ASSERT(skb->len >= TTP_HEADER, return;);

	self->max_seg_size = max_seg_size - TTP_HEADER;
	self->max_header_size = max_header_size + TTP_HEADER;

	/*
	 *  Check if we have got some QoS parameters back! This should be the
	 *  negotiated QoS for the link.
	 */
	if (qos) {
		pr_debug("IrTTP, Negotiated BAUD_RATE: %02x\n",
			 qos->baud_rate.bits);
		pr_debug("IrTTP, Negotiated BAUD_RATE: %d bps.\n",
			 qos->baud_rate.value);
	}

	/* Low 7 bits of the first octet are the peer's initial credit */
	n = skb->data[0] & 0x7f;

	pr_debug("%s(), Initial send_credit=%d\n", __func__, n);

	self->send_credit = n;
	self->tx_max_sdu_size = 0;
	self->connected = TRUE;

	/* High bit set means a parameter list follows the TTP header */
	parameters = skb->data[0] & 0x80;

	skb_pull(skb, TTP_HEADER);

	if (parameters) {
		/* Need at least the parameter-list length octet */
		if (skb->len < 1) {
			net_warn_ratelimited("%s: frame too short for parameters\n",
					     __func__);
			dev_kfree_skb(skb);
			return;
		}
		plen = skb->data[0];

		ret = irda_param_extract_all(self, skb->data+1,
					     IRDA_MIN(skb->len-1, plen),
					     &param_info);

		/* Any errors in the parameter list? */
		if (ret < 0) {
			net_warn_ratelimited("%s: error extracting parameters\n",
					     __func__);
			dev_kfree_skb(skb);

			/* Do not accept this connection attempt */
			return;
		}
		/* Remove parameters */
		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
	}

	pr_debug("%s() send=%d,avail=%d,remote=%d\n", __func__,
		 self->send_credit, self->avail_credit, self->remote_credit);

	pr_debug("%s(), MaxSduSize=%d\n", __func__,
		 self->tx_max_sdu_size);

	/* Hand the remaining skb to the user (who then owns it),
	 * otherwise drop it */
	if (self->notify.connect_confirm) {
		self->notify.connect_confirm(self->notify.instance, self, qos,
					     self->tx_max_sdu_size,
					     self->max_header_size, skb);
	} else
		dev_kfree_skb(skb);
}
1260 
1261 /*
1262  * Function irttp_connect_indication (handle, skb)
1263  *
1264  *    Some other device is connecting to this TSAP
1265  *
1266  */
static void irttp_connect_indication(void *instance, void *sap,
		struct qos_info *qos, __u32 max_seg_size, __u8 max_header_size,
		struct sk_buff *skb)
{
	struct tsap_cb *self;
	struct lsap_cb *lsap;
	int parameters;
	int ret;
	__u8 plen;
	__u8 n;

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/* Validate the length *before* dereferencing skb->data below;
	 * previously this was checked only after the reads, allowing an
	 * out-of-bounds read on an empty frame. */
	IRDA_ASSERT(skb->len >= TTP_HEADER, return;);

	lsap = sap;

	self->max_seg_size = max_seg_size - TTP_HEADER;
	self->max_header_size = max_header_size+TTP_HEADER;

	pr_debug("%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);

	/* Need to update dtsap_sel if its equal to LSAP_ANY */
	self->dtsap_sel = lsap->dlsap_sel;

	/* Low 7 bits of the first octet are the peer's initial credit */
	n = skb->data[0] & 0x7f;

	self->send_credit = n;
	self->tx_max_sdu_size = 0;

	/* High bit set means a parameter list follows the TTP header */
	parameters = skb->data[0] & 0x80;

	skb_pull(skb, TTP_HEADER);

	if (parameters) {
		/* Need at least the parameter-list length octet */
		if (skb->len < 1) {
			net_warn_ratelimited("%s: frame too short for parameters\n",
					     __func__);
			dev_kfree_skb(skb);
			return;
		}
		plen = skb->data[0];

		ret = irda_param_extract_all(self, skb->data+1,
					     IRDA_MIN(skb->len-1, plen),
					     &param_info);

		/* Any errors in the parameter list? */
		if (ret < 0) {
			net_warn_ratelimited("%s: error extracting parameters\n",
					     __func__);
			dev_kfree_skb(skb);

			/* Do not accept this connection attempt */
			return;
		}

		/* Remove parameters */
		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
	}

	/* Hand the remaining skb to the user (who then owns it),
	 * otherwise drop it */
	if (self->notify.connect_indication) {
		self->notify.connect_indication(self->notify.instance, self,
						qos, self->tx_max_sdu_size,
						self->max_header_size, skb);
	} else
		dev_kfree_skb(skb);
}
1332 
1333 /*
1334  * Function irttp_connect_response (handle, userdata)
1335  *
1336  *    Service user is accepting the connection, just pass it down to
1337  *    IrLMP!
1338  *
1339  */
int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
			   struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	int ret;
	__u8 n;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	pr_debug("%s(), Source TSAP selector=%02x\n", __func__,
		 self->stsap_sel);

	if (userdata == NULL) {
		/* No user frame given: build an empty one with full
		 * headroom for the MUX_CONTROL and LAP headers */
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		/* The client must have left room for our headers */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			{ dev_kfree_skb(userdata); return -1; });
		tx_skb = userdata;
	}

	/* Reset the receive-side state for the new connection */
	self->avail_credit = 0;
	self->remote_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;

	/* The frame's credit field is only 7 bits wide, so grant at
	 * most 127 credits now and keep the surplus for later */
	n = self->initial_credit;
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}
	self->remote_credit = n;

	self->connected = TRUE;

	if (max_sdu_size > 0) {
		/* SAR enabled: header also carries the MaxSduSize
		 * parameter */
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			{ dev_kfree_skb(tx_skb); return -1; });

		frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;
		frame[1] = 0x04; /* Length */
		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Plain TTP header carrying only the initial credit */
		frame = skb_push(tx_skb, TTP_HEADER);

		frame[0] = n & 0x7f;
	}

	ret = irlmp_connect_response(self->lsap, tx_skb);

	return ret;
}
EXPORT_SYMBOL(irttp_connect_response);
1421 
1422 /*
1423  * Function irttp_dup (self, instance)
1424  *
1425  *    Duplicate TSAP, can be used by servers to confirm a connection on a
1426  *    new TSAP so it can keep listening on the old one.
1427  */
irttp_dup(struct tsap_cb * orig,void * instance)1428 struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
1429 {
1430 	struct tsap_cb *new;
1431 	unsigned long flags;
1432 
1433 	/* Protect our access to the old tsap instance */
1434 	spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
1435 
1436 	/* Find the old instance */
1437 	if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
1438 		pr_debug("%s(), unable to find TSAP\n", __func__);
1439 		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1440 		return NULL;
1441 	}
1442 
1443 	/* Allocate a new instance */
1444 	new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
1445 	if (!new) {
1446 		pr_debug("%s(), unable to kmalloc\n", __func__);
1447 		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1448 		return NULL;
1449 	}
1450 	spin_lock_init(&new->lock);
1451 
1452 	/* We don't need the old instance any more */
1453 	spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1454 
1455 	/* Try to dup the LSAP (may fail if we were too slow) */
1456 	new->lsap = irlmp_dup(orig->lsap, new);
1457 	if (!new->lsap) {
1458 		pr_debug("%s(), dup failed!\n", __func__);
1459 		kfree(new);
1460 		return NULL;
1461 	}
1462 
1463 	/* Not everything should be copied */
1464 	new->notify.instance = instance;
1465 
1466 	/* Initialize internal objects */
1467 	irttp_init_tsap(new);
1468 
1469 	/* This is locked */
1470 	hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);
1471 
1472 	return new;
1473 }
1474 EXPORT_SYMBOL(irttp_dup);
1475 
1476 /*
1477  * Function irttp_disconnect_request (self)
1478  *
1479  *    Close this connection please! If priority is high, the queued data
1480  *    segments, if any, will be deallocated first
1481  *
1482  */
irttp_disconnect_request(struct tsap_cb * self,struct sk_buff * userdata,int priority)1483 int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1484 			     int priority)
1485 {
1486 	int ret;
1487 
1488 	IRDA_ASSERT(self != NULL, return -1;);
1489 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
1490 
1491 	/* Already disconnected? */
1492 	if (!self->connected) {
1493 		pr_debug("%s(), already disconnected!\n", __func__);
1494 		if (userdata)
1495 			dev_kfree_skb(userdata);
1496 		return -1;
1497 	}
1498 
1499 	/* Disconnect already pending ?
1500 	 * We need to use an atomic operation to prevent reentry. This
1501 	 * function may be called from various context, like user, timer
1502 	 * for following a disconnect_indication() (i.e. net_bh).
1503 	 * Jean II */
1504 	if (test_and_set_bit(0, &self->disconnect_pend)) {
1505 		pr_debug("%s(), disconnect already pending\n",
1506 			 __func__);
1507 		if (userdata)
1508 			dev_kfree_skb(userdata);
1509 
1510 		/* Try to make some progress */
1511 		irttp_run_tx_queue(self);
1512 		return -1;
1513 	}
1514 
1515 	/*
1516 	 *  Check if there is still data segments in the transmit queue
1517 	 */
1518 	if (!skb_queue_empty(&self->tx_queue)) {
1519 		if (priority == P_HIGH) {
1520 			/*
1521 			 *  No need to send the queued data, if we are
1522 			 *  disconnecting right now since the data will
1523 			 *  not have any usable connection to be sent on
1524 			 */
1525 			pr_debug("%s(): High priority!!()\n", __func__);
1526 			irttp_flush_queues(self);
1527 		} else if (priority == P_NORMAL) {
1528 			/*
1529 			 *  Must delay disconnect until after all data segments
1530 			 *  have been sent and the tx_queue is empty
1531 			 */
1532 			/* We'll reuse this one later for the disconnect */
1533 			self->disconnect_skb = userdata;  /* May be NULL */
1534 
1535 			irttp_run_tx_queue(self);
1536 
1537 			irttp_start_todo_timer(self, HZ/10);
1538 			return -1;
1539 		}
1540 	}
1541 	/* Note : we don't need to check if self->rx_queue is full and the
1542 	 * state of self->rx_sdu_busy because the disconnect response will
1543 	 * be sent at the LMP level (so even if the peer has its Tx queue
1544 	 * full of data). - Jean II */
1545 
1546 	pr_debug("%s(), Disconnecting ...\n", __func__);
1547 	self->connected = FALSE;
1548 
1549 	if (!userdata) {
1550 		struct sk_buff *tx_skb;
1551 		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
1552 		if (!tx_skb)
1553 			return -ENOMEM;
1554 
1555 		/*
1556 		 *  Reserve space for MUX and LAP header
1557 		 */
1558 		skb_reserve(tx_skb, LMP_MAX_HEADER);
1559 
1560 		userdata = tx_skb;
1561 	}
1562 	ret = irlmp_disconnect_request(self->lsap, userdata);
1563 
1564 	/* The disconnect is no longer pending */
1565 	clear_bit(0, &self->disconnect_pend);	/* FALSE */
1566 
1567 	return ret;
1568 }
1569 EXPORT_SYMBOL(irttp_disconnect_request);
1570 
1571 /*
1572  * Function irttp_disconnect_indication (self, reason)
1573  *
1574  *    Disconnect indication, TSAP disconnected by peer?
1575  *
1576  */
static void irttp_disconnect_indication(void *instance, void *sap,
		LM_REASON reason, struct sk_buff *skb)
{
	struct tsap_cb *self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* Stop the higher layer from sending any more data */
	self->connected = FALSE;

	/* If the client already tried to close the TSAP, it is most
	 * likely gone; don't bother it, just clean up the remains.
	 * - Jean II */
	if (self->close_pend) {
		if (skb)
			dev_kfree_skb(skb);
		irttp_close_tsap(self);
		return;
	}

	/* Otherwise we assume the higher layer is still waiting for the
	 * disconnect notification and able to process it, even if it
	 * tried to disconnect itself (else it would already have tried
	 * to close the tsap and close_pend would be TRUE). - Jean II */
	if (self->notify.disconnect_indication) {
		self->notify.disconnect_indication(self->notify.instance,
						   self, reason, skb);
		return;
	}

	/* No handler registered: dispose of the skb ourselves */
	if (skb)
		dev_kfree_skb(skb);
}
1614 
1615 /*
1616  * Function irttp_do_data_indication (self, skb)
1617  *
1618  *    Try to deliver reassembled skb to layer above, and requeue it if that
1619  *    for some reason should fail. We mark rx sdu as busy to apply back
1620  *    pressure is necessary.
1621  */
irttp_do_data_indication(struct tsap_cb * self,struct sk_buff * skb)1622 static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
1623 {
1624 	int err;
1625 
1626 	/* Check if client has already closed the TSAP and gone away */
1627 	if (self->close_pend) {
1628 		dev_kfree_skb(skb);
1629 		return;
1630 	}
1631 
1632 	err = self->notify.data_indication(self->notify.instance, self, skb);
1633 
1634 	/* Usually the layer above will notify that it's input queue is
1635 	 * starting to get filled by using the flow request, but this may
1636 	 * be difficult, so it can instead just refuse to eat it and just
1637 	 * give an error back
1638 	 */
1639 	if (err) {
1640 		pr_debug("%s() requeueing skb!\n", __func__);
1641 
1642 		/* Make sure we take a break */
1643 		self->rx_sdu_busy = TRUE;
1644 
1645 		/* Need to push the header in again */
1646 		skb_push(skb, TTP_HEADER);
1647 		skb->data[0] = 0x00; /* Make sure MORE bit is cleared */
1648 
1649 		/* Put skb back on queue */
1650 		skb_queue_head(&self->rx_queue, skb);
1651 	}
1652 }
1653 
1654 /*
1655  * Function irttp_run_rx_queue (self)
1656  *
1657  *     Check if we have any frames to be transmitted, or if we have any
1658  *     available credit to give away.
1659  */
static void irttp_run_rx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	int more = 0;	/* MORE bit of the fragment being processed */

	pr_debug("%s() send=%d,avail=%d,remote=%d\n", __func__,
		 self->send_credit, self->avail_credit, self->remote_credit);

	/* Get exclusive access to the rx queue, otherwise don't touch it */
	if (irda_lock(&self->rx_queue_lock) == FALSE)
		return;

	/*
	 *  Reassemble all frames in receive queue and deliver them
	 *  (delivery may set rx_sdu_busy, which stops the loop)
	 */
	while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
		/* This bit will tell us if it's the last fragment or not */
		more = skb->data[0] & 0x80;

		/* Remove TTP header */
		skb_pull(skb, TTP_HEADER);

		/* Add the length of the remaining data */
		self->rx_sdu_size += skb->len;

		/*
		 * If SAR is disabled, or user has requested no reassembly
		 * of received fragments then we just deliver them
		 * immediately. This can be requested by clients that
		 * implements byte streams without any message boundaries
		 */
		if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
			irttp_do_data_indication(self, skb);
			self->rx_sdu_size = 0;

			continue;
		}

		/* Check if this is a fragment, and not the last fragment */
		if (more) {
			/*
			 *  Queue the fragment if we still are within the
			 *  limits of the maximum size of the rx_sdu
			 */
			if (self->rx_sdu_size <= self->rx_max_sdu_size) {
				pr_debug("%s(), queueing frag\n",
					 __func__);
				skb_queue_tail(&self->rx_fragments, skb);
			} else {
				/* Free the part of the SDU that is too big */
				dev_kfree_skb(skb);
			}
			continue;
		}
		/*
		 *  This is the last fragment, so time to reassemble!
		 */
		if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
		    (self->rx_max_sdu_size == TTP_SAR_UNBOUND)) {
			/*
			 * A little optimizing. Only queue the fragment if
			 * there are other fragments. Since if this is the
			 * last and only fragment, there is no need to
			 * reassemble :-)
			 */
			if (!skb_queue_empty(&self->rx_fragments)) {
				skb_queue_tail(&self->rx_fragments,
					       skb);

				skb = irttp_reassemble_skb(self);
			}

			/* Now we can deliver the reassembled skb */
			irttp_do_data_indication(self, skb);
		} else {
			/* SDU exceeded the negotiated maximum size */
			pr_debug("%s(), Truncated frame\n", __func__);

			/* Free the part of the SDU that is too big */
			dev_kfree_skb(skb);

			/* Deliver only the valid but truncated part of SDU */
			skb = irttp_reassemble_skb(self);

			irttp_do_data_indication(self, skb);
		}
		self->rx_sdu_size = 0;
	}

	/*
	 * It's not trivial to keep track of how many credits are available
	 * by incrementing at each packet, because delivery may fail
	 * (irttp_do_data_indication() may requeue the frame) and because
	 * we need to take care of fragmentation.
	 * We want the other side to send up to initial_credit packets.
	 * We have some frames in our queues, and we have already allowed it
	 * to send remote_credit.
	 * No need to spinlock, write is atomic and self correcting...
	 * Jean II
	 */
	self->avail_credit = (self->initial_credit -
			      (self->remote_credit +
			       skb_queue_len(&self->rx_queue) +
			       skb_queue_len(&self->rx_fragments)));

	/* Do we have too much credits to send to peer ? */
	if ((self->remote_credit <= TTP_RX_MIN_CREDIT) &&
	    (self->avail_credit > 0)) {
		/* Send explicit credit frame */
		irttp_give_credit(self);
		/* Note : do *NOT* check if tx_queue is non-empty, that
		 * will produce deadlocks. I repeat : send a credit frame
		 * even if we have something to send in our Tx queue.
		 * If we have credits, it means that our Tx queue is blocked.
		 *
		 * Let's suppose the peer can't keep up with our Tx. He will
		 * flow control us by not sending us any credits, and we
		 * will stop Tx and start accumulating credits here.
		 * Up to the point where the peer will stop its Tx queue,
		 * for lack of credits.
		 * Let's assume the peer application is single threaded.
		 * It will block on Tx and never consume any Rx buffer.
		 * Deadlock. Guaranteed. - Jean II
		 */
	}

	/* Reset lock */
	self->rx_queue_lock = 0;
}
1788 
1789 #ifdef CONFIG_PROC_FS
/* Per-reader state for the /proc seq_file: index of the current TSAP */
struct irttp_iter_state {
	int id;
};
1793 
static void *irttp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct irttp_iter_state *iter = seq->private;
	struct tsap_cb *self;

	/* Hold the tsap list lock across the whole traversal; it is
	 * released in irttp_seq_stop() */
	spin_lock_irq(&irttp->tsaps->hb_spinlock);

	/* Walk forward to the entry at position *pos (NULL if past end) */
	self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
	for (iter->id = 0; self != NULL && iter->id != *pos; ++iter->id)
		self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps);

	return self;
}
1813 
static void *irttp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct irttp_iter_state *iter = seq->private;

	/* Advance both the seq position and our own counter, then
	 * continue the hashbin walk started in irttp_seq_start() */
	(*pos)++;
	iter->id++;

	return (void *) hashbin_get_next(irttp->tsaps);
}
1822 
/* Release the tsap list lock taken in irttp_seq_start() */
static void irttp_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&irttp->tsaps->hb_spinlock);
}
1827 
/* Print one TSAP's state (selectors, credits, queue lengths) to /proc */
static int irttp_seq_show(struct seq_file *seq, void *v)
{
	const struct irttp_iter_state *iter = seq->private;
	const struct tsap_cb *self = v;

	seq_printf(seq, "TSAP %d, ", iter->id);
	seq_printf(seq, "stsap_sel: %02x, ",
		   self->stsap_sel);
	seq_printf(seq, "dtsap_sel: %02x\n",
		   self->dtsap_sel);
	seq_printf(seq, "  connected: %s, ",
		   self->connected ? "TRUE" : "FALSE");
	seq_printf(seq, "avail credit: %d, ",
		   self->avail_credit);
	seq_printf(seq, "remote credit: %d, ",
		   self->remote_credit);
	seq_printf(seq, "send credit: %d\n",
		   self->send_credit);
	seq_printf(seq, "  tx packets: %lu, ",
		   self->stats.tx_packets);
	seq_printf(seq, "rx packets: %lu, ",
		   self->stats.rx_packets);
	seq_printf(seq, "tx_queue len: %u ",
		   skb_queue_len(&self->tx_queue));
	seq_printf(seq, "rx_queue len: %u\n",
		   skb_queue_len(&self->rx_queue));
	seq_printf(seq, "  tx_sdu_busy: %s, ",
		   self->tx_sdu_busy ? "TRUE" : "FALSE");
	seq_printf(seq, "rx_sdu_busy: %s\n",
		   self->rx_sdu_busy ? "TRUE" : "FALSE");
	seq_printf(seq, "  max_seg_size: %u, ",
		   self->max_seg_size);
	seq_printf(seq, "tx_max_sdu_size: %u, ",
		   self->tx_max_sdu_size);
	seq_printf(seq, "rx_max_sdu_size: %u\n",
		   self->rx_max_sdu_size);

	seq_printf(seq, "  Used by (%s)\n\n",
		   self->notify.name);
	return 0;
}
1869 
/* seq_file iteration callbacks used to list all TSAPs in /proc */
static const struct seq_operations irttp_seq_ops = {
	.start  = irttp_seq_start,
	.next   = irttp_seq_next,
	.stop   = irttp_seq_stop,
	.show   = irttp_seq_show,
};
1876 
/* /proc open handler: allocate per-reader iterator state */
static int irttp_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &irttp_seq_ops,
			sizeof(struct irttp_iter_state));
}
1882 
/* File operations for the IrTTP /proc entry */
const struct file_operations irttp_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = irttp_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
};
1890 
1891 #endif /* PROC_FS */
1892