1 /*********************************************************************
2 *
3 * Filename: af_irda.c
4 * Version: 0.9
5 * Description: IrDA sockets implementation
6 * Status: Stable
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Sun May 31 10:12:43 1998
9 * Modified at: Sat Dec 25 21:10:23 1999
10 * Modified by: Dag Brattli <dag@brattli.net>
11 * Sources: af_netroom.c, af_ax25.c, af_rose.c, af_x25.c etc.
12 *
13 * Copyright (c) 1999 Dag Brattli <dagb@cs.uit.no>
14 * Copyright (c) 1999-2003 Jean Tourrilhes <jt@hpl.hp.com>
15 * All Rights Reserved.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of
20 * the License, or (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, see <http://www.gnu.org/licenses/>.
29 *
30 * Linux-IrDA now supports four different types of IrDA sockets:
31 *
32 * o SOCK_STREAM: TinyTP connections with SAR disabled. The
33 * max SDU size is 0 for conn. of this type
34 * o SOCK_SEQPACKET: TinyTP connections with SAR enabled. TTP may
35 * fragment the messages, but will preserve
36 * the message boundaries
37 * o SOCK_DGRAM: IRDAPROTO_UNITDATA: TinyTP connections with Unitdata
38 * (unreliable) transfers
39 * IRDAPROTO_ULTRA: Connectionless and unreliable data
40 *
41 ********************************************************************/
42
43 #include <linux/capability.h>
44 #include <linux/module.h>
45 #include <linux/types.h>
46 #include <linux/socket.h>
47 #include <linux/sockios.h>
48 #include <linux/slab.h>
49 #include <linux/init.h>
50 #include <linux/net.h>
51 #include <linux/irda.h>
52 #include <linux/poll.h>
53
54 #include <asm/ioctls.h> /* TIOCOUTQ, TIOCINQ */
55 #include <asm/uaccess.h>
56
57 #include <net/sock.h>
58 #include <net/tcp_states.h>
59
60 #include <net/irda/af_irda.h>
61
62 static int irda_create(struct net *net, struct socket *sock, int protocol, int kern);
63
64 static const struct proto_ops irda_stream_ops;
65 static const struct proto_ops irda_seqpacket_ops;
66 static const struct proto_ops irda_dgram_ops;
67
68 #ifdef CONFIG_IRDA_ULTRA
69 static const struct proto_ops irda_ultra_ops;
70 #define ULTRA_MAX_DATA 382
71 #endif /* CONFIG_IRDA_ULTRA */
72
73 #define IRDA_MAX_HEADER (TTP_MAX_HEADER)
74
75 /*
76 * Function irda_data_indication (instance, sap, skb)
77 *
78 * Received some data from TinyTP. Just queue it on the receive queue
79 *
80 */
irda_data_indication(void * instance,void * sap,struct sk_buff * skb)81 static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb)
82 {
83 struct irda_sock *self;
84 struct sock *sk;
85 int err;
86
87 self = instance;
88 sk = instance;
89
90 err = sock_queue_rcv_skb(sk, skb);
91 if (err) {
92 pr_debug("%s(), error: no more mem!\n", __func__);
93 self->rx_flow = FLOW_STOP;
94
95 /* When we return error, TTP will need to requeue the skb */
96 return err;
97 }
98
99 return 0;
100 }
101
102 /*
103 * Function irda_disconnect_indication (instance, sap, reason, skb)
104 *
105 * Connection has been closed. Check reason to find out why
106 *
107 */
irda_disconnect_indication(void * instance,void * sap,LM_REASON reason,struct sk_buff * skb)108 static void irda_disconnect_indication(void *instance, void *sap,
109 LM_REASON reason, struct sk_buff *skb)
110 {
111 struct irda_sock *self;
112 struct sock *sk;
113
114 self = instance;
115
116 pr_debug("%s(%p)\n", __func__, self);
117
118 /* Don't care about it, but let's not leak it */
119 if(skb)
120 dev_kfree_skb(skb);
121
122 sk = instance;
123 if (sk == NULL) {
124 pr_debug("%s(%p) : BUG : sk is NULL\n",
125 __func__, self);
126 return;
127 }
128
129 /* Prevent race conditions with irda_release() and irda_shutdown() */
130 bh_lock_sock(sk);
131 if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) {
132 sk->sk_state = TCP_CLOSE;
133 sk->sk_shutdown |= SEND_SHUTDOWN;
134
135 sk->sk_state_change(sk);
136
137 /* Close our TSAP.
138 * If we leave it open, IrLMP put it back into the list of
139 * unconnected LSAPs. The problem is that any incoming request
140 * can then be matched to this socket (and it will be, because
141 * it is at the head of the list). This would prevent any
142 * listening socket waiting on the same TSAP to get those
143 * requests. Some apps forget to close sockets, or hang to it
144 * a bit too long, so we may stay in this dead state long
145 * enough to be noticed...
146 * Note : all socket function do check sk->sk_state, so we are
147 * safe...
148 * Jean II
149 */
150 if (self->tsap) {
151 irttp_close_tsap(self->tsap);
152 self->tsap = NULL;
153 }
154 }
155 bh_unlock_sock(sk);
156
157 /* Note : once we are there, there is not much you want to do
158 * with the socket anymore, apart from closing it.
159 * For example, bind() and connect() won't reset sk->sk_err,
160 * sk->sk_shutdown and sk->sk_flags to valid values...
161 * Jean II
162 */
163 }
164
165 /*
166 * Function irda_connect_confirm (instance, sap, qos, max_sdu_size, skb)
167 *
168 * Connections has been confirmed by the remote device
169 *
170 */
irda_connect_confirm(void * instance,void * sap,struct qos_info * qos,__u32 max_sdu_size,__u8 max_header_size,struct sk_buff * skb)171 static void irda_connect_confirm(void *instance, void *sap,
172 struct qos_info *qos,
173 __u32 max_sdu_size, __u8 max_header_size,
174 struct sk_buff *skb)
175 {
176 struct irda_sock *self;
177 struct sock *sk;
178
179 self = instance;
180
181 pr_debug("%s(%p)\n", __func__, self);
182
183 sk = instance;
184 if (sk == NULL) {
185 dev_kfree_skb(skb);
186 return;
187 }
188
189 dev_kfree_skb(skb);
190 // Should be ??? skb_queue_tail(&sk->sk_receive_queue, skb);
191
192 /* How much header space do we need to reserve */
193 self->max_header_size = max_header_size;
194
195 /* IrTTP max SDU size in transmit direction */
196 self->max_sdu_size_tx = max_sdu_size;
197
198 /* Find out what the largest chunk of data that we can transmit is */
199 switch (sk->sk_type) {
200 case SOCK_STREAM:
201 if (max_sdu_size != 0) {
202 net_err_ratelimited("%s: max_sdu_size must be 0\n",
203 __func__);
204 return;
205 }
206 self->max_data_size = irttp_get_max_seg_size(self->tsap);
207 break;
208 case SOCK_SEQPACKET:
209 if (max_sdu_size == 0) {
210 net_err_ratelimited("%s: max_sdu_size cannot be 0\n",
211 __func__);
212 return;
213 }
214 self->max_data_size = max_sdu_size;
215 break;
216 default:
217 self->max_data_size = irttp_get_max_seg_size(self->tsap);
218 }
219
220 pr_debug("%s(), max_data_size=%d\n", __func__,
221 self->max_data_size);
222
223 memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
224
225 /* We are now connected! */
226 sk->sk_state = TCP_ESTABLISHED;
227 sk->sk_state_change(sk);
228 }
229
230 /*
231 * Function irda_connect_indication(instance, sap, qos, max_sdu_size, userdata)
232 *
233 * Incoming connection
234 *
235 */
irda_connect_indication(void * instance,void * sap,struct qos_info * qos,__u32 max_sdu_size,__u8 max_header_size,struct sk_buff * skb)236 static void irda_connect_indication(void *instance, void *sap,
237 struct qos_info *qos, __u32 max_sdu_size,
238 __u8 max_header_size, struct sk_buff *skb)
239 {
240 struct irda_sock *self;
241 struct sock *sk;
242
243 self = instance;
244
245 pr_debug("%s(%p)\n", __func__, self);
246
247 sk = instance;
248 if (sk == NULL) {
249 dev_kfree_skb(skb);
250 return;
251 }
252
253 /* How much header space do we need to reserve */
254 self->max_header_size = max_header_size;
255
256 /* IrTTP max SDU size in transmit direction */
257 self->max_sdu_size_tx = max_sdu_size;
258
259 /* Find out what the largest chunk of data that we can transmit is */
260 switch (sk->sk_type) {
261 case SOCK_STREAM:
262 if (max_sdu_size != 0) {
263 net_err_ratelimited("%s: max_sdu_size must be 0\n",
264 __func__);
265 kfree_skb(skb);
266 return;
267 }
268 self->max_data_size = irttp_get_max_seg_size(self->tsap);
269 break;
270 case SOCK_SEQPACKET:
271 if (max_sdu_size == 0) {
272 net_err_ratelimited("%s: max_sdu_size cannot be 0\n",
273 __func__);
274 kfree_skb(skb);
275 return;
276 }
277 self->max_data_size = max_sdu_size;
278 break;
279 default:
280 self->max_data_size = irttp_get_max_seg_size(self->tsap);
281 }
282
283 pr_debug("%s(), max_data_size=%d\n", __func__,
284 self->max_data_size);
285
286 memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
287
288 skb_queue_tail(&sk->sk_receive_queue, skb);
289 sk->sk_state_change(sk);
290 }
291
292 /*
293 * Function irda_connect_response (handle)
294 *
295 * Accept incoming connection
296 *
297 */
irda_connect_response(struct irda_sock * self)298 static void irda_connect_response(struct irda_sock *self)
299 {
300 struct sk_buff *skb;
301
302 skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_KERNEL);
303 if (skb == NULL) {
304 pr_debug("%s() Unable to allocate sk_buff!\n",
305 __func__);
306 return;
307 }
308
309 /* Reserve space for MUX_CONTROL and LAP header */
310 skb_reserve(skb, IRDA_MAX_HEADER);
311
312 irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb);
313 }
314
315 /*
316 * Function irda_flow_indication (instance, sap, flow)
317 *
318 * Used by TinyTP to tell us if it can accept more data or not
319 *
320 */
irda_flow_indication(void * instance,void * sap,LOCAL_FLOW flow)321 static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
322 {
323 struct irda_sock *self;
324 struct sock *sk;
325
326 self = instance;
327 sk = instance;
328 BUG_ON(sk == NULL);
329
330 switch (flow) {
331 case FLOW_STOP:
332 pr_debug("%s(), IrTTP wants us to slow down\n",
333 __func__);
334 self->tx_flow = flow;
335 break;
336 case FLOW_START:
337 self->tx_flow = flow;
338 pr_debug("%s(), IrTTP wants us to start again\n",
339 __func__);
340 wake_up_interruptible(sk_sleep(sk));
341 break;
342 default:
343 pr_debug("%s(), Unknown flow command!\n", __func__);
344 /* Unknown flow command, better stop */
345 self->tx_flow = flow;
346 break;
347 }
348 }
349
350 /*
351 * Function irda_getvalue_confirm (obj_id, value, priv)
352 *
353 * Got answer from remote LM-IAS, just pass object to requester...
354 *
355 * Note : duplicate from above, but we need our own version that
356 * doesn't touch the dtsap_sel and save the full value structure...
357 */
irda_getvalue_confirm(int result,__u16 obj_id,struct ias_value * value,void * priv)358 static void irda_getvalue_confirm(int result, __u16 obj_id,
359 struct ias_value *value, void *priv)
360 {
361 struct irda_sock *self;
362
363 self = priv;
364 if (!self) {
365 net_warn_ratelimited("%s: lost myself!\n", __func__);
366 return;
367 }
368
369 pr_debug("%s(%p)\n", __func__, self);
370
371 /* We probably don't need to make any more queries */
372 iriap_close(self->iriap);
373 self->iriap = NULL;
374
375 /* Check if request succeeded */
376 if (result != IAS_SUCCESS) {
377 pr_debug("%s(), IAS query failed! (%d)\n", __func__,
378 result);
379
380 self->errno = result; /* We really need it later */
381
382 /* Wake up any processes waiting for result */
383 wake_up_interruptible(&self->query_wait);
384
385 return;
386 }
387
388 /* Pass the object to the caller (so the caller must delete it) */
389 self->ias_result = value;
390 self->errno = 0;
391
392 /* Wake up any processes waiting for result */
393 wake_up_interruptible(&self->query_wait);
394 }
395
396 /*
397 * Function irda_selective_discovery_indication (discovery)
398 *
399 * Got a selective discovery indication from IrLMP.
400 *
401 * IrLMP is telling us that this node is new and matching our hint bit
402 * filter. Wake up any process waiting for answer...
403 */
irda_selective_discovery_indication(discinfo_t * discovery,DISCOVERY_MODE mode,void * priv)404 static void irda_selective_discovery_indication(discinfo_t *discovery,
405 DISCOVERY_MODE mode,
406 void *priv)
407 {
408 struct irda_sock *self;
409
410 self = priv;
411 if (!self) {
412 net_warn_ratelimited("%s: lost myself!\n", __func__);
413 return;
414 }
415
416 /* Pass parameter to the caller */
417 self->cachedaddr = discovery->daddr;
418
419 /* Wake up process if its waiting for device to be discovered */
420 wake_up_interruptible(&self->query_wait);
421 }
422
423 /*
424 * Function irda_discovery_timeout (priv)
425 *
426 * Timeout in the selective discovery process
427 *
428 * We were waiting for a node to be discovered, but nothing has come up
429 * so far. Wake up the user and tell him that we failed...
430 */
irda_discovery_timeout(u_long priv)431 static void irda_discovery_timeout(u_long priv)
432 {
433 struct irda_sock *self;
434
435 self = (struct irda_sock *) priv;
436 BUG_ON(self == NULL);
437
438 /* Nothing for the caller */
439 self->cachelog = NULL;
440 self->cachedaddr = 0;
441 self->errno = -ETIME;
442
443 /* Wake up process if its still waiting... */
444 wake_up_interruptible(&self->query_wait);
445 }
446
447 /*
448 * Function irda_open_tsap (self)
449 *
450 * Open local Transport Service Access Point (TSAP)
451 *
452 */
irda_open_tsap(struct irda_sock * self,__u8 tsap_sel,char * name)453 static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
454 {
455 notify_t notify;
456
457 if (self->tsap) {
458 pr_debug("%s: busy!\n", __func__);
459 return -EBUSY;
460 }
461
462 /* Initialize callbacks to be used by the IrDA stack */
463 irda_notify_init(¬ify);
464 notify.connect_confirm = irda_connect_confirm;
465 notify.connect_indication = irda_connect_indication;
466 notify.disconnect_indication = irda_disconnect_indication;
467 notify.data_indication = irda_data_indication;
468 notify.udata_indication = irda_data_indication;
469 notify.flow_indication = irda_flow_indication;
470 notify.instance = self;
471 strncpy(notify.name, name, NOTIFY_MAX_NAME);
472
473 self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT,
474 ¬ify);
475 if (self->tsap == NULL) {
476 pr_debug("%s(), Unable to allocate TSAP!\n",
477 __func__);
478 return -ENOMEM;
479 }
480 /* Remember which TSAP selector we actually got */
481 self->stsap_sel = self->tsap->stsap_sel;
482
483 return 0;
484 }
485
486 /*
487 * Function irda_open_lsap (self)
488 *
489 * Open local Link Service Access Point (LSAP). Used for opening Ultra
490 * sockets
491 */
492 #ifdef CONFIG_IRDA_ULTRA
irda_open_lsap(struct irda_sock * self,int pid)493 static int irda_open_lsap(struct irda_sock *self, int pid)
494 {
495 notify_t notify;
496
497 if (self->lsap) {
498 net_warn_ratelimited("%s(), busy!\n", __func__);
499 return -EBUSY;
500 }
501
502 /* Initialize callbacks to be used by the IrDA stack */
503 irda_notify_init(¬ify);
504 notify.udata_indication = irda_data_indication;
505 notify.instance = self;
506 strncpy(notify.name, "Ultra", NOTIFY_MAX_NAME);
507
508 self->lsap = irlmp_open_lsap(LSAP_CONNLESS, ¬ify, pid);
509 if (self->lsap == NULL) {
510 pr_debug("%s(), Unable to allocate LSAP!\n", __func__);
511 return -ENOMEM;
512 }
513
514 return 0;
515 }
516 #endif /* CONFIG_IRDA_ULTRA */
517
518 /*
519 * Function irda_find_lsap_sel (self, name)
520 *
521 * Try to lookup LSAP selector in remote LM-IAS
522 *
523 * Basically, we start a IAP query, and then go to sleep. When the query
524 * return, irda_getvalue_confirm will wake us up, and we can examine the
525 * result of the query...
526 * Note that in some case, the query fail even before we go to sleep,
527 * creating some races...
528 */
irda_find_lsap_sel(struct irda_sock * self,char * name)529 static int irda_find_lsap_sel(struct irda_sock *self, char *name)
530 {
531 pr_debug("%s(%p, %s)\n", __func__, self, name);
532
533 if (self->iriap) {
534 net_warn_ratelimited("%s(): busy with a previous query\n",
535 __func__);
536 return -EBUSY;
537 }
538
539 self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
540 irda_getvalue_confirm);
541 if(self->iriap == NULL)
542 return -ENOMEM;
543
544 /* Treat unexpected wakeup as disconnect */
545 self->errno = -EHOSTUNREACH;
546
547 /* Query remote LM-IAS */
548 iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr,
549 name, "IrDA:TinyTP:LsapSel");
550
551 /* Wait for answer, if not yet finished (or failed) */
552 if (wait_event_interruptible(self->query_wait, (self->iriap==NULL)))
553 /* Treat signals as disconnect */
554 return -EHOSTUNREACH;
555
556 /* Check what happened */
557 if (self->errno)
558 {
559 /* Requested object/attribute doesn't exist */
560 if((self->errno == IAS_CLASS_UNKNOWN) ||
561 (self->errno == IAS_ATTRIB_UNKNOWN))
562 return -EADDRNOTAVAIL;
563 else
564 return -EHOSTUNREACH;
565 }
566
567 /* Get the remote TSAP selector */
568 switch (self->ias_result->type) {
569 case IAS_INTEGER:
570 pr_debug("%s() int=%d\n",
571 __func__, self->ias_result->t.integer);
572
573 if (self->ias_result->t.integer != -1)
574 self->dtsap_sel = self->ias_result->t.integer;
575 else
576 self->dtsap_sel = 0;
577 break;
578 default:
579 self->dtsap_sel = 0;
580 pr_debug("%s(), bad type!\n", __func__);
581 break;
582 }
583 if (self->ias_result)
584 irias_delete_value(self->ias_result);
585
586 if (self->dtsap_sel)
587 return 0;
588
589 return -EADDRNOTAVAIL;
590 }
591
592 /*
593 * Function irda_discover_daddr_and_lsap_sel (self, name)
594 *
595 * This try to find a device with the requested service.
596 *
597 * It basically look into the discovery log. For each address in the list,
598 * it queries the LM-IAS of the device to find if this device offer
599 * the requested service.
600 * If there is more than one node supporting the service, we complain
601 * to the user (it should move devices around).
602 * The, we set both the destination address and the lsap selector to point
603 * on the service on the unique device we have found.
604 *
605 * Note : this function fails if there is more than one device in range,
606 * because IrLMP doesn't disconnect the LAP when the last LSAP is closed.
607 * Moreover, we would need to wait the LAP disconnection...
608 */
irda_discover_daddr_and_lsap_sel(struct irda_sock * self,char * name)609 static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
610 {
611 discinfo_t *discoveries; /* Copy of the discovery log */
612 int number; /* Number of nodes in the log */
613 int i;
614 int err = -ENETUNREACH;
615 __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */
616 __u8 dtsap_sel = 0x0; /* TSAP associated with it */
617
618 pr_debug("%s(), name=%s\n", __func__, name);
619
620 /* Ask lmp for the current discovery log
621 * Note : we have to use irlmp_get_discoveries(), as opposed
622 * to play with the cachelog directly, because while we are
623 * making our ias query, le log might change... */
624 discoveries = irlmp_get_discoveries(&number, self->mask.word,
625 self->nslots);
626 /* Check if the we got some results */
627 if (discoveries == NULL)
628 return -ENETUNREACH; /* No nodes discovered */
629
630 /*
631 * Now, check all discovered devices (if any), and connect
632 * client only about the services that the client is
633 * interested in...
634 */
635 for(i = 0; i < number; i++) {
636 /* Try the address in the log */
637 self->daddr = discoveries[i].daddr;
638 self->saddr = 0x0;
639 pr_debug("%s(), trying daddr = %08x\n",
640 __func__, self->daddr);
641
642 /* Query remote LM-IAS for this service */
643 err = irda_find_lsap_sel(self, name);
644 switch (err) {
645 case 0:
646 /* We found the requested service */
647 if(daddr != DEV_ADDR_ANY) {
648 pr_debug("%s(), discovered service ''%s'' in two different devices !!!\n",
649 __func__, name);
650 self->daddr = DEV_ADDR_ANY;
651 kfree(discoveries);
652 return -ENOTUNIQ;
653 }
654 /* First time we found that one, save it ! */
655 daddr = self->daddr;
656 dtsap_sel = self->dtsap_sel;
657 break;
658 case -EADDRNOTAVAIL:
659 /* Requested service simply doesn't exist on this node */
660 break;
661 default:
662 /* Something bad did happen :-( */
663 pr_debug("%s(), unexpected IAS query failure\n",
664 __func__);
665 self->daddr = DEV_ADDR_ANY;
666 kfree(discoveries);
667 return -EHOSTUNREACH;
668 }
669 }
670 /* Cleanup our copy of the discovery log */
671 kfree(discoveries);
672
673 /* Check out what we found */
674 if(daddr == DEV_ADDR_ANY) {
675 pr_debug("%s(), cannot discover service ''%s'' in any device !!!\n",
676 __func__, name);
677 self->daddr = DEV_ADDR_ANY;
678 return -EADDRNOTAVAIL;
679 }
680
681 /* Revert back to discovered device & service */
682 self->daddr = daddr;
683 self->saddr = 0x0;
684 self->dtsap_sel = dtsap_sel;
685
686 pr_debug("%s(), discovered requested service ''%s'' at address %08x\n",
687 __func__, name, self->daddr);
688
689 return 0;
690 }
691
692 /*
693 * Function irda_getname (sock, uaddr, uaddr_len, peer)
694 *
695 * Return the our own, or peers socket address (sockaddr_irda)
696 *
697 */
irda_getname(struct socket * sock,struct sockaddr * uaddr,int * uaddr_len,int peer)698 static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
699 int *uaddr_len, int peer)
700 {
701 struct sockaddr_irda saddr;
702 struct sock *sk = sock->sk;
703 struct irda_sock *self = irda_sk(sk);
704
705 memset(&saddr, 0, sizeof(saddr));
706 if (peer) {
707 if (sk->sk_state != TCP_ESTABLISHED)
708 return -ENOTCONN;
709
710 saddr.sir_family = AF_IRDA;
711 saddr.sir_lsap_sel = self->dtsap_sel;
712 saddr.sir_addr = self->daddr;
713 } else {
714 saddr.sir_family = AF_IRDA;
715 saddr.sir_lsap_sel = self->stsap_sel;
716 saddr.sir_addr = self->saddr;
717 }
718
719 pr_debug("%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel);
720 pr_debug("%s(), addr = %08x\n", __func__, saddr.sir_addr);
721
722 /* uaddr_len come to us uninitialised */
723 *uaddr_len = sizeof (struct sockaddr_irda);
724 memcpy(uaddr, &saddr, *uaddr_len);
725
726 return 0;
727 }
728
729 /*
730 * Function irda_listen (sock, backlog)
731 *
732 * Just move to the listen state
733 *
734 */
irda_listen(struct socket * sock,int backlog)735 static int irda_listen(struct socket *sock, int backlog)
736 {
737 struct sock *sk = sock->sk;
738 int err = -EOPNOTSUPP;
739
740 lock_sock(sk);
741
742 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
743 (sk->sk_type != SOCK_DGRAM))
744 goto out;
745
746 if (sk->sk_state != TCP_LISTEN) {
747 sk->sk_max_ack_backlog = backlog;
748 sk->sk_state = TCP_LISTEN;
749
750 err = 0;
751 }
752 out:
753 release_sock(sk);
754
755 return err;
756 }
757
758 /*
759 * Function irda_bind (sock, uaddr, addr_len)
760 *
761 * Used by servers to register their well known TSAP
762 *
763 */
irda_bind(struct socket * sock,struct sockaddr * uaddr,int addr_len)764 static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
765 {
766 struct sock *sk = sock->sk;
767 struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr;
768 struct irda_sock *self = irda_sk(sk);
769 int err;
770
771 pr_debug("%s(%p)\n", __func__, self);
772
773 if (addr_len != sizeof(struct sockaddr_irda))
774 return -EINVAL;
775
776 lock_sock(sk);
777 #ifdef CONFIG_IRDA_ULTRA
778 /* Special care for Ultra sockets */
779 if ((sk->sk_type == SOCK_DGRAM) &&
780 (sk->sk_protocol == IRDAPROTO_ULTRA)) {
781 self->pid = addr->sir_lsap_sel;
782 err = -EOPNOTSUPP;
783 if (self->pid & 0x80) {
784 pr_debug("%s(), extension in PID not supp!\n",
785 __func__);
786 goto out;
787 }
788 err = irda_open_lsap(self, self->pid);
789 if (err < 0)
790 goto out;
791
792 /* Pretend we are connected */
793 sock->state = SS_CONNECTED;
794 sk->sk_state = TCP_ESTABLISHED;
795 err = 0;
796
797 goto out;
798 }
799 #endif /* CONFIG_IRDA_ULTRA */
800
801 self->ias_obj = irias_new_object(addr->sir_name, jiffies);
802 err = -ENOMEM;
803 if (self->ias_obj == NULL)
804 goto out;
805
806 err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
807 if (err < 0) {
808 irias_delete_object(self->ias_obj);
809 self->ias_obj = NULL;
810 goto out;
811 }
812
813 /* Register with LM-IAS */
814 irias_add_integer_attrib(self->ias_obj, "IrDA:TinyTP:LsapSel",
815 self->stsap_sel, IAS_KERNEL_ATTR);
816 irias_insert_object(self->ias_obj);
817
818 err = 0;
819 out:
820 release_sock(sk);
821 return err;
822 }
823
824 /*
825 * Function irda_accept (sock, newsock, flags)
826 *
827 * Wait for incoming connection
828 *
829 */
irda_accept(struct socket * sock,struct socket * newsock,int flags)830 static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
831 {
832 struct sock *sk = sock->sk;
833 struct irda_sock *new, *self = irda_sk(sk);
834 struct sock *newsk;
835 struct sk_buff *skb = NULL;
836 int err;
837
838 err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
839 if (err)
840 return err;
841
842 err = -EINVAL;
843
844 lock_sock(sk);
845 if (sock->state != SS_UNCONNECTED)
846 goto out;
847
848 err = -EOPNOTSUPP;
849 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
850 (sk->sk_type != SOCK_DGRAM))
851 goto out;
852
853 err = -EINVAL;
854 if (sk->sk_state != TCP_LISTEN)
855 goto out;
856
857 /*
858 * The read queue this time is holding sockets ready to use
859 * hooked into the SABM we saved
860 */
861
862 /*
863 * We can perform the accept only if there is incoming data
864 * on the listening socket.
865 * So, we will block the caller until we receive any data.
866 * If the caller was waiting on select() or poll() before
867 * calling us, the data is waiting for us ;-)
868 * Jean II
869 */
870 while (1) {
871 skb = skb_dequeue(&sk->sk_receive_queue);
872 if (skb)
873 break;
874
875 /* Non blocking operation */
876 err = -EWOULDBLOCK;
877 if (flags & O_NONBLOCK)
878 goto out;
879
880 err = wait_event_interruptible(*(sk_sleep(sk)),
881 skb_peek(&sk->sk_receive_queue));
882 if (err)
883 goto out;
884 }
885
886 newsk = newsock->sk;
887 err = -EIO;
888 if (newsk == NULL)
889 goto out;
890
891 newsk->sk_state = TCP_ESTABLISHED;
892
893 new = irda_sk(newsk);
894
895 /* Now attach up the new socket */
896 new->tsap = irttp_dup(self->tsap, new);
897 err = -EPERM; /* value does not seem to make sense. -arnd */
898 if (!new->tsap) {
899 pr_debug("%s(), dup failed!\n", __func__);
900 goto out;
901 }
902
903 new->stsap_sel = new->tsap->stsap_sel;
904 new->dtsap_sel = new->tsap->dtsap_sel;
905 new->saddr = irttp_get_saddr(new->tsap);
906 new->daddr = irttp_get_daddr(new->tsap);
907
908 new->max_sdu_size_tx = self->max_sdu_size_tx;
909 new->max_sdu_size_rx = self->max_sdu_size_rx;
910 new->max_data_size = self->max_data_size;
911 new->max_header_size = self->max_header_size;
912
913 memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info));
914
915 /* Clean up the original one to keep it in listen state */
916 irttp_listen(self->tsap);
917
918 sk->sk_ack_backlog--;
919
920 newsock->state = SS_CONNECTED;
921
922 irda_connect_response(new);
923 err = 0;
924 out:
925 kfree_skb(skb);
926 release_sock(sk);
927 return err;
928 }
929
930 /*
931 * Function irda_connect (sock, uaddr, addr_len, flags)
932 *
933 * Connect to a IrDA device
934 *
935 * The main difference with a "standard" connect is that with IrDA we need
936 * to resolve the service name into a TSAP selector (in TCP, port number
937 * doesn't have to be resolved).
938 * Because of this service name resolution, we can offer "auto-connect",
939 * where we connect to a service without specifying a destination address.
940 *
941 * Note : by consulting "errno", the user space caller may learn the cause
942 * of the failure. Most of them are visible in the function, others may come
943 * from subroutines called and are listed here :
944 * o EBUSY : already processing a connect
945 * o EHOSTUNREACH : bad addr->sir_addr argument
946 * o EADDRNOTAVAIL : bad addr->sir_name argument
947 * o ENOTUNIQ : more than one node has addr->sir_name (auto-connect)
948 * o ENETUNREACH : no node found on the network (auto-connect)
949 */
irda_connect(struct socket * sock,struct sockaddr * uaddr,int addr_len,int flags)950 static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
951 int addr_len, int flags)
952 {
953 struct sock *sk = sock->sk;
954 struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr;
955 struct irda_sock *self = irda_sk(sk);
956 int err;
957
958 pr_debug("%s(%p)\n", __func__, self);
959
960 lock_sock(sk);
961 /* Don't allow connect for Ultra sockets */
962 err = -ESOCKTNOSUPPORT;
963 if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA))
964 goto out;
965
966 if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
967 sock->state = SS_CONNECTED;
968 err = 0;
969 goto out; /* Connect completed during a ERESTARTSYS event */
970 }
971
972 if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
973 sock->state = SS_UNCONNECTED;
974 err = -ECONNREFUSED;
975 goto out;
976 }
977
978 err = -EISCONN; /* No reconnect on a seqpacket socket */
979 if (sk->sk_state == TCP_ESTABLISHED)
980 goto out;
981
982 sk->sk_state = TCP_CLOSE;
983 sock->state = SS_UNCONNECTED;
984
985 err = -EINVAL;
986 if (addr_len != sizeof(struct sockaddr_irda))
987 goto out;
988
989 /* Check if user supplied any destination device address */
990 if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) {
991 /* Try to find one suitable */
992 err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name);
993 if (err) {
994 pr_debug("%s(), auto-connect failed!\n", __func__);
995 goto out;
996 }
997 } else {
998 /* Use the one provided by the user */
999 self->daddr = addr->sir_addr;
1000 pr_debug("%s(), daddr = %08x\n", __func__, self->daddr);
1001
1002 /* If we don't have a valid service name, we assume the
1003 * user want to connect on a specific LSAP. Prevent
1004 * the use of invalid LSAPs (IrLMP 1.1 p10). Jean II */
1005 if((addr->sir_name[0] != '\0') ||
1006 (addr->sir_lsap_sel >= 0x70)) {
1007 /* Query remote LM-IAS using service name */
1008 err = irda_find_lsap_sel(self, addr->sir_name);
1009 if (err) {
1010 pr_debug("%s(), connect failed!\n", __func__);
1011 goto out;
1012 }
1013 } else {
1014 /* Directly connect to the remote LSAP
1015 * specified by the sir_lsap field.
1016 * Please use with caution, in IrDA LSAPs are
1017 * dynamic and there is no "well-known" LSAP. */
1018 self->dtsap_sel = addr->sir_lsap_sel;
1019 }
1020 }
1021
1022 /* Check if we have opened a local TSAP */
1023 if (!self->tsap) {
1024 err = irda_open_tsap(self, LSAP_ANY, addr->sir_name);
1025 if (err)
1026 goto out;
1027 }
1028
1029 /* Move to connecting socket, start sending Connect Requests */
1030 sock->state = SS_CONNECTING;
1031 sk->sk_state = TCP_SYN_SENT;
1032
1033 /* Connect to remote device */
1034 err = irttp_connect_request(self->tsap, self->dtsap_sel,
1035 self->saddr, self->daddr, NULL,
1036 self->max_sdu_size_rx, NULL);
1037 if (err) {
1038 pr_debug("%s(), connect failed!\n", __func__);
1039 goto out;
1040 }
1041
1042 /* Now the loop */
1043 err = -EINPROGRESS;
1044 if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
1045 goto out;
1046
1047 err = -ERESTARTSYS;
1048 if (wait_event_interruptible(*(sk_sleep(sk)),
1049 (sk->sk_state != TCP_SYN_SENT)))
1050 goto out;
1051
1052 if (sk->sk_state != TCP_ESTABLISHED) {
1053 sock->state = SS_UNCONNECTED;
1054 err = sock_error(sk);
1055 if (!err)
1056 err = -ECONNRESET;
1057 goto out;
1058 }
1059
1060 sock->state = SS_CONNECTED;
1061
1062 /* At this point, IrLMP has assigned our source address */
1063 self->saddr = irttp_get_saddr(self->tsap);
1064 err = 0;
1065 out:
1066 release_sock(sk);
1067 return err;
1068 }
1069
1070 static struct proto irda_proto = {
1071 .name = "IRDA",
1072 .owner = THIS_MODULE,
1073 .obj_size = sizeof(struct irda_sock),
1074 };
1075
1076 /*
1077 * Function irda_create (sock, protocol)
1078 *
1079 * Create IrDA socket
1080 *
1081 */
irda_create(struct net * net,struct socket * sock,int protocol,int kern)1082 static int irda_create(struct net *net, struct socket *sock, int protocol,
1083 int kern)
1084 {
1085 struct sock *sk;
1086 struct irda_sock *self;
1087
1088 if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
1089 return -EINVAL;
1090
1091 if (net != &init_net)
1092 return -EAFNOSUPPORT;
1093
1094 /* Check for valid socket type */
1095 switch (sock->type) {
1096 case SOCK_STREAM: /* For TTP connections with SAR disabled */
1097 case SOCK_SEQPACKET: /* For TTP connections with SAR enabled */
1098 case SOCK_DGRAM: /* For TTP Unitdata or LMP Ultra transfers */
1099 break;
1100 default:
1101 return -ESOCKTNOSUPPORT;
1102 }
1103
1104 /* Allocate networking socket */
1105 sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto, kern);
1106 if (sk == NULL)
1107 return -ENOMEM;
1108
1109 self = irda_sk(sk);
1110 pr_debug("%s() : self is %p\n", __func__, self);
1111
1112 init_waitqueue_head(&self->query_wait);
1113
1114 switch (sock->type) {
1115 case SOCK_STREAM:
1116 sock->ops = &irda_stream_ops;
1117 self->max_sdu_size_rx = TTP_SAR_DISABLE;
1118 break;
1119 case SOCK_SEQPACKET:
1120 sock->ops = &irda_seqpacket_ops;
1121 self->max_sdu_size_rx = TTP_SAR_UNBOUND;
1122 break;
1123 case SOCK_DGRAM:
1124 switch (protocol) {
1125 #ifdef CONFIG_IRDA_ULTRA
1126 case IRDAPROTO_ULTRA:
1127 sock->ops = &irda_ultra_ops;
1128 /* Initialise now, because we may send on unbound
1129 * sockets. Jean II */
1130 self->max_data_size = ULTRA_MAX_DATA - LMP_PID_HEADER;
1131 self->max_header_size = IRDA_MAX_HEADER + LMP_PID_HEADER;
1132 break;
1133 #endif /* CONFIG_IRDA_ULTRA */
1134 case IRDAPROTO_UNITDATA:
1135 sock->ops = &irda_dgram_ops;
1136 /* We let Unitdata conn. be like seqpack conn. */
1137 self->max_sdu_size_rx = TTP_SAR_UNBOUND;
1138 break;
1139 default:
1140 sk_free(sk);
1141 return -ESOCKTNOSUPPORT;
1142 }
1143 break;
1144 default:
1145 sk_free(sk);
1146 return -ESOCKTNOSUPPORT;
1147 }
1148
1149 /* Initialise networking socket struct */
1150 sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */
1151 sk->sk_family = PF_IRDA;
1152 sk->sk_protocol = protocol;
1153
1154 /* Register as a client with IrLMP */
1155 self->ckey = irlmp_register_client(0, NULL, NULL, NULL);
1156 self->mask.word = 0xffff;
1157 self->rx_flow = self->tx_flow = FLOW_START;
1158 self->nslots = DISCOVERY_DEFAULT_SLOTS;
1159 self->daddr = DEV_ADDR_ANY; /* Until we get connected */
1160 self->saddr = 0x0; /* so IrLMP assign us any link */
1161 return 0;
1162 }
1163
1164 /*
1165 * Function irda_destroy_socket (self)
1166 *
1167 * Destroy socket
1168 *
1169 */
irda_destroy_socket(struct irda_sock * self)1170 static void irda_destroy_socket(struct irda_sock *self)
1171 {
1172 pr_debug("%s(%p)\n", __func__, self);
1173
1174 /* Unregister with IrLMP */
1175 irlmp_unregister_client(self->ckey);
1176 irlmp_unregister_service(self->skey);
1177
1178 /* Unregister with LM-IAS */
1179 if (self->ias_obj) {
1180 irias_delete_object(self->ias_obj);
1181 self->ias_obj = NULL;
1182 }
1183
1184 if (self->iriap) {
1185 iriap_close(self->iriap);
1186 self->iriap = NULL;
1187 }
1188
1189 if (self->tsap) {
1190 irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
1191 irttp_close_tsap(self->tsap);
1192 self->tsap = NULL;
1193 }
1194 #ifdef CONFIG_IRDA_ULTRA
1195 if (self->lsap) {
1196 irlmp_close_lsap(self->lsap);
1197 self->lsap = NULL;
1198 }
1199 #endif /* CONFIG_IRDA_ULTRA */
1200 }
1201
1202 /*
1203 * Function irda_release (sock)
1204 */
irda_release(struct socket * sock)1205 static int irda_release(struct socket *sock)
1206 {
1207 struct sock *sk = sock->sk;
1208
1209 if (sk == NULL)
1210 return 0;
1211
1212 lock_sock(sk);
1213 sk->sk_state = TCP_CLOSE;
1214 sk->sk_shutdown |= SEND_SHUTDOWN;
1215 sk->sk_state_change(sk);
1216
1217 /* Destroy IrDA socket */
1218 irda_destroy_socket(irda_sk(sk));
1219
1220 sock_orphan(sk);
1221 sock->sk = NULL;
1222 release_sock(sk);
1223
1224 /* Purge queues (see sock_init_data()) */
1225 skb_queue_purge(&sk->sk_receive_queue);
1226
1227 /* Destroy networking socket if we are the last reference on it,
1228 * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */
1229 sock_put(sk);
1230
1231 /* Notes on socket locking and deallocation... - Jean II
1232 * In theory we should put pairs of sock_hold() / sock_put() to
1233 * prevent the socket to be destroyed whenever there is an
1234 * outstanding request or outstanding incoming packet or event.
1235 *
1236 * 1) This may include IAS request, both in connect and getsockopt.
1237 * Unfortunately, the situation is a bit more messy than it looks,
1238 * because we close iriap and kfree(self) above.
1239 *
1240 * 2) This may include selective discovery in getsockopt.
1241 * Same stuff as above, irlmp registration and self are gone.
1242 *
1243 * Probably 1 and 2 may not matter, because it's all triggered
1244 * by a process and the socket layer already prevent the
1245 * socket to go away while a process is holding it, through
1246 * sockfd_put() and fput()...
1247 *
1248 * 3) This may include deferred TSAP closure. In particular,
1249 * we may receive a late irda_disconnect_indication()
1250 * Fortunately, (tsap_cb *)->close_pend should protect us
1251 * from that.
1252 *
1253 * I did some testing on SMP, and it looks solid. And the socket
1254 * memory leak is now gone... - Jean II
1255 */
1256
1257 return 0;
1258 }
1259
1260 /*
1261 * Function irda_sendmsg (sock, msg, len)
1262 *
1263 * Send message down to TinyTP. This function is used for both STREAM and
1264 * SEQPACK services. This is possible since it forces the client to
1265 * fragment the message if necessary
1266 */
irda_sendmsg(struct socket * sock,struct msghdr * msg,size_t len)1267 static int irda_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1268 {
1269 struct sock *sk = sock->sk;
1270 struct irda_sock *self;
1271 struct sk_buff *skb;
1272 int err = -EPIPE;
1273
1274 pr_debug("%s(), len=%zd\n", __func__, len);
1275
1276 /* Note : socket.c set MSG_EOR on SEQPACKET sockets */
1277 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
1278 MSG_NOSIGNAL)) {
1279 return -EINVAL;
1280 }
1281
1282 lock_sock(sk);
1283
1284 if (sk->sk_shutdown & SEND_SHUTDOWN)
1285 goto out_err;
1286
1287 if (sk->sk_state != TCP_ESTABLISHED) {
1288 err = -ENOTCONN;
1289 goto out;
1290 }
1291
1292 self = irda_sk(sk);
1293
1294 /* Check if IrTTP is wants us to slow down */
1295
1296 if (wait_event_interruptible(*(sk_sleep(sk)),
1297 (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) {
1298 err = -ERESTARTSYS;
1299 goto out;
1300 }
1301
1302 /* Check if we are still connected */
1303 if (sk->sk_state != TCP_ESTABLISHED) {
1304 err = -ENOTCONN;
1305 goto out;
1306 }
1307
1308 /* Check that we don't send out too big frames */
1309 if (len > self->max_data_size) {
1310 pr_debug("%s(), Chopping frame from %zd to %d bytes!\n",
1311 __func__, len, self->max_data_size);
1312 len = self->max_data_size;
1313 }
1314
1315 skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16,
1316 msg->msg_flags & MSG_DONTWAIT, &err);
1317 if (!skb)
1318 goto out_err;
1319
1320 skb_reserve(skb, self->max_header_size + 16);
1321 skb_reset_transport_header(skb);
1322 skb_put(skb, len);
1323 err = memcpy_from_msg(skb_transport_header(skb), msg, len);
1324 if (err) {
1325 kfree_skb(skb);
1326 goto out_err;
1327 }
1328
1329 /*
1330 * Just send the message to TinyTP, and let it deal with possible
1331 * errors. No need to duplicate all that here
1332 */
1333 err = irttp_data_request(self->tsap, skb);
1334 if (err) {
1335 pr_debug("%s(), err=%d\n", __func__, err);
1336 goto out_err;
1337 }
1338
1339 release_sock(sk);
1340 /* Tell client how much data we actually sent */
1341 return len;
1342
1343 out_err:
1344 err = sk_stream_error(sk, msg->msg_flags, err);
1345 out:
1346 release_sock(sk);
1347 return err;
1348
1349 }
1350
1351 /*
1352 * Function irda_recvmsg_dgram (sock, msg, size, flags)
1353 *
1354 * Try to receive message and copy it to user. The frame is discarded
1355 * after being read, regardless of how much the user actually read
1356 */
irda_recvmsg_dgram(struct socket * sock,struct msghdr * msg,size_t size,int flags)1357 static int irda_recvmsg_dgram(struct socket *sock, struct msghdr *msg,
1358 size_t size, int flags)
1359 {
1360 struct sock *sk = sock->sk;
1361 struct irda_sock *self = irda_sk(sk);
1362 struct sk_buff *skb;
1363 size_t copied;
1364 int err;
1365
1366 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1367 flags & MSG_DONTWAIT, &err);
1368 if (!skb)
1369 return err;
1370
1371 skb_reset_transport_header(skb);
1372 copied = skb->len;
1373
1374 if (copied > size) {
1375 pr_debug("%s(), Received truncated frame (%zd < %zd)!\n",
1376 __func__, copied, size);
1377 copied = size;
1378 msg->msg_flags |= MSG_TRUNC;
1379 }
1380 skb_copy_datagram_msg(skb, 0, msg, copied);
1381
1382 skb_free_datagram(sk, skb);
1383
1384 /*
1385 * Check if we have previously stopped IrTTP and we know
1386 * have more free space in our rx_queue. If so tell IrTTP
1387 * to start delivering frames again before our rx_queue gets
1388 * empty
1389 */
1390 if (self->rx_flow == FLOW_STOP) {
1391 if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
1392 pr_debug("%s(), Starting IrTTP\n", __func__);
1393 self->rx_flow = FLOW_START;
1394 irttp_flow_request(self->tsap, FLOW_START);
1395 }
1396 }
1397
1398 return copied;
1399 }
1400
1401 /*
1402 * Function irda_recvmsg_stream (sock, msg, size, flags)
1403 */
irda_recvmsg_stream(struct socket * sock,struct msghdr * msg,size_t size,int flags)1404 static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg,
1405 size_t size, int flags)
1406 {
1407 struct sock *sk = sock->sk;
1408 struct irda_sock *self = irda_sk(sk);
1409 int noblock = flags & MSG_DONTWAIT;
1410 size_t copied = 0;
1411 int target, err;
1412 long timeo;
1413
1414 if ((err = sock_error(sk)) < 0)
1415 return err;
1416
1417 if (sock->flags & __SO_ACCEPTCON)
1418 return -EINVAL;
1419
1420 err =-EOPNOTSUPP;
1421 if (flags & MSG_OOB)
1422 return -EOPNOTSUPP;
1423
1424 err = 0;
1425 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
1426 timeo = sock_rcvtimeo(sk, noblock);
1427
1428 do {
1429 int chunk;
1430 struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
1431
1432 if (skb == NULL) {
1433 DEFINE_WAIT(wait);
1434 err = 0;
1435
1436 if (copied >= target)
1437 break;
1438
1439 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1440
1441 /*
1442 * POSIX 1003.1g mandates this order.
1443 */
1444 err = sock_error(sk);
1445 if (err)
1446 ;
1447 else if (sk->sk_shutdown & RCV_SHUTDOWN)
1448 ;
1449 else if (noblock)
1450 err = -EAGAIN;
1451 else if (signal_pending(current))
1452 err = sock_intr_errno(timeo);
1453 else if (sk->sk_state != TCP_ESTABLISHED)
1454 err = -ENOTCONN;
1455 else if (skb_peek(&sk->sk_receive_queue) == NULL)
1456 /* Wait process until data arrives */
1457 schedule();
1458
1459 finish_wait(sk_sleep(sk), &wait);
1460
1461 if (err)
1462 return err;
1463 if (sk->sk_shutdown & RCV_SHUTDOWN)
1464 break;
1465
1466 continue;
1467 }
1468
1469 chunk = min_t(unsigned int, skb->len, size);
1470 if (memcpy_to_msg(msg, skb->data, chunk)) {
1471 skb_queue_head(&sk->sk_receive_queue, skb);
1472 if (copied == 0)
1473 copied = -EFAULT;
1474 break;
1475 }
1476 copied += chunk;
1477 size -= chunk;
1478
1479 /* Mark read part of skb as used */
1480 if (!(flags & MSG_PEEK)) {
1481 skb_pull(skb, chunk);
1482
1483 /* put the skb back if we didn't use it up.. */
1484 if (skb->len) {
1485 pr_debug("%s(), back on q!\n",
1486 __func__);
1487 skb_queue_head(&sk->sk_receive_queue, skb);
1488 break;
1489 }
1490
1491 kfree_skb(skb);
1492 } else {
1493 pr_debug("%s() questionable!?\n", __func__);
1494
1495 /* put message back and return */
1496 skb_queue_head(&sk->sk_receive_queue, skb);
1497 break;
1498 }
1499 } while (size);
1500
1501 /*
1502 * Check if we have previously stopped IrTTP and we know
1503 * have more free space in our rx_queue. If so tell IrTTP
1504 * to start delivering frames again before our rx_queue gets
1505 * empty
1506 */
1507 if (self->rx_flow == FLOW_STOP) {
1508 if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
1509 pr_debug("%s(), Starting IrTTP\n", __func__);
1510 self->rx_flow = FLOW_START;
1511 irttp_flow_request(self->tsap, FLOW_START);
1512 }
1513 }
1514
1515 return copied;
1516 }
1517
1518 /*
1519 * Function irda_sendmsg_dgram (sock, msg, len)
1520 *
1521 * Send message down to TinyTP for the unreliable sequenced
1522 * packet service...
1523 *
1524 */
irda_sendmsg_dgram(struct socket * sock,struct msghdr * msg,size_t len)1525 static int irda_sendmsg_dgram(struct socket *sock, struct msghdr *msg,
1526 size_t len)
1527 {
1528 struct sock *sk = sock->sk;
1529 struct irda_sock *self;
1530 struct sk_buff *skb;
1531 int err;
1532
1533 pr_debug("%s(), len=%zd\n", __func__, len);
1534
1535 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
1536 return -EINVAL;
1537
1538 lock_sock(sk);
1539
1540 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1541 send_sig(SIGPIPE, current, 0);
1542 err = -EPIPE;
1543 goto out;
1544 }
1545
1546 err = -ENOTCONN;
1547 if (sk->sk_state != TCP_ESTABLISHED)
1548 goto out;
1549
1550 self = irda_sk(sk);
1551
1552 /*
1553 * Check that we don't send out too big frames. This is an unreliable
1554 * service, so we have no fragmentation and no coalescence
1555 */
1556 if (len > self->max_data_size) {
1557 pr_debug("%s(), Warning too much data! Chopping frame from %zd to %d bytes!\n",
1558 __func__, len, self->max_data_size);
1559 len = self->max_data_size;
1560 }
1561
1562 skb = sock_alloc_send_skb(sk, len + self->max_header_size,
1563 msg->msg_flags & MSG_DONTWAIT, &err);
1564 err = -ENOBUFS;
1565 if (!skb)
1566 goto out;
1567
1568 skb_reserve(skb, self->max_header_size);
1569 skb_reset_transport_header(skb);
1570
1571 pr_debug("%s(), appending user data\n", __func__);
1572 skb_put(skb, len);
1573 err = memcpy_from_msg(skb_transport_header(skb), msg, len);
1574 if (err) {
1575 kfree_skb(skb);
1576 goto out;
1577 }
1578
1579 /*
1580 * Just send the message to TinyTP, and let it deal with possible
1581 * errors. No need to duplicate all that here
1582 */
1583 err = irttp_udata_request(self->tsap, skb);
1584 if (err) {
1585 pr_debug("%s(), err=%d\n", __func__, err);
1586 goto out;
1587 }
1588
1589 release_sock(sk);
1590 return len;
1591
1592 out:
1593 release_sock(sk);
1594 return err;
1595 }
1596
1597 /*
1598 * Function irda_sendmsg_ultra (sock, msg, len)
1599 *
1600 * Send message down to IrLMP for the unreliable Ultra
1601 * packet service...
1602 */
1603 #ifdef CONFIG_IRDA_ULTRA
irda_sendmsg_ultra(struct socket * sock,struct msghdr * msg,size_t len)1604 static int irda_sendmsg_ultra(struct socket *sock, struct msghdr *msg,
1605 size_t len)
1606 {
1607 struct sock *sk = sock->sk;
1608 struct irda_sock *self;
1609 __u8 pid = 0;
1610 int bound = 0;
1611 struct sk_buff *skb;
1612 int err;
1613
1614 pr_debug("%s(), len=%zd\n", __func__, len);
1615
1616 err = -EINVAL;
1617 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
1618 return -EINVAL;
1619
1620 lock_sock(sk);
1621
1622 err = -EPIPE;
1623 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1624 send_sig(SIGPIPE, current, 0);
1625 goto out;
1626 }
1627
1628 self = irda_sk(sk);
1629
1630 /* Check if an address was specified with sendto. Jean II */
1631 if (msg->msg_name) {
1632 DECLARE_SOCKADDR(struct sockaddr_irda *, addr, msg->msg_name);
1633 err = -EINVAL;
1634 /* Check address, extract pid. Jean II */
1635 if (msg->msg_namelen < sizeof(*addr))
1636 goto out;
1637 if (addr->sir_family != AF_IRDA)
1638 goto out;
1639
1640 pid = addr->sir_lsap_sel;
1641 if (pid & 0x80) {
1642 pr_debug("%s(), extension in PID not supp!\n",
1643 __func__);
1644 err = -EOPNOTSUPP;
1645 goto out;
1646 }
1647 } else {
1648 /* Check that the socket is properly bound to an Ultra
1649 * port. Jean II */
1650 if ((self->lsap == NULL) ||
1651 (sk->sk_state != TCP_ESTABLISHED)) {
1652 pr_debug("%s(), socket not bound to Ultra PID.\n",
1653 __func__);
1654 err = -ENOTCONN;
1655 goto out;
1656 }
1657 /* Use PID from socket */
1658 bound = 1;
1659 }
1660
1661 /*
1662 * Check that we don't send out too big frames. This is an unreliable
1663 * service, so we have no fragmentation and no coalescence
1664 */
1665 if (len > self->max_data_size) {
1666 pr_debug("%s(), Warning too much data! Chopping frame from %zd to %d bytes!\n",
1667 __func__, len, self->max_data_size);
1668 len = self->max_data_size;
1669 }
1670
1671 skb = sock_alloc_send_skb(sk, len + self->max_header_size,
1672 msg->msg_flags & MSG_DONTWAIT, &err);
1673 err = -ENOBUFS;
1674 if (!skb)
1675 goto out;
1676
1677 skb_reserve(skb, self->max_header_size);
1678 skb_reset_transport_header(skb);
1679
1680 pr_debug("%s(), appending user data\n", __func__);
1681 skb_put(skb, len);
1682 err = memcpy_from_msg(skb_transport_header(skb), msg, len);
1683 if (err) {
1684 kfree_skb(skb);
1685 goto out;
1686 }
1687
1688 err = irlmp_connless_data_request((bound ? self->lsap : NULL),
1689 skb, pid);
1690 if (err)
1691 pr_debug("%s(), err=%d\n", __func__, err);
1692 out:
1693 release_sock(sk);
1694 return err ? : len;
1695 }
1696 #endif /* CONFIG_IRDA_ULTRA */
1697
1698 /*
1699 * Function irda_shutdown (sk, how)
1700 */
irda_shutdown(struct socket * sock,int how)1701 static int irda_shutdown(struct socket *sock, int how)
1702 {
1703 struct sock *sk = sock->sk;
1704 struct irda_sock *self = irda_sk(sk);
1705
1706 pr_debug("%s(%p)\n", __func__, self);
1707
1708 lock_sock(sk);
1709
1710 sk->sk_state = TCP_CLOSE;
1711 sk->sk_shutdown |= SEND_SHUTDOWN;
1712 sk->sk_state_change(sk);
1713
1714 if (self->iriap) {
1715 iriap_close(self->iriap);
1716 self->iriap = NULL;
1717 }
1718
1719 if (self->tsap) {
1720 irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
1721 irttp_close_tsap(self->tsap);
1722 self->tsap = NULL;
1723 }
1724
1725 /* A few cleanup so the socket look as good as new... */
1726 self->rx_flow = self->tx_flow = FLOW_START; /* needed ??? */
1727 self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */
1728 self->saddr = 0x0; /* so IrLMP assign us any link */
1729
1730 release_sock(sk);
1731
1732 return 0;
1733 }
1734
1735 /*
1736 * Function irda_poll (file, sock, wait)
1737 */
irda_poll(struct file * file,struct socket * sock,poll_table * wait)1738 static unsigned int irda_poll(struct file * file, struct socket *sock,
1739 poll_table *wait)
1740 {
1741 struct sock *sk = sock->sk;
1742 struct irda_sock *self = irda_sk(sk);
1743 unsigned int mask;
1744
1745 poll_wait(file, sk_sleep(sk), wait);
1746 mask = 0;
1747
1748 /* Exceptional events? */
1749 if (sk->sk_err)
1750 mask |= POLLERR;
1751 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1752 pr_debug("%s(), POLLHUP\n", __func__);
1753 mask |= POLLHUP;
1754 }
1755
1756 /* Readable? */
1757 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1758 pr_debug("Socket is readable\n");
1759 mask |= POLLIN | POLLRDNORM;
1760 }
1761
1762 /* Connection-based need to check for termination and startup */
1763 switch (sk->sk_type) {
1764 case SOCK_STREAM:
1765 if (sk->sk_state == TCP_CLOSE) {
1766 pr_debug("%s(), POLLHUP\n", __func__);
1767 mask |= POLLHUP;
1768 }
1769
1770 if (sk->sk_state == TCP_ESTABLISHED) {
1771 if ((self->tx_flow == FLOW_START) &&
1772 sock_writeable(sk))
1773 {
1774 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1775 }
1776 }
1777 break;
1778 case SOCK_SEQPACKET:
1779 if ((self->tx_flow == FLOW_START) &&
1780 sock_writeable(sk))
1781 {
1782 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1783 }
1784 break;
1785 case SOCK_DGRAM:
1786 if (sock_writeable(sk))
1787 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1788 break;
1789 default:
1790 break;
1791 }
1792
1793 return mask;
1794 }
1795
1796 /*
1797 * Function irda_ioctl (sock, cmd, arg)
1798 */
irda_ioctl(struct socket * sock,unsigned int cmd,unsigned long arg)1799 static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1800 {
1801 struct sock *sk = sock->sk;
1802 int err;
1803
1804 pr_debug("%s(), cmd=%#x\n", __func__, cmd);
1805
1806 err = -EINVAL;
1807 switch (cmd) {
1808 case TIOCOUTQ: {
1809 long amount;
1810
1811 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1812 if (amount < 0)
1813 amount = 0;
1814 err = put_user(amount, (unsigned int __user *)arg);
1815 break;
1816 }
1817
1818 case TIOCINQ: {
1819 struct sk_buff *skb;
1820 long amount = 0L;
1821 /* These two are safe on a single CPU system as only user tasks fiddle here */
1822 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1823 amount = skb->len;
1824 err = put_user(amount, (unsigned int __user *)arg);
1825 break;
1826 }
1827
1828 case SIOCGSTAMP:
1829 if (sk != NULL)
1830 err = sock_get_timestamp(sk, (struct timeval __user *)arg);
1831 break;
1832
1833 case SIOCGIFADDR:
1834 case SIOCSIFADDR:
1835 case SIOCGIFDSTADDR:
1836 case SIOCSIFDSTADDR:
1837 case SIOCGIFBRDADDR:
1838 case SIOCSIFBRDADDR:
1839 case SIOCGIFNETMASK:
1840 case SIOCSIFNETMASK:
1841 case SIOCGIFMETRIC:
1842 case SIOCSIFMETRIC:
1843 break;
1844 default:
1845 pr_debug("%s(), doing device ioctl!\n", __func__);
1846 err = -ENOIOCTLCMD;
1847 }
1848
1849 return err;
1850 }
1851
1852 #ifdef CONFIG_COMPAT
1853 /*
1854 * Function irda_ioctl (sock, cmd, arg)
1855 */
irda_compat_ioctl(struct socket * sock,unsigned int cmd,unsigned long arg)1856 static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1857 {
1858 /*
1859 * All IRDA's ioctl are standard ones.
1860 */
1861 return -ENOIOCTLCMD;
1862 }
1863 #endif
1864
1865 /*
1866 * Function irda_setsockopt (sock, level, optname, optval, optlen)
1867 *
1868 * Set some options for the socket
1869 *
1870 */
irda_setsockopt(struct socket * sock,int level,int optname,char __user * optval,unsigned int optlen)1871 static int irda_setsockopt(struct socket *sock, int level, int optname,
1872 char __user *optval, unsigned int optlen)
1873 {
1874 struct sock *sk = sock->sk;
1875 struct irda_sock *self = irda_sk(sk);
1876 struct irda_ias_set *ias_opt;
1877 struct ias_object *ias_obj;
1878 struct ias_attrib * ias_attr; /* Attribute in IAS object */
1879 int opt, free_ias = 0, err = 0;
1880
1881 pr_debug("%s(%p)\n", __func__, self);
1882
1883 if (level != SOL_IRLMP)
1884 return -ENOPROTOOPT;
1885
1886 lock_sock(sk);
1887
1888 switch (optname) {
1889 case IRLMP_IAS_SET:
1890 /* The user want to add an attribute to an existing IAS object
1891 * (in the IAS database) or to create a new object with this
1892 * attribute.
1893 * We first query IAS to know if the object exist, and then
1894 * create the right attribute...
1895 */
1896
1897 if (optlen != sizeof(struct irda_ias_set)) {
1898 err = -EINVAL;
1899 goto out;
1900 }
1901
1902 ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
1903 if (ias_opt == NULL) {
1904 err = -ENOMEM;
1905 goto out;
1906 }
1907
1908 /* Copy query to the driver. */
1909 if (copy_from_user(ias_opt, optval, optlen)) {
1910 kfree(ias_opt);
1911 err = -EFAULT;
1912 goto out;
1913 }
1914
1915 /* Find the object we target.
1916 * If the user gives us an empty string, we use the object
1917 * associated with this socket. This will workaround
1918 * duplicated class name - Jean II */
1919 if(ias_opt->irda_class_name[0] == '\0') {
1920 if(self->ias_obj == NULL) {
1921 kfree(ias_opt);
1922 err = -EINVAL;
1923 goto out;
1924 }
1925 ias_obj = self->ias_obj;
1926 } else
1927 ias_obj = irias_find_object(ias_opt->irda_class_name);
1928
1929 /* Only ROOT can mess with the global IAS database.
1930 * Users can only add attributes to the object associated
1931 * with the socket they own - Jean II */
1932 if((!capable(CAP_NET_ADMIN)) &&
1933 ((ias_obj == NULL) || (ias_obj != self->ias_obj))) {
1934 kfree(ias_opt);
1935 err = -EPERM;
1936 goto out;
1937 }
1938
1939 /* If the object doesn't exist, create it */
1940 if(ias_obj == (struct ias_object *) NULL) {
1941 /* Create a new object */
1942 ias_obj = irias_new_object(ias_opt->irda_class_name,
1943 jiffies);
1944 if (ias_obj == NULL) {
1945 kfree(ias_opt);
1946 err = -ENOMEM;
1947 goto out;
1948 }
1949 free_ias = 1;
1950 }
1951
1952 /* Do we have the attribute already ? */
1953 if(irias_find_attrib(ias_obj, ias_opt->irda_attrib_name)) {
1954 kfree(ias_opt);
1955 if (free_ias) {
1956 kfree(ias_obj->name);
1957 kfree(ias_obj);
1958 }
1959 err = -EINVAL;
1960 goto out;
1961 }
1962
1963 /* Look at the type */
1964 switch(ias_opt->irda_attrib_type) {
1965 case IAS_INTEGER:
1966 /* Add an integer attribute */
1967 irias_add_integer_attrib(
1968 ias_obj,
1969 ias_opt->irda_attrib_name,
1970 ias_opt->attribute.irda_attrib_int,
1971 IAS_USER_ATTR);
1972 break;
1973 case IAS_OCT_SEQ:
1974 /* Check length */
1975 if(ias_opt->attribute.irda_attrib_octet_seq.len >
1976 IAS_MAX_OCTET_STRING) {
1977 kfree(ias_opt);
1978 if (free_ias) {
1979 kfree(ias_obj->name);
1980 kfree(ias_obj);
1981 }
1982
1983 err = -EINVAL;
1984 goto out;
1985 }
1986 /* Add an octet sequence attribute */
1987 irias_add_octseq_attrib(
1988 ias_obj,
1989 ias_opt->irda_attrib_name,
1990 ias_opt->attribute.irda_attrib_octet_seq.octet_seq,
1991 ias_opt->attribute.irda_attrib_octet_seq.len,
1992 IAS_USER_ATTR);
1993 break;
1994 case IAS_STRING:
1995 /* Should check charset & co */
1996 /* Check length */
1997 /* The length is encoded in a __u8, and
1998 * IAS_MAX_STRING == 256, so there is no way
1999 * userspace can pass us a string too large.
2000 * Jean II */
2001 /* NULL terminate the string (avoid troubles) */
2002 ias_opt->attribute.irda_attrib_string.string[ias_opt->attribute.irda_attrib_string.len] = '\0';
2003 /* Add a string attribute */
2004 irias_add_string_attrib(
2005 ias_obj,
2006 ias_opt->irda_attrib_name,
2007 ias_opt->attribute.irda_attrib_string.string,
2008 IAS_USER_ATTR);
2009 break;
2010 default :
2011 kfree(ias_opt);
2012 if (free_ias) {
2013 kfree(ias_obj->name);
2014 kfree(ias_obj);
2015 }
2016 err = -EINVAL;
2017 goto out;
2018 }
2019 irias_insert_object(ias_obj);
2020 kfree(ias_opt);
2021 break;
2022 case IRLMP_IAS_DEL:
2023 /* The user want to delete an object from our local IAS
2024 * database. We just need to query the IAS, check is the
2025 * object is not owned by the kernel and delete it.
2026 */
2027
2028 if (optlen != sizeof(struct irda_ias_set)) {
2029 err = -EINVAL;
2030 goto out;
2031 }
2032
2033 ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
2034 if (ias_opt == NULL) {
2035 err = -ENOMEM;
2036 goto out;
2037 }
2038
2039 /* Copy query to the driver. */
2040 if (copy_from_user(ias_opt, optval, optlen)) {
2041 kfree(ias_opt);
2042 err = -EFAULT;
2043 goto out;
2044 }
2045
2046 /* Find the object we target.
2047 * If the user gives us an empty string, we use the object
2048 * associated with this socket. This will workaround
2049 * duplicated class name - Jean II */
2050 if(ias_opt->irda_class_name[0] == '\0')
2051 ias_obj = self->ias_obj;
2052 else
2053 ias_obj = irias_find_object(ias_opt->irda_class_name);
2054 if(ias_obj == (struct ias_object *) NULL) {
2055 kfree(ias_opt);
2056 err = -EINVAL;
2057 goto out;
2058 }
2059
2060 /* Only ROOT can mess with the global IAS database.
2061 * Users can only del attributes from the object associated
2062 * with the socket they own - Jean II */
2063 if((!capable(CAP_NET_ADMIN)) &&
2064 ((ias_obj == NULL) || (ias_obj != self->ias_obj))) {
2065 kfree(ias_opt);
2066 err = -EPERM;
2067 goto out;
2068 }
2069
2070 /* Find the attribute (in the object) we target */
2071 ias_attr = irias_find_attrib(ias_obj,
2072 ias_opt->irda_attrib_name);
2073 if(ias_attr == (struct ias_attrib *) NULL) {
2074 kfree(ias_opt);
2075 err = -EINVAL;
2076 goto out;
2077 }
2078
2079 /* Check is the user space own the object */
2080 if(ias_attr->value->owner != IAS_USER_ATTR) {
2081 pr_debug("%s(), attempting to delete a kernel attribute\n",
2082 __func__);
2083 kfree(ias_opt);
2084 err = -EPERM;
2085 goto out;
2086 }
2087
2088 /* Remove the attribute (and maybe the object) */
2089 irias_delete_attrib(ias_obj, ias_attr, 1);
2090 kfree(ias_opt);
2091 break;
2092 case IRLMP_MAX_SDU_SIZE:
2093 if (optlen < sizeof(int)) {
2094 err = -EINVAL;
2095 goto out;
2096 }
2097
2098 if (get_user(opt, (int __user *)optval)) {
2099 err = -EFAULT;
2100 goto out;
2101 }
2102
2103 /* Only possible for a seqpacket service (TTP with SAR) */
2104 if (sk->sk_type != SOCK_SEQPACKET) {
2105 pr_debug("%s(), setting max_sdu_size = %d\n",
2106 __func__, opt);
2107 self->max_sdu_size_rx = opt;
2108 } else {
2109 net_warn_ratelimited("%s: not allowed to set MAXSDUSIZE for this socket type!\n",
2110 __func__);
2111 err = -ENOPROTOOPT;
2112 goto out;
2113 }
2114 break;
2115 case IRLMP_HINTS_SET:
2116 if (optlen < sizeof(int)) {
2117 err = -EINVAL;
2118 goto out;
2119 }
2120
2121 /* The input is really a (__u8 hints[2]), easier as an int */
2122 if (get_user(opt, (int __user *)optval)) {
2123 err = -EFAULT;
2124 goto out;
2125 }
2126
2127 /* Unregister any old registration */
2128 irlmp_unregister_service(self->skey);
2129
2130 self->skey = irlmp_register_service((__u16) opt);
2131 break;
2132 case IRLMP_HINT_MASK_SET:
2133 /* As opposed to the previous case which set the hint bits
2134 * that we advertise, this one set the filter we use when
2135 * making a discovery (nodes which don't match any hint
2136 * bit in the mask are not reported).
2137 */
2138 if (optlen < sizeof(int)) {
2139 err = -EINVAL;
2140 goto out;
2141 }
2142
2143 /* The input is really a (__u8 hints[2]), easier as an int */
2144 if (get_user(opt, (int __user *)optval)) {
2145 err = -EFAULT;
2146 goto out;
2147 }
2148
2149 /* Set the new hint mask */
2150 self->mask.word = (__u16) opt;
2151 /* Mask out extension bits */
2152 self->mask.word &= 0x7f7f;
2153 /* Check if no bits */
2154 if(!self->mask.word)
2155 self->mask.word = 0xFFFF;
2156
2157 break;
2158 default:
2159 err = -ENOPROTOOPT;
2160 break;
2161 }
2162
2163 out:
2164 release_sock(sk);
2165
2166 return err;
2167 }
2168
2169 /*
2170 * Function irda_extract_ias_value(ias_opt, ias_value)
2171 *
2172 * Translate internal IAS value structure to the user space representation
2173 *
2174 * The external representation of IAS values, as we exchange them with
2175 * user space program is quite different from the internal representation,
2176 * as stored in the IAS database (because we need a flat structure for
2177 * crossing kernel boundary).
2178 * This function transform the former in the latter. We also check
2179 * that the value type is valid.
2180 */
irda_extract_ias_value(struct irda_ias_set * ias_opt,struct ias_value * ias_value)2181 static int irda_extract_ias_value(struct irda_ias_set *ias_opt,
2182 struct ias_value *ias_value)
2183 {
2184 /* Look at the type */
2185 switch (ias_value->type) {
2186 case IAS_INTEGER:
2187 /* Copy the integer */
2188 ias_opt->attribute.irda_attrib_int = ias_value->t.integer;
2189 break;
2190 case IAS_OCT_SEQ:
2191 /* Set length */
2192 ias_opt->attribute.irda_attrib_octet_seq.len = ias_value->len;
2193 /* Copy over */
2194 memcpy(ias_opt->attribute.irda_attrib_octet_seq.octet_seq,
2195 ias_value->t.oct_seq, ias_value->len);
2196 break;
2197 case IAS_STRING:
2198 /* Set length */
2199 ias_opt->attribute.irda_attrib_string.len = ias_value->len;
2200 ias_opt->attribute.irda_attrib_string.charset = ias_value->charset;
2201 /* Copy over */
2202 memcpy(ias_opt->attribute.irda_attrib_string.string,
2203 ias_value->t.string, ias_value->len);
2204 /* NULL terminate the string (avoid troubles) */
2205 ias_opt->attribute.irda_attrib_string.string[ias_value->len] = '\0';
2206 break;
2207 case IAS_MISSING:
2208 default :
2209 return -EINVAL;
2210 }
2211
2212 /* Copy type over */
2213 ias_opt->irda_attrib_type = ias_value->type;
2214
2215 return 0;
2216 }
2217
2218 /*
2219 * Function irda_getsockopt (sock, level, optname, optval, optlen)
2220 */
irda_getsockopt(struct socket * sock,int level,int optname,char __user * optval,int __user * optlen)2221 static int irda_getsockopt(struct socket *sock, int level, int optname,
2222 char __user *optval, int __user *optlen)
2223 {
2224 struct sock *sk = sock->sk;
2225 struct irda_sock *self = irda_sk(sk);
2226 struct irda_device_list list = { 0 };
2227 struct irda_device_info *discoveries;
2228 struct irda_ias_set * ias_opt; /* IAS get/query params */
2229 struct ias_object * ias_obj; /* Object in IAS */
2230 struct ias_attrib * ias_attr; /* Attribute in IAS object */
2231 int daddr = DEV_ADDR_ANY; /* Dest address for IAS queries */
2232 int val = 0;
2233 int len = 0;
2234 int err = 0;
2235 int offset, total;
2236
2237 pr_debug("%s(%p)\n", __func__, self);
2238
2239 if (level != SOL_IRLMP)
2240 return -ENOPROTOOPT;
2241
2242 if (get_user(len, optlen))
2243 return -EFAULT;
2244
2245 if(len < 0)
2246 return -EINVAL;
2247
2248 lock_sock(sk);
2249
2250 switch (optname) {
2251 case IRLMP_ENUMDEVICES:
2252
2253 /* Offset to first device entry */
2254 offset = sizeof(struct irda_device_list) -
2255 sizeof(struct irda_device_info);
2256
2257 if (len < offset) {
2258 err = -EINVAL;
2259 goto out;
2260 }
2261
2262 /* Ask lmp for the current discovery log */
2263 discoveries = irlmp_get_discoveries(&list.len, self->mask.word,
2264 self->nslots);
2265 /* Check if the we got some results */
2266 if (discoveries == NULL) {
2267 err = -EAGAIN;
2268 goto out; /* Didn't find any devices */
2269 }
2270
2271 /* Write total list length back to client */
2272 if (copy_to_user(optval, &list, offset))
2273 err = -EFAULT;
2274
2275 /* Copy the list itself - watch for overflow */
2276 if (list.len > 2048) {
2277 err = -EINVAL;
2278 goto bed;
2279 }
2280 total = offset + (list.len * sizeof(struct irda_device_info));
2281 if (total > len)
2282 total = len;
2283 if (copy_to_user(optval+offset, discoveries, total - offset))
2284 err = -EFAULT;
2285
2286 /* Write total number of bytes used back to client */
2287 if (put_user(total, optlen))
2288 err = -EFAULT;
2289 bed:
2290 /* Free up our buffer */
2291 kfree(discoveries);
2292 break;
2293 case IRLMP_MAX_SDU_SIZE:
2294 val = self->max_data_size;
2295 len = sizeof(int);
2296 if (put_user(len, optlen)) {
2297 err = -EFAULT;
2298 goto out;
2299 }
2300
2301 if (copy_to_user(optval, &val, len)) {
2302 err = -EFAULT;
2303 goto out;
2304 }
2305
2306 break;
2307 case IRLMP_IAS_GET:
2308 /* The user want an object from our local IAS database.
2309 * We just need to query the IAS and return the value
2310 * that we found */
2311
2312 /* Check that the user has allocated the right space for us */
2313 if (len != sizeof(struct irda_ias_set)) {
2314 err = -EINVAL;
2315 goto out;
2316 }
2317
2318 ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
2319 if (ias_opt == NULL) {
2320 err = -ENOMEM;
2321 goto out;
2322 }
2323
2324 /* Copy query to the driver. */
2325 if (copy_from_user(ias_opt, optval, len)) {
2326 kfree(ias_opt);
2327 err = -EFAULT;
2328 goto out;
2329 }
2330
2331 /* Find the object we target.
2332 * If the user gives us an empty string, we use the object
2333 * associated with this socket. This will workaround
2334 * duplicated class name - Jean II */
2335 if(ias_opt->irda_class_name[0] == '\0')
2336 ias_obj = self->ias_obj;
2337 else
2338 ias_obj = irias_find_object(ias_opt->irda_class_name);
2339 if(ias_obj == (struct ias_object *) NULL) {
2340 kfree(ias_opt);
2341 err = -EINVAL;
2342 goto out;
2343 }
2344
2345 /* Find the attribute (in the object) we target */
2346 ias_attr = irias_find_attrib(ias_obj,
2347 ias_opt->irda_attrib_name);
2348 if(ias_attr == (struct ias_attrib *) NULL) {
2349 kfree(ias_opt);
2350 err = -EINVAL;
2351 goto out;
2352 }
2353
2354 /* Translate from internal to user structure */
2355 err = irda_extract_ias_value(ias_opt, ias_attr->value);
2356 if(err) {
2357 kfree(ias_opt);
2358 goto out;
2359 }
2360
2361 /* Copy reply to the user */
2362 if (copy_to_user(optval, ias_opt,
2363 sizeof(struct irda_ias_set))) {
2364 kfree(ias_opt);
2365 err = -EFAULT;
2366 goto out;
2367 }
2368 /* Note : don't need to put optlen, we checked it */
2369 kfree(ias_opt);
2370 break;
2371 case IRLMP_IAS_QUERY:
2372 /* The user want an object from a remote IAS database.
2373 * We need to use IAP to query the remote database and
2374 * then wait for the answer to come back. */
2375
2376 /* Check that the user has allocated the right space for us */
2377 if (len != sizeof(struct irda_ias_set)) {
2378 err = -EINVAL;
2379 goto out;
2380 }
2381
2382 ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
2383 if (ias_opt == NULL) {
2384 err = -ENOMEM;
2385 goto out;
2386 }
2387
2388 /* Copy query to the driver. */
2389 if (copy_from_user(ias_opt, optval, len)) {
2390 kfree(ias_opt);
2391 err = -EFAULT;
2392 goto out;
2393 }
2394
2395 /* At this point, there are two cases...
2396 * 1) the socket is connected - that's the easy case, we
2397 * just query the device we are connected to...
2398 * 2) the socket is not connected - the user doesn't want
2399 * to connect and/or may not have a valid service name
2400 * (so can't create a fake connection). In this case,
2401 * we assume that the user pass us a valid destination
2402 * address in the requesting structure...
2403 */
2404 if(self->daddr != DEV_ADDR_ANY) {
2405 /* We are connected - reuse known daddr */
2406 daddr = self->daddr;
2407 } else {
2408 /* We are not connected, we must specify a valid
2409 * destination address */
2410 daddr = ias_opt->daddr;
2411 if((!daddr) || (daddr == DEV_ADDR_ANY)) {
2412 kfree(ias_opt);
2413 err = -EINVAL;
2414 goto out;
2415 }
2416 }
2417
2418 /* Check that we can proceed with IAP */
2419 if (self->iriap) {
2420 net_warn_ratelimited("%s: busy with a previous query\n",
2421 __func__);
2422 kfree(ias_opt);
2423 err = -EBUSY;
2424 goto out;
2425 }
2426
2427 self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
2428 irda_getvalue_confirm);
2429
2430 if (self->iriap == NULL) {
2431 kfree(ias_opt);
2432 err = -ENOMEM;
2433 goto out;
2434 }
2435
2436 /* Treat unexpected wakeup as disconnect */
2437 self->errno = -EHOSTUNREACH;
2438
2439 /* Query remote LM-IAS */
2440 iriap_getvaluebyclass_request(self->iriap,
2441 self->saddr, daddr,
2442 ias_opt->irda_class_name,
2443 ias_opt->irda_attrib_name);
2444
2445 /* Wait for answer, if not yet finished (or failed) */
2446 if (wait_event_interruptible(self->query_wait,
2447 (self->iriap == NULL))) {
2448 /* pending request uses copy of ias_opt-content
2449 * we can free it regardless! */
2450 kfree(ias_opt);
2451 /* Treat signals as disconnect */
2452 err = -EHOSTUNREACH;
2453 goto out;
2454 }
2455
2456 /* Check what happened */
2457 if (self->errno)
2458 {
2459 kfree(ias_opt);
2460 /* Requested object/attribute doesn't exist */
2461 if((self->errno == IAS_CLASS_UNKNOWN) ||
2462 (self->errno == IAS_ATTRIB_UNKNOWN))
2463 err = -EADDRNOTAVAIL;
2464 else
2465 err = -EHOSTUNREACH;
2466
2467 goto out;
2468 }
2469
2470 /* Translate from internal to user structure */
2471 err = irda_extract_ias_value(ias_opt, self->ias_result);
2472 if (self->ias_result)
2473 irias_delete_value(self->ias_result);
2474 if (err) {
2475 kfree(ias_opt);
2476 goto out;
2477 }
2478
2479 /* Copy reply to the user */
2480 if (copy_to_user(optval, ias_opt,
2481 sizeof(struct irda_ias_set))) {
2482 kfree(ias_opt);
2483 err = -EFAULT;
2484 goto out;
2485 }
2486 /* Note : don't need to put optlen, we checked it */
2487 kfree(ias_opt);
2488 break;
2489 case IRLMP_WAITDEVICE:
2490 /* This function is just another way of seeing life ;-)
2491 * IRLMP_ENUMDEVICES assumes that you have a static network,
2492 * and that you just want to pick one of the devices present.
2493 * On the other hand, in here we assume that no device is
2494 * present and that at some point in the future a device will
2495 * come into range. When this device arrive, we just wake
2496 * up the caller, so that he has time to connect to it before
2497 * the device goes away...
2498 * Note : once the node has been discovered for more than a
2499 * few second, it won't trigger this function, unless it
2500 * goes away and come back changes its hint bits (so we
2501 * might call it IRLMP_WAITNEWDEVICE).
2502 */
2503
2504 /* Check that the user is passing us an int */
2505 if (len != sizeof(int)) {
2506 err = -EINVAL;
2507 goto out;
2508 }
2509 /* Get timeout in ms (max time we block the caller) */
2510 if (get_user(val, (int __user *)optval)) {
2511 err = -EFAULT;
2512 goto out;
2513 }
2514
2515 /* Tell IrLMP we want to be notified */
2516 irlmp_update_client(self->ckey, self->mask.word,
2517 irda_selective_discovery_indication,
2518 NULL, (void *) self);
2519
2520 /* Do some discovery (and also return cached results) */
2521 irlmp_discovery_request(self->nslots);
2522
2523 /* Wait until a node is discovered */
2524 if (!self->cachedaddr) {
2525 pr_debug("%s(), nothing discovered yet, going to sleep...\n",
2526 __func__);
2527
2528 /* Set watchdog timer to expire in <val> ms. */
2529 self->errno = 0;
2530 setup_timer(&self->watchdog, irda_discovery_timeout,
2531 (unsigned long)self);
2532 mod_timer(&self->watchdog,
2533 jiffies + msecs_to_jiffies(val));
2534
2535 /* Wait for IR-LMP to call us back */
2536 err = __wait_event_interruptible(self->query_wait,
2537 (self->cachedaddr != 0 || self->errno == -ETIME));
2538
2539 /* If watchdog is still activated, kill it! */
2540 del_timer(&(self->watchdog));
2541
2542 pr_debug("%s(), ...waking up !\n", __func__);
2543
2544 if (err != 0)
2545 goto out;
2546 }
2547 else
2548 pr_debug("%s(), found immediately !\n",
2549 __func__);
2550
2551 /* Tell IrLMP that we have been notified */
2552 irlmp_update_client(self->ckey, self->mask.word,
2553 NULL, NULL, NULL);
2554
2555 /* Check if the we got some results */
2556 if (!self->cachedaddr) {
2557 err = -EAGAIN; /* Didn't find any devices */
2558 goto out;
2559 }
2560 daddr = self->cachedaddr;
2561 /* Cleanup */
2562 self->cachedaddr = 0;
2563
2564 /* We return the daddr of the device that trigger the
2565 * wakeup. As irlmp pass us only the new devices, we
2566 * are sure that it's not an old device.
2567 * If the user want more details, he should query
2568 * the whole discovery log and pick one device...
2569 */
2570 if (put_user(daddr, (int __user *)optval)) {
2571 err = -EFAULT;
2572 goto out;
2573 }
2574
2575 break;
2576 default:
2577 err = -ENOPROTOOPT;
2578 }
2579
2580 out:
2581
2582 release_sock(sk);
2583
2584 return err;
2585 }
2586
2587 static const struct net_proto_family irda_family_ops = {
2588 .family = PF_IRDA,
2589 .create = irda_create,
2590 .owner = THIS_MODULE,
2591 };
2592
2593 static const struct proto_ops irda_stream_ops = {
2594 .family = PF_IRDA,
2595 .owner = THIS_MODULE,
2596 .release = irda_release,
2597 .bind = irda_bind,
2598 .connect = irda_connect,
2599 .socketpair = sock_no_socketpair,
2600 .accept = irda_accept,
2601 .getname = irda_getname,
2602 .poll = irda_poll,
2603 .ioctl = irda_ioctl,
2604 #ifdef CONFIG_COMPAT
2605 .compat_ioctl = irda_compat_ioctl,
2606 #endif
2607 .listen = irda_listen,
2608 .shutdown = irda_shutdown,
2609 .setsockopt = irda_setsockopt,
2610 .getsockopt = irda_getsockopt,
2611 .sendmsg = irda_sendmsg,
2612 .recvmsg = irda_recvmsg_stream,
2613 .mmap = sock_no_mmap,
2614 .sendpage = sock_no_sendpage,
2615 };
2616
2617 static const struct proto_ops irda_seqpacket_ops = {
2618 .family = PF_IRDA,
2619 .owner = THIS_MODULE,
2620 .release = irda_release,
2621 .bind = irda_bind,
2622 .connect = irda_connect,
2623 .socketpair = sock_no_socketpair,
2624 .accept = irda_accept,
2625 .getname = irda_getname,
2626 .poll = datagram_poll,
2627 .ioctl = irda_ioctl,
2628 #ifdef CONFIG_COMPAT
2629 .compat_ioctl = irda_compat_ioctl,
2630 #endif
2631 .listen = irda_listen,
2632 .shutdown = irda_shutdown,
2633 .setsockopt = irda_setsockopt,
2634 .getsockopt = irda_getsockopt,
2635 .sendmsg = irda_sendmsg,
2636 .recvmsg = irda_recvmsg_dgram,
2637 .mmap = sock_no_mmap,
2638 .sendpage = sock_no_sendpage,
2639 };
2640
2641 static const struct proto_ops irda_dgram_ops = {
2642 .family = PF_IRDA,
2643 .owner = THIS_MODULE,
2644 .release = irda_release,
2645 .bind = irda_bind,
2646 .connect = irda_connect,
2647 .socketpair = sock_no_socketpair,
2648 .accept = irda_accept,
2649 .getname = irda_getname,
2650 .poll = datagram_poll,
2651 .ioctl = irda_ioctl,
2652 #ifdef CONFIG_COMPAT
2653 .compat_ioctl = irda_compat_ioctl,
2654 #endif
2655 .listen = irda_listen,
2656 .shutdown = irda_shutdown,
2657 .setsockopt = irda_setsockopt,
2658 .getsockopt = irda_getsockopt,
2659 .sendmsg = irda_sendmsg_dgram,
2660 .recvmsg = irda_recvmsg_dgram,
2661 .mmap = sock_no_mmap,
2662 .sendpage = sock_no_sendpage,
2663 };
2664
2665 #ifdef CONFIG_IRDA_ULTRA
2666 static const struct proto_ops irda_ultra_ops = {
2667 .family = PF_IRDA,
2668 .owner = THIS_MODULE,
2669 .release = irda_release,
2670 .bind = irda_bind,
2671 .connect = sock_no_connect,
2672 .socketpair = sock_no_socketpair,
2673 .accept = sock_no_accept,
2674 .getname = irda_getname,
2675 .poll = datagram_poll,
2676 .ioctl = irda_ioctl,
2677 #ifdef CONFIG_COMPAT
2678 .compat_ioctl = irda_compat_ioctl,
2679 #endif
2680 .listen = sock_no_listen,
2681 .shutdown = irda_shutdown,
2682 .setsockopt = irda_setsockopt,
2683 .getsockopt = irda_getsockopt,
2684 .sendmsg = irda_sendmsg_ultra,
2685 .recvmsg = irda_recvmsg_dgram,
2686 .mmap = sock_no_mmap,
2687 .sendpage = sock_no_sendpage,
2688 };
2689 #endif /* CONFIG_IRDA_ULTRA */
2690
2691 /*
2692 * Function irsock_init (pro)
2693 *
2694 * Initialize IrDA protocol
2695 *
2696 */
irsock_init(void)2697 int __init irsock_init(void)
2698 {
2699 int rc = proto_register(&irda_proto, 0);
2700
2701 if (rc == 0)
2702 rc = sock_register(&irda_family_ops);
2703
2704 return rc;
2705 }
2706
2707 /*
2708 * Function irsock_cleanup (void)
2709 *
2710 * Remove IrDA protocol
2711 *
2712 */
irsock_cleanup(void)2713 void irsock_cleanup(void)
2714 {
2715 sock_unregister(PF_IRDA);
2716 proto_unregister(&irda_proto);
2717 }
2718