1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015, Sony Mobile Communications Inc.
4  * Copyright (c) 2013, The Linux Foundation. All rights reserved.
5  */
6 #include <linux/module.h>
7 #include <linux/netlink.h>
8 #include <linux/qrtr.h>
9 #include <linux/termios.h>	/* For TIOCINQ/OUTQ */
10 #include <linux/numa.h>
11 
12 #include <net/sock.h>
13 
14 #include "qrtr.h"
15 
16 #define QRTR_PROTO_VER_1 1
17 #define QRTR_PROTO_VER_2 3
18 
19 /* auto-bind range */
20 #define QRTR_MIN_EPH_SOCKET 0x4000
21 #define QRTR_MAX_EPH_SOCKET 0x7fff
22 
23 /**
24  * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
25  * @version: protocol version
26  * @type: packet type; one of QRTR_TYPE_*
27  * @src_node_id: source node
28  * @src_port_id: source port
29  * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
30  * @size: length of packet, excluding this header
31  * @dst_node_id: destination node
32  * @dst_port_id: destination port
33  */
34 struct qrtr_hdr_v1 {
35 	__le32 version;
36 	__le32 type;
37 	__le32 src_node_id;
38 	__le32 src_port_id;
39 	__le32 confirm_rx;
40 	__le32 size;
41 	__le32 dst_node_id;
42 	__le32 dst_port_id;
43 } __packed;
44 
45 /**
46  * struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions
47  * @version: protocol version
48  * @type: packet type; one of QRTR_TYPE_*
49  * @flags: bitmask of QRTR_FLAGS_*
50  * @optlen: length of optional header data
51  * @size: length of packet, excluding this header and optlen
52  * @src_node_id: source node
53  * @src_port_id: source port
54  * @dst_node_id: destination node
55  * @dst_port_id: destination port
56  */
57 struct qrtr_hdr_v2 {
58 	u8 version;
59 	u8 type;
60 	u8 flags;
61 	u8 optlen;
62 	__le32 size;
63 	__le16 src_node_id;
64 	__le16 src_port_id;
65 	__le16 dst_node_id;
66 	__le16 dst_port_id;
67 };
68 
69 #define QRTR_FLAGS_CONFIRM_RX	BIT(0)
70 
71 struct qrtr_cb {
72 	u32 src_node;
73 	u32 src_port;
74 	u32 dst_node;
75 	u32 dst_port;
76 
77 	u8 type;
78 	u8 confirm_rx;
79 };
80 
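/* With the definitions above, the v1 header is 32 bytes and the v2 header is
 * 16 bytes plus optional data, so QRTR_HDR_MAX_SIZE resolves to 32; transmit
 * paths reserve this much headroom in front of the payload.
 */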
81 #define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \
82 					sizeof(struct qrtr_hdr_v2))
83 
84 struct qrtr_sock {
85 	/* WARNING: sk must be the first member */
86 	struct sock sk;
87 	struct sockaddr_qrtr us;
88 	struct sockaddr_qrtr peer;
89 };
90 
91 static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
92 {
93 	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
94 	return container_of(sk, struct qrtr_sock, sk);
95 }
96 
97 static unsigned int qrtr_local_nid = NUMA_NO_NODE;
98 
99 /* for node ids */
100 static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
101 /* broadcast list */
102 static LIST_HEAD(qrtr_all_nodes);
103 /* lock for qrtr_nodes, qrtr_all_nodes and node reference */
104 static DEFINE_MUTEX(qrtr_node_lock);
105 
106 /* local port allocation management */
107 static DEFINE_IDR(qrtr_ports);
108 static DEFINE_MUTEX(qrtr_port_lock);
109 
110 /**
111  * struct qrtr_node - endpoint node
112  * @ep_lock: lock for endpoint management and callbacks
113  * @ep: endpoint
114  * @ref: reference count for node
115  * @nid: node id
116  * @rx_queue: receive queue
117  * @work: scheduled work struct for recv work
118  * @item: list item for broadcast list
119  */
120 struct qrtr_node {
121 	struct mutex ep_lock;
122 	struct qrtr_endpoint *ep;
123 	struct kref ref;
124 	unsigned int nid;
125 
126 	struct sk_buff_head rx_queue;
127 	struct work_struct work;
128 	struct list_head item;
129 };
130 
131 static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
132 			      int type, struct sockaddr_qrtr *from,
133 			      struct sockaddr_qrtr *to);
134 static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
135 			      int type, struct sockaddr_qrtr *from,
136 			      struct sockaddr_qrtr *to);
137 
138 /* Release node resources and free the node.
139  *
140  * Do not call directly, use qrtr_node_release.  To be used with
141  * kref_put_mutex.  As such, the node mutex is expected to be locked on call.
142  */
143 static void __qrtr_node_release(struct kref *kref)
144 {
145 	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
146 
147 	if (node->nid != QRTR_EP_NID_AUTO)
148 		radix_tree_delete(&qrtr_nodes, node->nid);
149 
150 	list_del(&node->item);
151 	mutex_unlock(&qrtr_node_lock);
152 
153 	cancel_work_sync(&node->work);
154 	skb_queue_purge(&node->rx_queue);
155 	kfree(node);
156 }
157 
158 /* Increment reference to node. */
159 static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
160 {
161 	if (node)
162 		kref_get(&node->ref);
163 	return node;
164 }
165 
166 /* Decrement reference to node and release as necessary. */
167 static void qrtr_node_release(struct qrtr_node *node)
168 {
169 	if (!node)
170 		return;
171 	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
172 }
173 
174 /* Pass an outgoing packet socket buffer to the endpoint driver. */
175 static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
176 			     int type, struct sockaddr_qrtr *from,
177 			     struct sockaddr_qrtr *to)
178 {
179 	struct qrtr_hdr_v1 *hdr;
180 	size_t len = skb->len;
181 	int rc = -ENODEV;
182 
183 	hdr = skb_push(skb, sizeof(*hdr));
184 	hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
185 	hdr->type = cpu_to_le32(type);
186 	hdr->src_node_id = cpu_to_le32(from->sq_node);
187 	hdr->src_port_id = cpu_to_le32(from->sq_port);
188 	if (to->sq_port == QRTR_PORT_CTRL) {
189 		hdr->dst_node_id = cpu_to_le32(node->nid);
190 		hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
191 	} else {
192 		hdr->dst_node_id = cpu_to_le32(to->sq_node);
193 		hdr->dst_port_id = cpu_to_le32(to->sq_port);
194 	}
195 
196 	hdr->size = cpu_to_le32(len);
197 	hdr->confirm_rx = 0;
198 
199 	skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
200 
201 	mutex_lock(&node->ep_lock);
202 	if (node->ep)
203 		rc = node->ep->xmit(node->ep, skb);
204 	else
205 		kfree_skb(skb);
206 	mutex_unlock(&node->ep_lock);
207 
208 	return rc;
209 }
210 
211 /* Lookup node by id.
212  *
213  * callers must release with qrtr_node_release()
214  */
215 static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
216 {
217 	struct qrtr_node *node;
218 
219 	mutex_lock(&qrtr_node_lock);
220 	node = radix_tree_lookup(&qrtr_nodes, nid);
221 	node = qrtr_node_acquire(node);
222 	mutex_unlock(&qrtr_node_lock);
223 
224 	return node;
225 }
226 
227 /* Assign node id to node.
228  *
229  * This is mostly useful for automatic node id assignment, based on
230  * the source id in the incoming packet.
231  */
232 static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
233 {
234 	if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
235 		return;
236 
237 	mutex_lock(&qrtr_node_lock);
238 	radix_tree_insert(&qrtr_nodes, nid, node);
239 	node->nid = nid;
240 	mutex_unlock(&qrtr_node_lock);
241 }
242 
243 /**
244  * qrtr_endpoint_post() - post incoming data
245  * @ep: endpoint handle
246  * @data: data pointer
247  * @len: size of data in bytes
248  *
249  * Return: 0 on success; negative error code on failure
250  */
251 int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
252 {
253 	struct qrtr_node *node = ep->node;
254 	const struct qrtr_hdr_v1 *v1;
255 	const struct qrtr_hdr_v2 *v2;
256 	struct sk_buff *skb;
257 	struct qrtr_cb *cb;
258 	unsigned int size;
259 	unsigned int ver;
260 	size_t hdrlen;
261 
262 	if (len == 0 || len & 3)
263 		return -EINVAL;
264 
265 	skb = netdev_alloc_skb(NULL, len);
266 	if (!skb)
267 		return -ENOMEM;
268 
269 	cb = (struct qrtr_cb *)skb->cb;
270 
271 	/* Version field in v1 is little endian, so this works for both cases */
272 	ver = *(u8 *)data;
273 
274 	switch (ver) {
275 	case QRTR_PROTO_VER_1:
276 		v1 = data;
277 		hdrlen = sizeof(*v1);
278 
279 		cb->type = le32_to_cpu(v1->type);
280 		cb->src_node = le32_to_cpu(v1->src_node_id);
281 		cb->src_port = le32_to_cpu(v1->src_port_id);
282 		cb->confirm_rx = !!v1->confirm_rx;
283 		cb->dst_node = le32_to_cpu(v1->dst_node_id);
284 		cb->dst_port = le32_to_cpu(v1->dst_port_id);
285 
286 		size = le32_to_cpu(v1->size);
287 		break;
288 	case QRTR_PROTO_VER_2:
289 		v2 = data;
290 		hdrlen = sizeof(*v2) + v2->optlen;
291 
292 		cb->type = v2->type;
293 		cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
294 		cb->src_node = le16_to_cpu(v2->src_node_id);
295 		cb->src_port = le16_to_cpu(v2->src_port_id);
296 		cb->dst_node = le16_to_cpu(v2->dst_node_id);
297 		cb->dst_port = le16_to_cpu(v2->dst_port_id);
298 
299 		if (cb->src_port == (u16)QRTR_PORT_CTRL)
300 			cb->src_port = QRTR_PORT_CTRL;
301 		if (cb->dst_port == (u16)QRTR_PORT_CTRL)
302 			cb->dst_port = QRTR_PORT_CTRL;
303 
304 		size = le32_to_cpu(v2->size);
305 		break;
306 	default:
307 		pr_err("qrtr: Invalid version %d\n", ver);
308 		goto err;
309 	}
310 
311 	if (len != ALIGN(size, 4) + hdrlen)
312 		goto err;
313 
314 	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA)
315 		goto err;
316 
317 	skb_put_data(skb, data + hdrlen, size);
318 
319 	skb_queue_tail(&node->rx_queue, skb);
320 	schedule_work(&node->work);
321 
322 	return 0;
323 
324 err:
325 	kfree_skb(skb);
326 	return -EINVAL;
327 
328 }
329 EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
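/* Editorial sketch (not part of the driver): how a transport might feed
 * received frames into the router.  The my_ep container and rx callback are
 * hypothetical names.
 *
 *	struct my_ep {
 *		struct qrtr_endpoint ep;
 *	};
 *
 *	static void my_rx(struct my_ep *mep, const void *buf, size_t len)
 *	{
 *		// len must be a multiple of 4 and start with a v1 or v2 header
 *		if (qrtr_endpoint_post(&mep->ep, buf, len) < 0)
 *			pr_debug("qrtr: dropped malformed frame\n");
 *	}
 */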
330 
331 /**
332  * qrtr_alloc_ctrl_packet() - allocate control packet skb
333  * @pkt: reference to qrtr_ctrl_pkt pointer
334  *
335  * Returns newly allocated sk_buff, or NULL on failure
336  *
337  * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
338  * on success returns a reference to the control packet in @pkt.
339  */
340 static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
341 {
342 	const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
343 	struct sk_buff *skb;
344 
345 	skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, GFP_KERNEL);
346 	if (!skb)
347 		return NULL;
348 
349 	skb_reserve(skb, QRTR_HDR_MAX_SIZE);
350 	*pkt = skb_put_zero(skb, pkt_len);
351 
352 	return skb;
353 }
354 
355 static struct qrtr_sock *qrtr_port_lookup(int port);
356 static void qrtr_port_put(struct qrtr_sock *ipc);
357 
358 /* Handle and route a received packet.
359  *
360  * This will auto-reply with resume-tx packet as necessary.
361  */
362 static void qrtr_node_rx_work(struct work_struct *work)
363 {
364 	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
365 	struct qrtr_ctrl_pkt *pkt;
366 	struct sockaddr_qrtr dst;
367 	struct sockaddr_qrtr src;
368 	struct sk_buff *skb;
369 
370 	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
371 		struct qrtr_sock *ipc;
372 		struct qrtr_cb *cb;
373 		int confirm;
374 
375 		cb = (struct qrtr_cb *)skb->cb;
376 		src.sq_node = cb->src_node;
377 		src.sq_port = cb->src_port;
378 		dst.sq_node = cb->dst_node;
379 		dst.sq_port = cb->dst_port;
380 		confirm = !!cb->confirm_rx;
381 
382 		qrtr_node_assign(node, cb->src_node);
383 
384 		ipc = qrtr_port_lookup(cb->dst_port);
385 		if (!ipc) {
386 			kfree_skb(skb);
387 		} else {
388 			if (sock_queue_rcv_skb(&ipc->sk, skb))
389 				kfree_skb(skb);
390 
391 			qrtr_port_put(ipc);
392 		}
393 
394 		if (confirm) {
395 			skb = qrtr_alloc_ctrl_packet(&pkt);
396 			if (!skb)
397 				break;
398 
399 			pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
400 			pkt->client.node = cpu_to_le32(dst.sq_node);
401 			pkt->client.port = cpu_to_le32(dst.sq_port);
402 
403 			if (qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX,
404 					      &dst, &src))
405 				break;
406 		}
407 	}
408 }
409 
410 /**
411  * qrtr_endpoint_register() - register a new endpoint
412  * @ep: endpoint to register
413  * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
414  * Return: 0 on success; negative error code on failure
415  *
416  * The specified endpoint must have the xmit function pointer set on call.
417  */
418 int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
419 {
420 	struct qrtr_node *node;
421 
422 	if (!ep || !ep->xmit)
423 		return -EINVAL;
424 
425 	node = kzalloc(sizeof(*node), GFP_KERNEL);
426 	if (!node)
427 		return -ENOMEM;
428 
429 	INIT_WORK(&node->work, qrtr_node_rx_work);
430 	kref_init(&node->ref);
431 	mutex_init(&node->ep_lock);
432 	skb_queue_head_init(&node->rx_queue);
433 	node->nid = QRTR_EP_NID_AUTO;
434 	node->ep = ep;
435 
436 	qrtr_node_assign(node, nid);
437 
438 	mutex_lock(&qrtr_node_lock);
439 	list_add(&node->item, &qrtr_all_nodes);
440 	mutex_unlock(&qrtr_node_lock);
441 	ep->node = node;
442 
443 	return 0;
444 }
445 EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
446 
447 /**
448  * qrtr_endpoint_unregister - unregister endpoint
449  * @ep: endpoint to unregister
450  */
451 void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
452 {
453 	struct qrtr_node *node = ep->node;
454 	struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
455 	struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
456 	struct qrtr_ctrl_pkt *pkt;
457 	struct sk_buff *skb;
458 
459 	mutex_lock(&node->ep_lock);
460 	node->ep = NULL;
461 	mutex_unlock(&node->ep_lock);
462 
463 	/* Notify the local controller about the event */
464 	skb = qrtr_alloc_ctrl_packet(&pkt);
465 	if (skb) {
466 		pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
467 		qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
468 	}
469 
470 	qrtr_node_release(node);
471 	ep->node = NULL;
472 }
473 EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
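/* Editorial sketch of the endpoint lifecycle as a transport driver might use
 * it; my_xmit, my_probe, my_remove and my_link_send are hypothetical names.
 *
 *	static int my_xmit(struct qrtr_endpoint *ep, struct sk_buff *skb)
 *	{
 *		return my_link_send(skb);	// consume or free the skb
 *	}
 *
 *	static int my_probe(struct my_ep *mep)
 *	{
 *		mep->ep.xmit = my_xmit;		// mandatory before registering
 *		return qrtr_endpoint_register(&mep->ep, QRTR_EP_NID_AUTO);
 *	}
 *
 *	static void my_remove(struct my_ep *mep)
 *	{
 *		// sends QRTR_TYPE_BYE to the local controller and drops the ref
 *		qrtr_endpoint_unregister(&mep->ep);
 *	}
 */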
474 
475 /* Lookup socket by port.
476  *
477  * Callers must release with qrtr_port_put()
478  */
479 static struct qrtr_sock *qrtr_port_lookup(int port)
480 {
481 	struct qrtr_sock *ipc;
482 
483 	if (port == QRTR_PORT_CTRL)
484 		port = 0;
485 
486 	mutex_lock(&qrtr_port_lock);
487 	ipc = idr_find(&qrtr_ports, port);
488 	if (ipc)
489 		sock_hold(&ipc->sk);
490 	mutex_unlock(&qrtr_port_lock);
491 
492 	return ipc;
493 }
494 
495 /* Release acquired socket. */
496 static void qrtr_port_put(struct qrtr_sock *ipc)
497 {
498 	sock_put(&ipc->sk);
499 }
500 
501 /* Remove port assignment. */
502 static void qrtr_port_remove(struct qrtr_sock *ipc)
503 {
504 	struct qrtr_ctrl_pkt *pkt;
505 	struct sk_buff *skb;
506 	int port = ipc->us.sq_port;
507 	struct sockaddr_qrtr to;
508 
509 	to.sq_family = AF_QIPCRTR;
510 	to.sq_node = QRTR_NODE_BCAST;
511 	to.sq_port = QRTR_PORT_CTRL;
512 
513 	skb = qrtr_alloc_ctrl_packet(&pkt);
514 	if (skb) {
515 		pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
516 		pkt->client.node = cpu_to_le32(ipc->us.sq_node);
517 		pkt->client.port = cpu_to_le32(ipc->us.sq_port);
518 
519 		skb_set_owner_w(skb, &ipc->sk);
520 		qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
521 				   &to);
522 	}
523 
524 	if (port == QRTR_PORT_CTRL)
525 		port = 0;
526 
527 	__sock_put(&ipc->sk);
528 
529 	mutex_lock(&qrtr_port_lock);
530 	idr_remove(&qrtr_ports, port);
531 	mutex_unlock(&qrtr_port_lock);
532 }
533 
534 /* Assign port number to socket.
535  *
536  * Specify port in the integer pointed to by port, and it will be adjusted
537  * on return as necessary.
538  *
539  * Port may be:
540  *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
541  *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
542  *   >QRTR_MIN_EPH_SOCKET: Specified; available to all
543  */
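/* For example: binding sq_port 0 picks a free port in 0x4000..0x7fff, binding
 * a low port such as 0x1 additionally requires CAP_NET_ADMIN, and binding
 * QRTR_PORT_CTRL (with CAP_NET_ADMIN) claims the reserved idr slot 0.
 */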
544 static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
545 {
546 	int rc;
547 
548 	mutex_lock(&qrtr_port_lock);
549 	if (!*port) {
550 		rc = idr_alloc(&qrtr_ports, ipc,
551 			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
552 			       GFP_ATOMIC);
553 		if (rc >= 0)
554 			*port = rc;
555 	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
556 		rc = -EACCES;
557 	} else if (*port == QRTR_PORT_CTRL) {
558 		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
559 	} else {
560 		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
561 		if (rc >= 0)
562 			*port = rc;
563 	}
564 	mutex_unlock(&qrtr_port_lock);
565 
566 	if (rc == -ENOSPC)
567 		return -EADDRINUSE;
568 	else if (rc < 0)
569 		return rc;
570 
571 	sock_hold(&ipc->sk);
572 
573 	return 0;
574 }
575 
576 /* Reset all non-control ports */
577 static void qrtr_reset_ports(void)
578 {
579 	struct qrtr_sock *ipc;
580 	int id;
581 
582 	mutex_lock(&qrtr_port_lock);
583 	idr_for_each_entry(&qrtr_ports, ipc, id) {
584 		/* Don't reset control port */
585 		if (id == 0)
586 			continue;
587 
588 		sock_hold(&ipc->sk);
589 		ipc->sk.sk_err = ENETRESET;
590 		ipc->sk.sk_error_report(&ipc->sk);
591 		sock_put(&ipc->sk);
592 	}
593 	mutex_unlock(&qrtr_port_lock);
594 }
595 
596 /* Bind socket to address.
597  *
598  * Socket should be locked upon call.
599  */
600 static int __qrtr_bind(struct socket *sock,
601 		       const struct sockaddr_qrtr *addr, int zapped)
602 {
603 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
604 	struct sock *sk = sock->sk;
605 	int port;
606 	int rc;
607 
608 	/* rebinding ok */
609 	if (!zapped && addr->sq_port == ipc->us.sq_port)
610 		return 0;
611 
612 	port = addr->sq_port;
613 	rc = qrtr_port_assign(ipc, &port);
614 	if (rc)
615 		return rc;
616 
617 	/* unbind previous, if any */
618 	if (!zapped)
619 		qrtr_port_remove(ipc);
620 	ipc->us.sq_port = port;
621 
622 	sock_reset_flag(sk, SOCK_ZAPPED);
623 
624 	/* Notify all open ports about the new controller */
625 	if (port == QRTR_PORT_CTRL)
626 		qrtr_reset_ports();
627 
628 	return 0;
629 }
630 
631 /* Auto bind to an ephemeral port. */
632 static int qrtr_autobind(struct socket *sock)
633 {
634 	struct sock *sk = sock->sk;
635 	struct sockaddr_qrtr addr;
636 
637 	if (!sock_flag(sk, SOCK_ZAPPED))
638 		return 0;
639 
640 	addr.sq_family = AF_QIPCRTR;
641 	addr.sq_node = qrtr_local_nid;
642 	addr.sq_port = 0;
643 
644 	return __qrtr_bind(sock, &addr, 1);
645 }
646 
647 /* Bind socket to specified sockaddr. */
648 static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
649 {
650 	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
651 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
652 	struct sock *sk = sock->sk;
653 	int rc;
654 
655 	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
656 		return -EINVAL;
657 
658 	if (addr->sq_node != ipc->us.sq_node)
659 		return -EINVAL;
660 
661 	lock_sock(sk);
662 	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
663 	release_sock(sk);
664 
665 	return rc;
666 }
667 
668 /* Queue packet to local peer socket. */
669 static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
670 			      int type, struct sockaddr_qrtr *from,
671 			      struct sockaddr_qrtr *to)
672 {
673 	struct qrtr_sock *ipc;
674 	struct qrtr_cb *cb;
675 
676 	ipc = qrtr_port_lookup(to->sq_port);
677 	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
678 		kfree_skb(skb);
679 		return -ENODEV;
680 	}
681 
682 	cb = (struct qrtr_cb *)skb->cb;
683 	cb->src_node = from->sq_node;
684 	cb->src_port = from->sq_port;
685 
686 	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
687 		qrtr_port_put(ipc);
688 		kfree_skb(skb);
689 		return -ENOSPC;
690 	}
691 
692 	qrtr_port_put(ipc);
693 
694 	return 0;
695 }
696 
697 /* Queue packet for broadcast. */
698 static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
699 			      int type, struct sockaddr_qrtr *from,
700 			      struct sockaddr_qrtr *to)
701 {
702 	struct sk_buff *skbn;
703 
704 	mutex_lock(&qrtr_node_lock);
705 	list_for_each_entry(node, &qrtr_all_nodes, item) {
706 		skbn = skb_clone(skb, GFP_KERNEL);
707 		if (!skbn)
708 			break;
709 		skb_set_owner_w(skbn, skb->sk);
710 		qrtr_node_enqueue(node, skbn, type, from, to);
711 	}
712 	mutex_unlock(&qrtr_node_lock);
713 
714 	qrtr_local_enqueue(node, skb, type, from, to);
715 
716 	return 0;
717 }
718 
719 static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
720 {
721 	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
722 	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
723 			  struct sockaddr_qrtr *, struct sockaddr_qrtr *);
724 	__le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA);
725 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
726 	struct sock *sk = sock->sk;
727 	struct qrtr_node *node;
728 	struct sk_buff *skb;
729 	size_t plen;
730 	u32 type;
731 	int rc;
732 
733 	if (msg->msg_flags & ~(MSG_DONTWAIT))
734 		return -EINVAL;
735 
736 	if (len > 65535)
737 		return -EMSGSIZE;
738 
739 	lock_sock(sk);
740 
741 	if (addr) {
742 		if (msg->msg_namelen < sizeof(*addr)) {
743 			release_sock(sk);
744 			return -EINVAL;
745 		}
746 
747 		if (addr->sq_family != AF_QIPCRTR) {
748 			release_sock(sk);
749 			return -EINVAL;
750 		}
751 
752 		rc = qrtr_autobind(sock);
753 		if (rc) {
754 			release_sock(sk);
755 			return rc;
756 		}
757 	} else if (sk->sk_state == TCP_ESTABLISHED) {
758 		addr = &ipc->peer;
759 	} else {
760 		release_sock(sk);
761 		return -ENOTCONN;
762 	}
763 
764 	node = NULL;
765 	if (addr->sq_node == QRTR_NODE_BCAST) {
766 		enqueue_fn = qrtr_bcast_enqueue;
767 		if (addr->sq_port != QRTR_PORT_CTRL) {
768 			release_sock(sk);
769 			return -ENOTCONN;
770 		}
771 	} else if (addr->sq_node == ipc->us.sq_node) {
772 		enqueue_fn = qrtr_local_enqueue;
773 	} else {
774 		enqueue_fn = qrtr_node_enqueue;
775 		node = qrtr_node_lookup(addr->sq_node);
776 		if (!node) {
777 			release_sock(sk);
778 			return -ECONNRESET;
779 		}
780 	}
781 
782 	plen = (len + 3) & ~3;
783 	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
784 				  msg->msg_flags & MSG_DONTWAIT, &rc);
785 	if (!skb)
786 		goto out_node;
787 
788 	skb_reserve(skb, QRTR_HDR_MAX_SIZE);
789 
790 	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
791 	if (rc) {
792 		kfree_skb(skb);
793 		goto out_node;
794 	}
795 
796 	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
797 		if (len < 4) {
798 			rc = -EINVAL;
799 			kfree_skb(skb);
800 			goto out_node;
801 		}
802 
803 		/* control messages already require the type as 'command' */
804 		skb_copy_bits(skb, 0, &qrtr_type, 4);
805 	}
806 
807 	type = le32_to_cpu(qrtr_type);
808 	rc = enqueue_fn(node, skb, type, &ipc->us, addr);
809 	if (rc >= 0)
810 		rc = len;
811 
812 out_node:
813 	qrtr_node_release(node);
814 	release_sock(sk);
815 
816 	return rc;
817 }
818 
819 static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
820 			size_t size, int flags)
821 {
822 	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
823 	struct sock *sk = sock->sk;
824 	struct sk_buff *skb;
825 	struct qrtr_cb *cb;
826 	int copied, rc;
827 
828 	lock_sock(sk);
829 
830 	if (sock_flag(sk, SOCK_ZAPPED)) {
831 		release_sock(sk);
832 		return -EADDRNOTAVAIL;
833 	}
834 
835 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
836 				flags & MSG_DONTWAIT, &rc);
837 	if (!skb) {
838 		release_sock(sk);
839 		return rc;
840 	}
841 
842 	copied = skb->len;
843 	if (copied > size) {
844 		copied = size;
845 		msg->msg_flags |= MSG_TRUNC;
846 	}
847 
848 	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
849 	if (rc < 0)
850 		goto out;
851 	rc = copied;
852 
853 	if (addr) {
854 		cb = (struct qrtr_cb *)skb->cb;
855 		addr->sq_family = AF_QIPCRTR;
856 		addr->sq_node = cb->src_node;
857 		addr->sq_port = cb->src_port;
858 		msg->msg_namelen = sizeof(*addr);
859 	}
860 
861 out:
862 	skb_free_datagram(sk, skb);
863 	release_sock(sk);
864 
865 	return rc;
866 }
867 
868 static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
869 			int len, int flags)
870 {
871 	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
872 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
873 	struct sock *sk = sock->sk;
874 	int rc;
875 
876 	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
877 		return -EINVAL;
878 
879 	lock_sock(sk);
880 
881 	sk->sk_state = TCP_CLOSE;
882 	sock->state = SS_UNCONNECTED;
883 
884 	rc = qrtr_autobind(sock);
885 	if (rc) {
886 		release_sock(sk);
887 		return rc;
888 	}
889 
890 	ipc->peer = *addr;
891 	sock->state = SS_CONNECTED;
892 	sk->sk_state = TCP_ESTABLISHED;
893 
894 	release_sock(sk);
895 
896 	return 0;
897 }
898 
899 static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
900 			int peer)
901 {
902 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
903 	struct sockaddr_qrtr qaddr;
904 	struct sock *sk = sock->sk;
905 
906 	lock_sock(sk);
907 	if (peer) {
908 		if (sk->sk_state != TCP_ESTABLISHED) {
909 			release_sock(sk);
910 			return -ENOTCONN;
911 		}
912 
913 		qaddr = ipc->peer;
914 	} else {
915 		qaddr = ipc->us;
916 	}
917 	release_sock(sk);
918 
919 	qaddr.sq_family = AF_QIPCRTR;
920 
921 	memcpy(saddr, &qaddr, sizeof(qaddr));
922 
923 	return sizeof(qaddr);
924 }
925 
926 static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
927 {
928 	void __user *argp = (void __user *)arg;
929 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
930 	struct sock *sk = sock->sk;
931 	struct sockaddr_qrtr *sq;
932 	struct sk_buff *skb;
933 	struct ifreq ifr;
934 	long len = 0;
935 	int rc = 0;
936 
937 	lock_sock(sk);
938 
939 	switch (cmd) {
940 	case TIOCOUTQ:
941 		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
942 		if (len < 0)
943 			len = 0;
944 		rc = put_user(len, (int __user *)argp);
945 		break;
946 	case TIOCINQ:
947 		skb = skb_peek(&sk->sk_receive_queue);
948 		if (skb)
949 			len = skb->len;
950 		rc = put_user(len, (int __user *)argp);
951 		break;
952 	case SIOCGIFADDR:
953 		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
954 			rc = -EFAULT;
955 			break;
956 		}
957 
958 		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
959 		*sq = ipc->us;
960 		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
961 			rc = -EFAULT;
962 			break;
963 		}
964 		break;
965 	case SIOCADDRT:
966 	case SIOCDELRT:
967 	case SIOCSIFADDR:
968 	case SIOCGIFDSTADDR:
969 	case SIOCSIFDSTADDR:
970 	case SIOCGIFBRDADDR:
971 	case SIOCSIFBRDADDR:
972 	case SIOCGIFNETMASK:
973 	case SIOCSIFNETMASK:
974 		rc = -EINVAL;
975 		break;
976 	default:
977 		rc = -ENOIOCTLCMD;
978 		break;
979 	}
980 
981 	release_sock(sk);
982 
983 	return rc;
984 }
985 
986 static int qrtr_release(struct socket *sock)
987 {
988 	struct sock *sk = sock->sk;
989 	struct qrtr_sock *ipc;
990 
991 	if (!sk)
992 		return 0;
993 
994 	lock_sock(sk);
995 
996 	ipc = qrtr_sk(sk);
997 	sk->sk_shutdown = SHUTDOWN_MASK;
998 	if (!sock_flag(sk, SOCK_DEAD))
999 		sk->sk_state_change(sk);
1000 
1001 	sock_set_flag(sk, SOCK_DEAD);
1002 	sock->sk = NULL;
1003 
1004 	if (!sock_flag(sk, SOCK_ZAPPED))
1005 		qrtr_port_remove(ipc);
1006 
1007 	skb_queue_purge(&sk->sk_receive_queue);
1008 
1009 	release_sock(sk);
1010 	sock_put(sk);
1011 
1012 	return 0;
1013 }
1014 
1015 static const struct proto_ops qrtr_proto_ops = {
1016 	.owner		= THIS_MODULE,
1017 	.family		= AF_QIPCRTR,
1018 	.bind		= qrtr_bind,
1019 	.connect	= qrtr_connect,
1020 	.socketpair	= sock_no_socketpair,
1021 	.accept		= sock_no_accept,
1022 	.listen		= sock_no_listen,
1023 	.sendmsg	= qrtr_sendmsg,
1024 	.recvmsg	= qrtr_recvmsg,
1025 	.getname	= qrtr_getname,
1026 	.ioctl		= qrtr_ioctl,
1027 	.gettstamp	= sock_gettstamp,
1028 	.poll		= datagram_poll,
1029 	.shutdown	= sock_no_shutdown,
1030 	.setsockopt	= sock_no_setsockopt,
1031 	.getsockopt	= sock_no_getsockopt,
1032 	.release	= qrtr_release,
1033 	.mmap		= sock_no_mmap,
1034 	.sendpage	= sock_no_sendpage,
1035 };
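/* Editorial userspace sketch of the datagram API implemented by the ops
 * above; the node and port values are made-up examples.
 *
 *	#include <linux/qrtr.h>
 *	#include <sys/socket.h>
 *
 *	struct sockaddr_qrtr sq = {
 *		.sq_family = AF_QIPCRTR,
 *		.sq_node = 1,		// example remote node
 *		.sq_port = 0x4000,	// example remote service port
 *	};
 *	int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
 *
 *	// the first sendto() autobinds an ephemeral local port
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&sq, sizeof(sq));
 *	recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
 */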
1036 
1037 static struct proto qrtr_proto = {
1038 	.name		= "QIPCRTR",
1039 	.owner		= THIS_MODULE,
1040 	.obj_size	= sizeof(struct qrtr_sock),
1041 };
1042 
1043 static int qrtr_create(struct net *net, struct socket *sock,
1044 		       int protocol, int kern)
1045 {
1046 	struct qrtr_sock *ipc;
1047 	struct sock *sk;
1048 
1049 	if (sock->type != SOCK_DGRAM)
1050 		return -EPROTOTYPE;
1051 
1052 	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
1053 	if (!sk)
1054 		return -ENOMEM;
1055 
1056 	sock_set_flag(sk, SOCK_ZAPPED);
1057 
1058 	sock_init_data(sock, sk);
1059 	sock->ops = &qrtr_proto_ops;
1060 
1061 	ipc = qrtr_sk(sk);
1062 	ipc->us.sq_family = AF_QIPCRTR;
1063 	ipc->us.sq_node = qrtr_local_nid;
1064 	ipc->us.sq_port = 0;
1065 
1066 	return 0;
1067 }
1068 
1069 static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
1070 	[IFA_LOCAL] = { .type = NLA_U32 },
1071 };
1072 
1073 static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1074 			  struct netlink_ext_ack *extack)
1075 {
1076 	struct nlattr *tb[IFA_MAX + 1];
1077 	struct ifaddrmsg *ifm;
1078 	int rc;
1079 
1080 	if (!netlink_capable(skb, CAP_NET_ADMIN))
1081 		return -EPERM;
1082 
1083 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
1084 		return -EPERM;
1085 
1086 	ASSERT_RTNL();
1087 
1088 	rc = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
1089 				    qrtr_policy, extack);
1090 	if (rc < 0)
1091 		return rc;
1092 
1093 	ifm = nlmsg_data(nlh);
1094 	if (!tb[IFA_LOCAL])
1095 		return -EINVAL;
1096 
1097 	qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);
1098 	return 0;
1099 }
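/* Editorial sketch: setting the local node id from userspace by sending an
 * RTM_NEWADDR request over a NETLINK_ROUTE socket (rtnl_fd), which requires
 * both CAP_NET_ADMIN and CAP_SYS_ADMIN.  Error handling and the ACK read are
 * omitted; the node id value is a made-up example.
 *
 *	struct {
 *		struct nlmsghdr nh;
 *		struct ifaddrmsg ifa;
 *		char attrs[RTA_SPACE(sizeof(__u32))];
 *	} req = {
 *		.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
 *		.nh.nlmsg_type = RTM_NEWADDR,
 *		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
 *		.ifa.ifa_family = AF_QIPCRTR,
 *	};
 *	struct rtattr *rta = (struct rtattr *)((char *)&req +
 *					       NLMSG_ALIGN(req.nh.nlmsg_len));
 *	__u32 nid = 1;
 *
 *	rta->rta_type = IFA_LOCAL;
 *	rta->rta_len = RTA_LENGTH(sizeof(nid));
 *	memcpy(RTA_DATA(rta), &nid, sizeof(nid));
 *	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(rta->rta_len);
 *
 *	send(rtnl_fd, &req, req.nh.nlmsg_len, 0);
 */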
1100 
1101 static const struct net_proto_family qrtr_family = {
1102 	.owner	= THIS_MODULE,
1103 	.family	= AF_QIPCRTR,
1104 	.create	= qrtr_create,
1105 };
1106 
1107 static int __init qrtr_proto_init(void)
1108 {
1109 	int rc;
1110 
1111 	rc = proto_register(&qrtr_proto, 1);
1112 	if (rc)
1113 		return rc;
1114 
1115 	rc = sock_register(&qrtr_family);
1116 	if (rc) {
1117 		proto_unregister(&qrtr_proto);
1118 		return rc;
1119 	}
1120 
1121 	rc = rtnl_register_module(THIS_MODULE, PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);
1122 	if (rc) {
1123 		sock_unregister(qrtr_family.family);
1124 		proto_unregister(&qrtr_proto);
1125 	}
1126 
1127 	return rc;
1128 }
1129 postcore_initcall(qrtr_proto_init);
1130 
1131 static void __exit qrtr_proto_fini(void)
1132 {
1133 	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
1134 	sock_unregister(qrtr_family.family);
1135 	proto_unregister(&qrtr_proto);
1136 }
1137 module_exit(qrtr_proto_fini);
1138 
1139 MODULE_DESCRIPTION("Qualcomm IPC-router driver");
1140 MODULE_LICENSE("GPL v2");
1141 MODULE_ALIAS_NETPROTO(PF_QIPCRTR);
1142