// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel Connection Multiplexor
 *
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 */

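/* KCM multiplexes message streams: any number of KCM sockets sit on top of
 * a mux, and connected TCP sockets ("psocks") are attached underneath it.
 * A BPF program attached with each psock parses message boundaries out of
 * the byte stream via the stream parser (strparser).
 *
 * A minimal userspace sketch (illustrative only; assumes a connected TCP
 * socket in tcpfd and a loaded BPF_PROG_TYPE_SOCKET_FILTER program in
 * bpffd that returns the length of the next message; error handling
 * omitted):
 *
 *	int kcmfd = socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);
 *	struct kcm_attach attach = {
 *		.fd = tcpfd,
 *		.bpf_fd = bpffd,
 *	};
 *	ioctl(kcmfd, SIOCKCMATTACH, &attach);
 *	// messages now flow through sendmsg()/recvmsg() on kcmfd
 */
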
#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/sched/signal.h>

#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>

unsigned int kcm_net_id;

static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;

static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
	return (struct kcm_sock *)sk;
}

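/* Per-message transmit state lives in the control buffer (cb) of a
 * message's head skb; see its use in kcm_write_msgs().
 */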
static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
	return (struct kcm_tx_msg *)skb->cb;
}

static void report_csk_error(struct sock *csk, int err)
{
	csk->sk_err = EPIPE;
	sk_error_report(csk);
}

static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
			       bool wakeup_kcm)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	/* Unrecoverable error in transmit */

	spin_lock_bh(&mux->lock);

	if (psock->tx_stopped) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	psock->tx_stopped = 1;
	KCM_STATS_INCR(psock->stats.tx_aborts);

	if (!psock->tx_kcm) {
		/* Take off psocks_avail list */
		list_del(&psock->psock_avail_list);
	} else if (wakeup_kcm) {
		/* In this case psock is being aborted while outside of
		 * write_msgs and psock is reserved. Schedule tx_work
		 * to handle the failure there. Need to commit tx_stopped
		 * before queuing work.
		 */
		smp_mb();

		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
	}

	spin_unlock_bh(&mux->lock);

	/* Report error on lower socket */
	report_csk_error(csk, err);
}

/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	STRP_STATS_ADD(mux->stats.rx_bytes,
		       psock->strp.stats.bytes -
		       psock->saved_rx_bytes);
	mux->stats.rx_msgs +=
		psock->strp.stats.msgs - psock->saved_rx_msgs;
	psock->saved_rx_msgs = psock->strp.stats.msgs;
	psock->saved_rx_bytes = psock->strp.stats.bytes;
}

static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	KCM_STATS_ADD(mux->stats.tx_bytes,
		      psock->stats.tx_bytes - psock->saved_tx_bytes);
	mux->stats.tx_msgs +=
		psock->stats.tx_msgs - psock->saved_tx_msgs;
	psock->saved_tx_msgs = psock->stats.tx_msgs;
	psock->saved_tx_bytes = psock->stats.tx_bytes;
}

static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* KCM is ready to receive messages on its queue: either the KCM is new or
 * has become unblocked after being blocked on a full socket buffer. Queue
 * any pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct sk_buff *skb;

	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
		return;

	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Assuming buffer limit has been reached */
			skb_queue_head(&mux->rx_hold_queue, skb);
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}
	}

	while (!list_empty(&mux->psocks_ready)) {
		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
					 psock_ready_list);

		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
			/* Assuming buffer limit has been reached */
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}

		/* Consumed the ready message on the psock. Schedule rx_work to
		 * get more messages.
		 */
		list_del(&psock->psock_ready_list);
		psock->ready_rx_msg = NULL;
		/* Commit clearing of ready_rx_msg for queuing work */
		smp_mb();

		strp_unpause(&psock->strp);
		strp_check_rcv(&psock->strp);
	}

	/* Buffer limit is okay now, add to ready list */
	list_add_tail(&kcm->wait_rx_list,
		      &kcm->mux->kcm_rx_waiters);
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_wait, true);
}

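/* skb destructor for messages queued on a KCM socket: uncharge the receive
 * memory and, once the socket drains below sk_rcvlowat while no psock is
 * reserved, put the KCM back on the ready list.
 */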
static void kcm_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct kcm_mux *mux = kcm->mux;
	unsigned int len = skb->truesize;

	sk_mem_uncharge(sk, len);
	atomic_sub(len, &sk->sk_rmem_alloc);

	/* For reading rx_wait and rx_psock without holding lock */
	smp_mb__after_atomic();

	if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
		spin_lock_bh(&mux->rx_lock);
		kcm_rcv_ready(kcm);
		spin_unlock_bh(&mux->rx_lock);
	}
}

static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb->dev = NULL;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = kcm_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

	skb_queue_tail(list, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}

/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
	struct sk_buff *skb;
	struct kcm_sock *kcm;

	while ((skb = skb_dequeue(head))) {
		/* Reset destructor to avoid calling kcm_rcv_ready */
		skb->destructor = sock_rfree;
		skb_orphan(skb);
try_again:
		if (list_empty(&mux->kcm_rx_waiters)) {
			skb_queue_tail(&mux->rx_hold_queue, skb);
			continue;
		}

		kcm = list_first_entry(&mux->kcm_rx_waiters,
				       struct kcm_sock, wait_rx_list);

		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Should mean socket buffer full */
			list_del(&kcm->wait_rx_list);
			/* paired with lockless reads in kcm_rfree() */
			WRITE_ONCE(kcm->rx_wait, false);

			/* Commit rx_wait to read in kcm_free */
			smp_wmb();

			goto try_again;
		}
	}
}

/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
				       struct sk_buff *head)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	WARN_ON(psock->ready_rx_msg);

	if (psock->rx_kcm)
		return psock->rx_kcm;

	spin_lock_bh(&mux->rx_lock);

	if (psock->rx_kcm) {
		spin_unlock_bh(&mux->rx_lock);
		return psock->rx_kcm;
	}

	kcm_update_rx_mux_stats(mux, psock);

	if (list_empty(&mux->kcm_rx_waiters)) {
		psock->ready_rx_msg = head;
		strp_pause(&psock->strp);
		list_add_tail(&psock->psock_ready_list,
			      &mux->psocks_ready);
		spin_unlock_bh(&mux->rx_lock);
		return NULL;
	}

	kcm = list_first_entry(&mux->kcm_rx_waiters,
			       struct kcm_sock, wait_rx_list);
	list_del(&kcm->wait_rx_list);
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_wait, false);

	psock->rx_kcm = kcm;
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_psock, psock);

	spin_unlock_bh(&mux->rx_lock);

	return kcm;
}

static void kcm_done(struct kcm_sock *kcm);

static void kcm_done_work(struct work_struct *w)
{
	kcm_done(container_of(w, struct kcm_sock, done_work));
}

/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
			     bool rcv_ready)
{
	struct kcm_sock *kcm = psock->rx_kcm;
	struct kcm_mux *mux = psock->mux;

	if (!kcm)
		return;

	spin_lock_bh(&mux->rx_lock);

	psock->rx_kcm = NULL;
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_psock, NULL);

	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
	 * kcm_rfree
	 */
	smp_mb();

	if (unlikely(kcm->done)) {
		spin_unlock_bh(&mux->rx_lock);

		/* Need to run kcm_done in a task since we need to acquire
		 * callback locks which may already be held here.
		 */
		INIT_WORK(&kcm->done_work, kcm_done_work);
		schedule_work(&kcm->done_work);
		return;
	}

	if (unlikely(kcm->rx_disabled)) {
		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
		/* Check for degenerative race with rx_wait that all
		 * data was dequeued (accounted for in kcm_rfree).
		 */
		kcm_rcv_ready(kcm);
	}
	spin_unlock_bh(&mux->rx_lock);
}

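/* The attached TCP socket's callbacks are replaced at attach time (see
 * kcm_attach); the originals are saved so they can be restored at
 * unattach. psock_data_ready forwards data readiness into the strparser.
 */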
/* Lower sock lock held */
static void psock_data_ready(struct sock *sk)
{
	struct kcm_psock *psock;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (likely(psock))
		strp_data_ready(&psock->strp);

	read_unlock_bh(&sk->sk_callback_lock);
}

/* Called with lower sock held */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct kcm_sock *kcm;

try_queue:
	kcm = reserve_rx_kcm(psock, skb);
	if (!kcm) {
		 /* Unable to reserve a KCM, message is held in psock and strp
		  * is paused.
		  */
		return;
	}

	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
		/* Should mean socket buffer full */
		unreserve_rx_kcm(psock, false);
		goto try_queue;
	}
}

static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct bpf_prog *prog = psock->bpf_prog;
	int res;

	res = bpf_prog_run_pin_on_cpu(prog, skb);
	return res;
}

static int kcm_read_sock_done(struct strparser *strp, int err)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);

	unreserve_rx_kcm(psock, true);

	return err;
}

static void psock_state_change(struct sock *sk)
{
	/* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
	 * since applications will normally not poll with EPOLLIN on the
	 * TCP sockets.
	 */

	report_csk_error(sk, EPIPE);
}

static void psock_write_space(struct sock *sk)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux;
	struct kcm_sock *kcm;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (unlikely(!psock))
		goto out;
	mux = psock->mux;

	spin_lock_bh(&mux->lock);

	/* Check if the socket is reserved; if so, a kcm is waiting to send. */
	kcm = psock->tx_kcm;
	if (kcm && !unlikely(kcm->tx_stopped))
		queue_work(kcm_wq, &kcm->tx_work);

	spin_unlock_bh(&mux->lock);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void unreserve_psock(struct kcm_sock *kcm);

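/* TX side: a kcm must reserve a psock before writing. If none is
 * available the kcm goes on the mux's kcm_tx_waiters list and is handed
 * a psock later by psock_now_avail().
 */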
/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;

	psock = kcm->tx_psock;

	smp_rmb(); /* Must read tx_psock before tx_wait */

	if (psock) {
		WARN_ON(kcm->tx_wait);
		if (unlikely(psock->tx_stopped))
			unreserve_psock(kcm);
		else
			return kcm->tx_psock;
	}

	spin_lock_bh(&mux->lock);

	/* Check again under lock to see if a psock was reserved for this
	 * kcm while the mux lock was dropped (via psock_now_avail).
	 */
	psock = kcm->tx_psock;
	if (unlikely(psock)) {
		WARN_ON(kcm->tx_wait);
		spin_unlock_bh(&mux->lock);
		return kcm->tx_psock;
	}

	if (!list_empty(&mux->psocks_avail)) {
		psock = list_first_entry(&mux->psocks_avail,
					 struct kcm_psock,
					 psock_avail_list);
		list_del(&psock->psock_avail_list);
		if (kcm->tx_wait) {
			list_del(&kcm->wait_psock_list);
			kcm->tx_wait = false;
		}
		kcm->tx_psock = psock;
		psock->tx_kcm = kcm;
		KCM_STATS_INCR(psock->stats.reserved);
	} else if (!kcm->tx_wait) {
		list_add_tail(&kcm->wait_psock_list,
			      &mux->kcm_tx_waiters);
		kcm->tx_wait = true;
	}

	spin_unlock_bh(&mux->lock);

	return psock;
}

/* mux lock held */
static void psock_now_avail(struct kcm_psock *psock)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	if (list_empty(&mux->kcm_tx_waiters)) {
		list_add_tail(&psock->psock_avail_list,
			      &mux->psocks_avail);
	} else {
		kcm = list_first_entry(&mux->kcm_tx_waiters,
				       struct kcm_sock,
				       wait_psock_list);
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
		psock->tx_kcm = kcm;

		/* Commit before changing tx_psock since that is read in
		 * reserve_psock before queuing work.
		 */
		smp_mb();

		kcm->tx_psock = psock;
		KCM_STATS_INCR(psock->stats.reserved);
		queue_work(kcm_wq, &kcm->tx_work);
	}
}

/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);

	psock = kcm->tx_psock;

	if (WARN_ON(!psock)) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	smp_rmb(); /* Read tx_psock before tx_wait */

	kcm_update_tx_mux_stats(mux, psock);

	WARN_ON(kcm->tx_wait);

	kcm->tx_psock = NULL;
	psock->tx_kcm = NULL;
	KCM_STATS_INCR(psock->stats.unreserved);

	if (unlikely(psock->tx_stopped)) {
		if (psock->done) {
			/* Deferred free */
			list_del(&psock->psock_list);
			mux->psocks_cnt--;
			sock_put(psock->sk);
			fput(psock->sk->sk_socket->file);
			kmem_cache_free(kcm_psockp, psock);
		}

		/* Don't put back on available list */

		spin_unlock_bh(&mux->lock);

		return;
	}

	psock_now_avail(psock);

	spin_unlock_bh(&mux->lock);
}

static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);
	KCM_STATS_INCR(mux->stats.tx_retries);
	spin_unlock_bh(&mux->lock);
}

/* Write any messages ready on the kcm socket.  Called with kcm sock lock
 * held.  Return bytes actually sent or error.
 */
static int kcm_write_msgs(struct kcm_sock *kcm)
{
	struct sock *sk = &kcm->sk;
	struct kcm_psock *psock;
	struct sk_buff *skb, *head;
	struct kcm_tx_msg *txm;
	unsigned short fragidx, frag_offset;
	unsigned int sent, total_sent = 0;
	int ret = 0;

	kcm->tx_wait_more = false;
	psock = kcm->tx_psock;
	if (unlikely(psock && psock->tx_stopped)) {
		/* A reserved psock was aborted asynchronously. Unreserve
		 * it and we'll retry the message.
		 */
		unreserve_psock(kcm);
		kcm_report_tx_retry(kcm);
		if (skb_queue_empty(&sk->sk_write_queue))
			return 0;

		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;

	} else if (skb_queue_empty(&sk->sk_write_queue)) {
		return 0;
	}

	head = skb_peek(&sk->sk_write_queue);
	txm = kcm_tx_msg(head);

	if (txm->sent) {
		/* Send of first skbuff in queue already in progress */
		if (WARN_ON(!psock)) {
			ret = -EINVAL;
			goto out;
		}
		sent = txm->sent;
		frag_offset = txm->frag_offset;
		fragidx = txm->fragidx;
		skb = txm->frag_skb;

		goto do_frag;
	}

try_again:
	psock = reserve_psock(kcm);
	if (!psock)
		goto out;

	do {
		skb = head;
		txm = kcm_tx_msg(head);
		sent = 0;

do_frag_list:
		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
			ret = -EINVAL;
			goto out;
		}

		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
		     fragidx++) {
			skb_frag_t *frag;

			frag_offset = 0;
do_frag:
			frag = &skb_shinfo(skb)->frags[fragidx];
			if (WARN_ON(!skb_frag_size(frag))) {
				ret = -EINVAL;
				goto out;
			}

			ret = kernel_sendpage(psock->sk->sk_socket,
					      skb_frag_page(frag),
					      skb_frag_off(frag) + frag_offset,
					      skb_frag_size(frag) - frag_offset,
					      MSG_DONTWAIT);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					/* Save state to try again when there's
					 * write space on the socket
					 */
					txm->sent = sent;
					txm->frag_offset = frag_offset;
					txm->fragidx = fragidx;
					txm->frag_skb = skb;

					ret = 0;
					goto out;
				}

				/* Hard failure in sending message, abort this
				 * psock since it has lost framing
				 * synchronization and retry sending the
				 * message from the beginning.
				 */
				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
						   true);
				unreserve_psock(kcm);

				txm->sent = 0;
				kcm_report_tx_retry(kcm);
				ret = 0;

				goto try_again;
			}

			sent += ret;
			frag_offset += ret;
			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
			if (frag_offset < skb_frag_size(frag)) {
				/* Not finished with this frag */
				goto do_frag;
			}
		}

		if (skb == head) {
			if (skb_has_frag_list(skb)) {
				skb = skb_shinfo(skb)->frag_list;
				goto do_frag_list;
			}
		} else if (skb->next) {
			skb = skb->next;
			goto do_frag_list;
		}

		/* Successfully sent the whole packet, account for it. */
		skb_dequeue(&sk->sk_write_queue);
		kfree_skb(head);
		sk->sk_wmem_queued -= sent;
		total_sent += sent;
		KCM_STATS_INCR(psock->stats.tx_msgs);
	} while ((head = skb_peek(&sk->sk_write_queue)));
out:
	if (!head) {
		/* Done with all queued messages. */
		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
		unreserve_psock(kcm);
	}

	/* Check if write space is available */
	sk->sk_write_space(sk);

	return total_sent ? : ret;
}

static void kcm_tx_work(struct work_struct *w)
{
	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
	struct sock *sk = &kcm->sk;
	int err;

	lock_sock(sk);

	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
	 * aborts
	 */
	err = kcm_write_msgs(kcm);
	if (err < 0) {
		/* Hard failure in write, report error on KCM socket */
		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
		report_csk_error(&kcm->sk, -err);
		goto out;
	}

	/* Primarily for SOCK_SEQPACKET sockets */
	if (likely(sk->sk_socket) &&
	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_space(sk);
	}

out:
	release_sock(sk);
}

static void kcm_push(struct kcm_sock *kcm)
{
	if (kcm->tx_wait_more)
		kcm_write_msgs(kcm);
}

static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)

{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	int err = 0;
	int i;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & MSG_MORE);

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	err = -EPIPE;
	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		i = skb_shinfo(skb)->nr_frags;

		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
			skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
			goto coalesced;
		}

		if (i >= MAX_SKB_FRAGS) {
			struct sk_buff *tskb;

			tskb = alloc_skb(0, sk->sk_allocation);
			while (!tskb) {
				kcm_push(kcm);
				err = sk_stream_wait_memory(sk, &timeo);
				if (err)
					goto out_error;
			}

			if (head == skb)
				skb_shinfo(head)->frag_list = tskb;
			else
				skb->next = tskb;

			skb = tskb;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			i = 0;
		}
	} else {
		/* Call the sk_stream functions to manage the sndbuf mem. */
		if (!sk_stream_memory_free(sk)) {
			kcm_push(kcm);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;
		}

		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;
		}

		skb = head;
		i = 0;
	}

	get_page(page);
	skb_fill_page_desc_noacc(skb, i, page, offset, size);
	skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;

coalesced:
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	sk->sk_wmem_queued += size;
	sk_mem_charge(sk, size);

	if (head != skb) {
		head->len += size;
		head->data_len += size;
		head->truesize += size;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		/* Message complete, queue it on send buffer */
		__skb_queue_tail(&sk->sk_write_queue, head);
		kcm->seq_skb = NULL;
		KCM_STATS_INCR(kcm->stats.tx_msgs);

		if (flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
		kcm->seq_skb = head;
		kcm_tx_msg(head)->last_skb = skb;
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, size);

	release_sock(sk);
	return size;

out_error:
	kcm_push(kcm);

	err = sk_stream_error(sk, flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}

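/* Send on a KCM socket. For SOCK_DGRAM a message is complete unless
 * MSG_MORE is set; for SOCK_SEQPACKET completion is signalled with
 * MSG_EOR. MSG_BATCH leaves finished messages on the write queue so that
 * several can be flushed by one kcm_write_msgs() call.
 */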
static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	size_t copy, copied = 0;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	int eor = (sock->type == SOCK_DGRAM) ?
		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
	int err = -EPIPE;

	lock_sock(sk);

	/* Per tcp_sendmsg this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		goto start;
	}

	/* Call the sk_stream functions to manage the sndbuf mem. */
	if (!sk_stream_memory_free(sk)) {
		kcm_push(kcm);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (msg_data_left(msg)) {
		/* New message, alloc head skb */
		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;

		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
		 */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

start:
	while (msg_data_left(msg)) {
		bool merge = true;
		int i = skb_shinfo(skb)->nr_frags;
		struct page_frag *pfrag = sk_page_frag(sk);

		if (!sk_page_frag_refill(sk, pfrag))
			goto wait_for_memory;

		if (!skb_can_coalesce(skb, i, pfrag->page,
				      pfrag->offset)) {
			if (i == MAX_SKB_FRAGS) {
				struct sk_buff *tskb;

				tskb = alloc_skb(0, sk->sk_allocation);
				if (!tskb)
					goto wait_for_memory;

				if (head == skb)
					skb_shinfo(head)->frag_list = tskb;
				else
					skb->next = tskb;

				skb = tskb;
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				continue;
			}
			merge = false;
		}

		copy = min_t(int, msg_data_left(msg),
			     pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
					       pfrag->page,
					       pfrag->offset,
					       copy);
		if (err)
			goto out_error;

		/* Update the skb. */
		if (merge) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			skb_fill_page_desc(skb, i, pfrag->page,
					   pfrag->offset, copy);
			get_page(pfrag->page);
		}

		pfrag->offset += copy;
		copied += copy;
		if (head != skb) {
			head->len += copy;
			head->data_len += copy;
		}

		continue;

wait_for_memory:
		kcm_push(kcm);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		if (head) {
			/* Message complete, queue it on send buffer */
			__skb_queue_tail(&sk->sk_write_queue, head);
			kcm->seq_skb = NULL;
			KCM_STATS_INCR(kcm->stats.tx_msgs);
		}

		if (msg->msg_flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
partial_message:
		if (head) {
			kcm->seq_skb = head;
			kcm_tx_msg(head)->last_skb = skb;
		}
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

	release_sock(sk);
	return copied;

out_error:
	kcm_push(kcm);

	if (sock->type == SOCK_SEQPACKET) {
		/* Wrote some bytes before encountering an
		 * error, return partial success.
		 */
		if (copied)
			goto partial_message;
		if (head != kcm->seq_skb)
			kfree_skb(head);
	} else {
		kfree_skb(head);
		kcm->seq_skb = NULL;
	}

	err = sk_stream_error(sk, msg->msg_flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}

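/* Receive one parsed message. For SOCK_DGRAM any unread remainder is
 * dropped and MSG_TRUNC set; for SOCK_SEQPACKET the remainder stays
 * queued and MSG_EOR is set once the whole message has been read.
 */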
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
		       size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	int err = 0;
	struct strp_msg *stm;
	int copied = 0;
	struct sk_buff *skb;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	/* Okay, have a message on the receive queue */

	stm = strp_msg(skb);

	if (len > stm->full_len)
		len = stm->full_len;

	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
	if (err < 0)
		goto out;

	copied = len;
	if (likely(!(flags & MSG_PEEK))) {
		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
		if (copied < stm->full_len) {
			if (sock->type == SOCK_DGRAM) {
				/* Truncated message */
				msg->msg_flags |= MSG_TRUNC;
				goto msg_finished;
			}
			stm->offset += copied;
			stm->full_len -= copied;
		} else {
msg_finished:
			/* Finished with message */
			msg->msg_flags |= MSG_EOR;
			KCM_STATS_INCR(kcm->stats.rx_msgs);
		}
	}

out:
	skb_free_datagram(sk, skb);
	return copied ? : err;
}

static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct strp_msg *stm;
	int err = 0;
	ssize_t copied;
	struct sk_buff *skb;

	/* Only support splice for SOCK_SEQPACKET */

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto err_out;

	/* Okay, have a message on the receive queue */

	stm = strp_msg(skb);

	if (len > stm->full_len)
		len = stm->full_len;

	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
	if (copied < 0) {
		err = copied;
		goto err_out;
	}

	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);

	stm->offset += copied;
	stm->full_len -= copied;

	/* We have no way to return MSG_EOR. If all the bytes have been
	 * read we still leave the message in the receive socket buffer.
	 * A subsequent recvmsg needs to be done to return MSG_EOR and
	 * finish reading the message.
	 */

	skb_free_datagram(sk, skb);
	return copied;

err_out:
	skb_free_datagram(sk, skb);
	return err;
}

/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 1;

	/* If a psock is reserved we'll do cleanup in unreserve */
	if (!kcm->rx_psock) {
		if (kcm->rx_wait) {
			list_del(&kcm->wait_rx_list);
			/* paired with lockless reads in kcm_rfree() */
			WRITE_ONCE(kcm->rx_wait, false);
		}

		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	}

	spin_unlock_bh(&mux->rx_lock);
}

/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (!kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 0;
	kcm_rcv_ready(kcm);

	spin_unlock_bh(&mux->rx_lock);
}

static int kcm_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, valbool;
	int err = 0;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case KCM_RECV_DISABLE:
		lock_sock(&kcm->sk);
		if (valbool)
			kcm_recv_disable(kcm);
		else
			kcm_recv_enable(kcm);
		release_sock(&kcm->sk);
		break;
	default:
		err = -ENOPROTOOPT;
	}

	return err;
}

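/* Userspace sketch (illustrative only): pause message delivery to this
 * socket so messages are steered to other KCM sockets on the mux:
 *
 *	int on = 1;
 *	setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE, &on, sizeof(on));
 */
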
static int kcm_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, len;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case KCM_RECV_DISABLE:
		val = kcm->rx_disabled;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
	struct kcm_sock *tkcm;
	struct list_head *head;
	int index = 0;

	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
	 * we set sk_state, otherwise epoll_wait always returns right away with
	 * EPOLLHUP
	 */
	kcm->sk.sk_state = TCP_ESTABLISHED;

	/* Add to mux's kcm sockets list */
	kcm->mux = mux;
	spin_lock_bh(&mux->lock);

	head = &mux->kcm_socks;
	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
		if (tkcm->index != index)
			break;
		head = &tkcm->kcm_sock_list;
		index++;
	}

	list_add(&kcm->kcm_sock_list, head);
	kcm->index = index;

	mux->kcm_socks_cnt++;
	spin_unlock_bh(&mux->lock);

	INIT_WORK(&kcm->tx_work, kcm_tx_work);

	spin_lock_bh(&mux->rx_lock);
	kcm_rcv_ready(kcm);
	spin_unlock_bh(&mux->rx_lock);
}

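/* Attach a connected TCP socket to the mux: save and replace its
 * sk_data_ready/sk_write_space/sk_state_change callbacks and initialize a
 * strparser on it. The BPF program provides framing by returning the
 * length of the next message in the stream.
 */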
static int kcm_attach(struct socket *sock, struct socket *csock,
		      struct bpf_prog *prog)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct sock *csk;
	struct kcm_psock *psock = NULL, *tpsock;
	struct list_head *head;
	int index = 0;
	static const struct strp_callbacks cb = {
		.rcv_msg = kcm_rcv_strparser,
		.parse_msg = kcm_parse_func_strparser,
		.read_sock_done = kcm_read_sock_done,
	};
	int err = 0;

	csk = csock->sk;
	if (!csk)
		return -EINVAL;

	lock_sock(csk);

	/* Only allow TCP sockets to be attached for now */
	if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
	    csk->sk_protocol != IPPROTO_TCP) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Don't allow listeners or closed sockets */
	if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
		err = -EOPNOTSUPP;
		goto out;
	}

	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
	if (!psock) {
		err = -ENOMEM;
		goto out;
	}

	psock->mux = mux;
	psock->sk = csk;
	psock->bpf_prog = prog;

	write_lock_bh(&csk->sk_callback_lock);

	/* Check if sk_user_data is already used by KCM or someone else.
	 * Must be done under lock to prevent race conditions.
	 */
	if (csk->sk_user_data) {
		write_unlock_bh(&csk->sk_callback_lock);
		kmem_cache_free(kcm_psockp, psock);
		err = -EALREADY;
		goto out;
	}

	err = strp_init(&psock->strp, csk, &cb);
	if (err) {
		write_unlock_bh(&csk->sk_callback_lock);
		kmem_cache_free(kcm_psockp, psock);
		goto out;
	}

	psock->save_data_ready = csk->sk_data_ready;
	psock->save_write_space = csk->sk_write_space;
	psock->save_state_change = csk->sk_state_change;
	csk->sk_user_data = psock;
	csk->sk_data_ready = psock_data_ready;
	csk->sk_write_space = psock_write_space;
	csk->sk_state_change = psock_state_change;

	write_unlock_bh(&csk->sk_callback_lock);

	sock_hold(csk);

	/* Finished initialization, now add the psock to the MUX. */
	spin_lock_bh(&mux->lock);
	head = &mux->psocks;
	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
		if (tpsock->index != index)
			break;
		head = &tpsock->psock_list;
		index++;
	}

	list_add(&psock->psock_list, head);
	psock->index = index;

	KCM_STATS_INCR(mux->stats.psock_attach);
	mux->psocks_cnt++;
	psock_now_avail(psock);
	spin_unlock_bh(&mux->lock);

	/* Schedule RX work in case there are already bytes queued */
	strp_check_rcv(&psock->strp);

out:
	release_sock(csk);

	return err;
}

static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
	struct socket *csock;
	struct bpf_prog *prog;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out;
	}

	err = kcm_attach(sock, csock, prog);
	if (err) {
		bpf_prog_put(prog);
		goto out;
	}

	/* Keep reference on file also */

	return 0;
out:
	sockfd_put(csock);
	return err;
}

static void kcm_unattach(struct kcm_psock *psock)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	lock_sock(csk);

	/* Stop getting callbacks from TCP socket. After this there should
	 * be no way to reserve a kcm for this psock.
	 */
	write_lock_bh(&csk->sk_callback_lock);
	csk->sk_user_data = NULL;
	csk->sk_data_ready = psock->save_data_ready;
	csk->sk_write_space = psock->save_write_space;
	csk->sk_state_change = psock->save_state_change;
	strp_stop(&psock->strp);

	if (WARN_ON(psock->rx_kcm)) {
		write_unlock_bh(&csk->sk_callback_lock);
		release_sock(csk);
		return;
	}

	spin_lock_bh(&mux->rx_lock);

	/* Stop receiver activities. After this point psock should not be
	 * able to get onto ready list either through callbacks or work.
	 */
	if (psock->ready_rx_msg) {
		list_del(&psock->psock_ready_list);
		kfree_skb(psock->ready_rx_msg);
		psock->ready_rx_msg = NULL;
		KCM_STATS_INCR(mux->stats.rx_ready_drops);
	}

	spin_unlock_bh(&mux->rx_lock);

	write_unlock_bh(&csk->sk_callback_lock);

	/* Call strp_done without sock lock */
	release_sock(csk);
	strp_done(&psock->strp);
	lock_sock(csk);

	bpf_prog_put(psock->bpf_prog);

	spin_lock_bh(&mux->lock);

	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);

	KCM_STATS_INCR(mux->stats.psock_unattach);

	if (psock->tx_kcm) {
		/* psock was reserved.  Just mark it finished and we will clean
		 * up in the kcm paths, we need kcm lock which can not be
		 * acquired here.
		 */
		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
		spin_unlock_bh(&mux->lock);

		/* We are unattaching a socket that is reserved. Abort the
		 * socket since we may be out of sync in sending on it. We need
		 * to do this without the mux lock.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);

		spin_lock_bh(&mux->lock);
		if (!psock->tx_kcm) {
1536 			/* psock now unreserved in window mux was unlocked */
1537 			goto no_reserved;
1538 		}
1539 		psock->done = 1;
1540 
1541 		/* Commit done before queuing work to process it */
1542 		smp_mb();
1543 
1544 		/* Queue tx work to make sure psock->done is handled */
1545 		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1546 		spin_unlock_bh(&mux->lock);
1547 	} else {
1548 no_reserved:
1549 		if (!psock->tx_stopped)
1550 			list_del(&psock->psock_avail_list);
1551 		list_del(&psock->psock_list);
1552 		mux->psocks_cnt--;
1553 		spin_unlock_bh(&mux->lock);
1554 
1555 		sock_put(csk);
1556 		fput(csk->sk_socket->file);
1557 		kmem_cache_free(kcm_psockp, psock);
1558 	}
1559 
1560 	release_sock(csk);
1561 }
1562 
kcm_unattach_ioctl(struct socket * sock,struct kcm_unattach * info)1563 static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1564 {
1565 	struct kcm_sock *kcm = kcm_sk(sock->sk);
1566 	struct kcm_mux *mux = kcm->mux;
1567 	struct kcm_psock *psock;
1568 	struct socket *csock;
1569 	struct sock *csk;
1570 	int err;
1571 
1572 	csock = sockfd_lookup(info->fd, &err);
1573 	if (!csock)
1574 		return -ENOENT;
1575 
1576 	csk = csock->sk;
1577 	if (!csk) {
1578 		err = -EINVAL;
1579 		goto out;
1580 	}
1581 
1582 	err = -ENOENT;
1583 
1584 	spin_lock_bh(&mux->lock);
1585 
1586 	list_for_each_entry(psock, &mux->psocks, psock_list) {
1587 		if (psock->sk != csk)
1588 			continue;
1589 
1590 		/* Found the matching psock */
1591 
1592 		if (psock->unattaching || WARN_ON(psock->done)) {
1593 			err = -EALREADY;
1594 			break;
1595 		}
1596 
1597 		psock->unattaching = 1;
1598 
1599 		spin_unlock_bh(&mux->lock);
1600 
1601 		/* Lower socket lock should already be held */
1602 		kcm_unattach(psock);
1603 
1604 		err = 0;
1605 		goto out;
1606 	}
1607 
1608 	spin_unlock_bh(&mux->lock);
1609 
1610 out:
1611 	sockfd_put(csock);
1612 	return err;
1613 }
1614 
1615 static struct proto kcm_proto = {
1616 	.name	= "KCM",
1617 	.owner	= THIS_MODULE,
1618 	.obj_size = sizeof(struct kcm_sock),
1619 };
1620 
1621 /* Clone a kcm socket. */
kcm_clone(struct socket * osock)1622 static struct file *kcm_clone(struct socket *osock)
1623 {
1624 	struct socket *newsock;
1625 	struct sock *newsk;
1626 
1627 	newsock = sock_alloc();
1628 	if (!newsock)
1629 		return ERR_PTR(-ENFILE);
1630 
1631 	newsock->type = osock->type;
1632 	newsock->ops = osock->ops;
1633 
1634 	__module_get(newsock->ops->owner);
1635 
1636 	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1637 			 &kcm_proto, false);
1638 	if (!newsk) {
1639 		sock_release(newsock);
1640 		return ERR_PTR(-ENOMEM);
1641 	}
1642 	sock_init_data(newsock, newsk);
1643 	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1644 
1645 	return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1646 }
1647 
kcm_ioctl(struct socket * sock,unsigned int cmd,unsigned long arg)1648 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1649 {
1650 	int err;
1651 
1652 	switch (cmd) {
1653 	case SIOCKCMATTACH: {
1654 		struct kcm_attach info;
1655 
1656 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1657 			return -EFAULT;
1658 
1659 		err = kcm_attach_ioctl(sock, &info);
1660 
1661 		break;
1662 	}
1663 	case SIOCKCMUNATTACH: {
1664 		struct kcm_unattach info;
1665 
1666 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1667 			return -EFAULT;
1668 
1669 		err = kcm_unattach_ioctl(sock, &info);
1670 
1671 		break;
1672 	}
1673 	case SIOCKCMCLONE: {
1674 		struct kcm_clone info;
1675 		struct file *file;
1676 
1677 		info.fd = get_unused_fd_flags(0);
1678 		if (unlikely(info.fd < 0))
1679 			return info.fd;
1680 
1681 		file = kcm_clone(sock);
1682 		if (IS_ERR(file)) {
1683 			put_unused_fd(info.fd);
1684 			return PTR_ERR(file);
1685 		}
1686 		if (copy_to_user((void __user *)arg, &info,
1687 				 sizeof(info))) {
1688 			put_unused_fd(info.fd);
1689 			fput(file);
1690 			return -EFAULT;
1691 		}
1692 		fd_install(info.fd, file);
1693 		err = 0;
1694 		break;
1695 	}
1696 	default:
1697 		err = -ENOIOCTLCMD;
1698 		break;
1699 	}
1700 
1701 	return err;
1702 }
1703 
static void free_mux(struct rcu_head *rcu)
{
	struct kcm_mux *mux = container_of(rcu,
	    struct kcm_mux, rcu);

	kmem_cache_free(kcm_muxp, mux);
}

static void release_mux(struct kcm_mux *mux)
{
	struct kcm_net *knet = mux->knet;
	struct kcm_psock *psock, *tmp_psock;

	/* Release psocks */
	list_for_each_entry_safe(psock, tmp_psock,
				 &mux->psocks, psock_list) {
		if (!WARN_ON(psock->unattaching))
			kcm_unattach(psock);
	}

	if (WARN_ON(mux->psocks_cnt))
		return;

	__skb_queue_purge(&mux->rx_hold_queue);

	mutex_lock(&knet->mutex);
	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
	aggregate_psock_stats(&mux->aggregate_psock_stats,
			      &knet->aggregate_psock_stats);
	aggregate_strp_stats(&mux->aggregate_strp_stats,
			     &knet->aggregate_strp_stats);
	list_del_rcu(&mux->kcm_mux_list);
	knet->count--;
	mutex_unlock(&knet->mutex);

	call_rcu(&mux->rcu, free_mux);
}

static void kcm_done(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct sock *sk = &kcm->sk;
	int socks_cnt;

	spin_lock_bh(&mux->rx_lock);
	if (kcm->rx_psock) {
		/* Cleanup in unreserve_rx_kcm */
		WARN_ON(kcm->done);
		kcm->rx_disabled = 1;
		kcm->done = 1;
		spin_unlock_bh(&mux->rx_lock);
		return;
	}

	if (kcm->rx_wait) {
		list_del(&kcm->wait_rx_list);
		/* paired with lockless reads in kcm_rfree() */
		WRITE_ONCE(kcm->rx_wait, false);
	}
	/* Move any pending receive messages to other kcm sockets */
	requeue_rx_msgs(mux, &sk->sk_receive_queue);

	spin_unlock_bh(&mux->rx_lock);

	if (WARN_ON(sk_rmem_alloc_get(sk)))
		return;

	/* Detach from MUX */
	spin_lock_bh(&mux->lock);

	list_del(&kcm->kcm_sock_list);
	mux->kcm_socks_cnt--;
	socks_cnt = mux->kcm_socks_cnt;

	spin_unlock_bh(&mux->lock);

	if (!socks_cnt) {
		/* We are done with the mux now. */
		release_mux(mux);
	}

	WARN_ON(kcm->rx_wait);

	sock_put(&kcm->sk);
}

/* Release a KCM socket. If this is the last KCM socket on the MUX,
 * destroy the MUX.
 */
static int kcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm;
	struct kcm_mux *mux;
	struct kcm_psock *psock;

	if (!sk)
		return 0;

	kcm = kcm_sk(sk);
	mux = kcm->mux;

	lock_sock(sk);
	sock_orphan(sk);
	kfree_skb(kcm->seq_skb);

	/* Purge queue under lock to avoid race condition with tx_work trying
	 * to act when queue is nonempty. If tx_work runs after this point
	 * it will just return.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

1816 	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
1817 	 * get a writespace callback. This prevents further work being queued
1818 	 * from the callback (unbinding the psock occurs after canceling work.
1819 	 */
1820 	kcm->tx_stopped = 1;
1821 
1822 	release_sock(sk);
1823 
1824 	spin_lock_bh(&mux->lock);
1825 	if (kcm->tx_wait) {
1826 		/* Take of tx_wait list, after this point there should be no way
1827 		 * that a psock will be assigned to this kcm.
1828 		 */
1829 		list_del(&kcm->wait_psock_list);
1830 		kcm->tx_wait = false;
1831 	}
1832 	spin_unlock_bh(&mux->lock);
1833 
1834 	/* Cancel work. After this point there should be no outside references
1835 	 * to the kcm socket.
1836 	 */
1837 	cancel_work_sync(&kcm->tx_work);
1838 
1839 	lock_sock(sk);
1840 	psock = kcm->tx_psock;
1841 	if (psock) {
1842 		/* A psock was reserved, so we need to kill it since it
1843 		 * may already have some bytes queued from a message. We
1844 		 * need to do this after removing kcm from tx_wait list.
1845 		 */
1846 		kcm_abort_tx_psock(psock, EPIPE, false);
1847 		unreserve_psock(kcm);
1848 	}
1849 	release_sock(sk);
1850 
1851 	WARN_ON(kcm->tx_wait);
1852 	WARN_ON(kcm->tx_psock);
1853 
1854 	sock->sk = NULL;
1855 
1856 	kcm_done(kcm);
1857 
1858 	return 0;
1859 }
1860 
1861 static const struct proto_ops kcm_dgram_ops = {
1862 	.family =	PF_KCM,
1863 	.owner =	THIS_MODULE,
1864 	.release =	kcm_release,
1865 	.bind =		sock_no_bind,
1866 	.connect =	sock_no_connect,
1867 	.socketpair =	sock_no_socketpair,
1868 	.accept =	sock_no_accept,
1869 	.getname =	sock_no_getname,
1870 	.poll =		datagram_poll,
1871 	.ioctl =	kcm_ioctl,
1872 	.listen =	sock_no_listen,
1873 	.shutdown =	sock_no_shutdown,
1874 	.setsockopt =	kcm_setsockopt,
1875 	.getsockopt =	kcm_getsockopt,
1876 	.sendmsg =	kcm_sendmsg,
1877 	.recvmsg =	kcm_recvmsg,
1878 	.mmap =		sock_no_mmap,
1879 	.sendpage =	kcm_sendpage,
1880 };
1881 
1882 static const struct proto_ops kcm_seqpacket_ops = {
1883 	.family =	PF_KCM,
1884 	.owner =	THIS_MODULE,
1885 	.release =	kcm_release,
1886 	.bind =		sock_no_bind,
1887 	.connect =	sock_no_connect,
1888 	.socketpair =	sock_no_socketpair,
1889 	.accept =	sock_no_accept,
1890 	.getname =	sock_no_getname,
1891 	.poll =		datagram_poll,
1892 	.ioctl =	kcm_ioctl,
1893 	.listen =	sock_no_listen,
1894 	.shutdown =	sock_no_shutdown,
1895 	.setsockopt =	kcm_setsockopt,
1896 	.getsockopt =	kcm_getsockopt,
1897 	.sendmsg =	kcm_sendmsg,
1898 	.recvmsg =	kcm_recvmsg,
1899 	.mmap =		sock_no_mmap,
1900 	.sendpage =	kcm_sendpage,
1901 	.splice_read =	kcm_splice_read,
1902 };
1903 
1904 /* Create proto operation for kcm sockets */
kcm_create(struct net * net,struct socket * sock,int protocol,int kern)1905 static int kcm_create(struct net *net, struct socket *sock,
1906 		      int protocol, int kern)
1907 {
1908 	struct kcm_net *knet = net_generic(net, kcm_net_id);
1909 	struct sock *sk;
1910 	struct kcm_mux *mux;
1911 
1912 	switch (sock->type) {
1913 	case SOCK_DGRAM:
1914 		sock->ops = &kcm_dgram_ops;
1915 		break;
1916 	case SOCK_SEQPACKET:
1917 		sock->ops = &kcm_seqpacket_ops;
1918 		break;
1919 	default:
1920 		return -ESOCKTNOSUPPORT;
1921 	}
1922 
1923 	if (protocol != KCMPROTO_CONNECTED)
1924 		return -EPROTONOSUPPORT;
1925 
1926 	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1927 	if (!sk)
1928 		return -ENOMEM;
1929 
1930 	/* Allocate a kcm mux, shared between KCM sockets */
1931 	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1932 	if (!mux) {
1933 		sk_free(sk);
1934 		return -ENOMEM;
1935 	}
1936 
1937 	spin_lock_init(&mux->lock);
1938 	spin_lock_init(&mux->rx_lock);
1939 	INIT_LIST_HEAD(&mux->kcm_socks);
1940 	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1941 	INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1942 
1943 	INIT_LIST_HEAD(&mux->psocks);
1944 	INIT_LIST_HEAD(&mux->psocks_ready);
1945 	INIT_LIST_HEAD(&mux->psocks_avail);
1946 
1947 	mux->knet = knet;
1948 
1949 	/* Add new MUX to list */
1950 	mutex_lock(&knet->mutex);
1951 	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1952 	knet->count++;
1953 	mutex_unlock(&knet->mutex);
1954 
1955 	skb_queue_head_init(&mux->rx_hold_queue);
1956 
1957 	/* Init KCM socket */
1958 	sock_init_data(sock, sk);
1959 	init_kcm_sock(kcm_sk(sk), mux);
1960 
1961 	return 0;
1962 }
1963 
1964 static const struct net_proto_family kcm_family_ops = {
1965 	.family = PF_KCM,
1966 	.create = kcm_create,
1967 	.owner  = THIS_MODULE,
1968 };
1969 
kcm_init_net(struct net * net)1970 static __net_init int kcm_init_net(struct net *net)
1971 {
1972 	struct kcm_net *knet = net_generic(net, kcm_net_id);
1973 
1974 	INIT_LIST_HEAD_RCU(&knet->mux_list);
1975 	mutex_init(&knet->mutex);
1976 
1977 	return 0;
1978 }
1979 
kcm_exit_net(struct net * net)1980 static __net_exit void kcm_exit_net(struct net *net)
1981 {
1982 	struct kcm_net *knet = net_generic(net, kcm_net_id);
1983 
1984 	/* All KCM sockets should be closed at this point, which should mean
1985 	 * that all multiplexors and psocks have been destroyed.
1986 	 */
1987 	WARN_ON(!list_empty(&knet->mux_list));
1988 
1989 	mutex_destroy(&knet->mutex);
1990 }
1991 
1992 static struct pernet_operations kcm_net_ops = {
1993 	.init = kcm_init_net,
1994 	.exit = kcm_exit_net,
1995 	.id   = &kcm_net_id,
1996 	.size = sizeof(struct kcm_net),
1997 };
1998 
kcm_init(void)1999 static int __init kcm_init(void)
2000 {
2001 	int err = -ENOMEM;
2002 
2003 	kcm_muxp = kmem_cache_create("kcm_mux_cache",
2004 				     sizeof(struct kcm_mux), 0,
2005 				     SLAB_HWCACHE_ALIGN, NULL);
2006 	if (!kcm_muxp)
2007 		goto fail;
2008 
2009 	kcm_psockp = kmem_cache_create("kcm_psock_cache",
2010 				       sizeof(struct kcm_psock), 0,
2011 					SLAB_HWCACHE_ALIGN, NULL);
2012 	if (!kcm_psockp)
2013 		goto fail;
2014 
2015 	kcm_wq = create_singlethread_workqueue("kkcmd");
2016 	if (!kcm_wq)
2017 		goto fail;
2018 
2019 	err = proto_register(&kcm_proto, 1);
2020 	if (err)
2021 		goto fail;
2022 
2023 	err = register_pernet_device(&kcm_net_ops);
2024 	if (err)
2025 		goto net_ops_fail;
2026 
2027 	err = sock_register(&kcm_family_ops);
2028 	if (err)
2029 		goto sock_register_fail;
2030 
2031 	err = kcm_proc_init();
2032 	if (err)
2033 		goto proc_init_fail;
2034 
2035 	return 0;
2036 
2037 proc_init_fail:
2038 	sock_unregister(PF_KCM);
2039 
2040 sock_register_fail:
2041 	unregister_pernet_device(&kcm_net_ops);
2042 
2043 net_ops_fail:
2044 	proto_unregister(&kcm_proto);
2045 
2046 fail:
2047 	kmem_cache_destroy(kcm_muxp);
2048 	kmem_cache_destroy(kcm_psockp);
2049 
2050 	if (kcm_wq)
2051 		destroy_workqueue(kcm_wq);
2052 
2053 	return err;
2054 }
2055 
kcm_exit(void)2056 static void __exit kcm_exit(void)
2057 {
2058 	kcm_proc_exit();
2059 	sock_unregister(PF_KCM);
2060 	unregister_pernet_device(&kcm_net_ops);
2061 	proto_unregister(&kcm_proto);
2062 	destroy_workqueue(kcm_wq);
2063 
2064 	kmem_cache_destroy(kcm_muxp);
2065 	kmem_cache_destroy(kcm_psockp);
2066 }
2067 
2068 module_init(kcm_init);
2069 module_exit(kcm_exit);
2070 
2071 MODULE_LICENSE("GPL");
2072 MODULE_ALIAS_NETPROTO(PF_KCM);
2073