// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

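/* The sg ring may wrap past the end of the array, so both the linear
 * and wrapped layouts are checked. Coalescing into the tail element is
 * only allowed while the ring is non-empty and sg.end has not wrapped
 * back past elem_first_coalesce.
 */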
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

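/* Grow @msg until its total size reaches @len bytes, allocating from the
 * socket's page frag and coalescing with the tail element when allowed.
 * Returns 0 on success, -ENOSPC if the sg ring is full, or -ENOMEM under
 * memory pressure, in which case @msg is trimmed back to its original
 * size.
 */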
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

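/* Share @len bytes starting at @off from @src with @dst. Pages are
 * referenced rather than copied, and ranges that are contiguous within a
 * page are merged into the tail element of @dst. Returns -ENOSPC if @src
 * runs out of data or @dst runs out of slots.
 */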
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

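/* Uncharge @bytes of socket memory accounting and consume the same
 * amount from the front of @msg, zeroing fully consumed elements and
 * advancing sg.start past them.
 */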
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

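/* Uncharge @bytes of socket memory accounting, walking the sg elements
 * but leaving their geometry untouched.
 */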
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

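/* Free @bytes from the front of @msg, releasing page references and,
 * when @charge is set, uncharging the socket memory accounting as we go.
 */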
static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

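/* Pin up to @bytes of user memory from @from and link the pages into
 * @msg as scatterlist entries (zerocopy). On error the iov_iter is
 * reverted; the caller is expected to trim @msg if the partially linked
 * data also needs to be dropped.
 */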
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and
		 * prefer the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates, msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

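/* Copy @bytes from @from into the sg elements already allocated in @msg,
 * resuming at sg.curr/copybreak and advancing both as data lands.
 */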
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

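/* Allocate a zeroed sk_msg for an ingress skb, but only if the socket
 * still has receive buffer space and can schedule the skb's truesize.
 */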
static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize() may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec() cannot error.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0))
		return num_sge;

	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * so skip memory accounting and the owner transition since both are
	 * already set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * via consume_skb() in __tcp_bpf_recvmsg() after it has been copied
	 * into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock_locked(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb);
}

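/* Workqueue function that drains psock->ingress_skb, either transmitting
 * each skb or queueing it for local ingress. On -EAGAIN the current skb
 * and progress are parked in work_state so the next run can resume.
 */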
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb;
	bool ingress;
	u32 len, off;
	int ret;

	/* Lock sock to avoid losing sk_socket during loop. */
	lock_sock(psock->sk);
	if (state->skb) {
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = tcp_skb_bpf_ingress(skb);
		do {
			ret = -EIO;
			if (likely(psock->sk->sk_socket))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					state->skb = skb;
					state->len = len;
					state->off = off;
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				kfree_skb(skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	release_sock(psock->sk);
}

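/* Allocate a psock and attach it to @sk via sk_user_data, saving the
 * original proto callbacks so they can be restored on teardown. Returns
 * an ERR_PTR if the socket has a ULP attached, sk_user_data is already
 * in use, or the allocation fails.
 */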
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	INIT_LIST_HEAD(&psock->ingress_msg);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void sk_psock_zap_ingress(struct sk_psock *psock)
{
	__skb_queue_purge(&psock->ingress_skb);
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

static void sk_psock_destroy_deferred(struct work_struct *gc)
{
	struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

	/* No sk_callback_lock since already detached. */

	/* Parser has been stopped */
	if (psock->progs.skb_parser)
		strp_done(&psock->parser.strp);

	cancel_work_sync(&psock->work);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

static void sk_psock_destroy(struct rcu_head *rcu)
{
	struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);

	INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
	schedule_work(&psock->gc);
}

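/* Detach the psock from @sk, restoring the original proto callbacks and
 * clearing sk_user_data; the psock itself is freed after an RCU grace
 * period via sk_psock_destroy().
 */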
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.skb_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);

	call_rcu(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

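/* Run the attached msg_parser BPF program on @msg and map its return
 * code to an internal verdict. On __SK_REDIRECT a reference is taken on
 * the redirect target socket; a missing target demotes the verdict to
 * __SK_DROP.
 */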
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
			    struct sk_buff *skb)
{
	bpf_compute_data_end_sk_skb(skb);
	return bpf_prog_run_pin_on_cpu(prog, skb);
}

static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
{
	struct sk_psock_parser *parser;

	parser = container_of(strp, struct sk_psock_parser, strp);
	return container_of(parser, struct sk_psock, parser);
}

static void sk_psock_skb_redirect(struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = tcp_skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * verdict but then didn't set a redirect socket.
	 */
	if (unlikely(!sk_other)) {
		kfree_skb(skb);
		return;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
	    !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		kfree_skb(skb);
		return;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock->sk, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

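/* Apply the mapped BPF verdict to @skb: queue it for local ingress on
 * __SK_PASS, hand it to another socket on __SK_REDIRECT, and free it on
 * __SK_DROP or any unknown verdict.
 */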
static void sk_psock_verdict_apply(struct sk_psock *psock,
				   struct sk_buff *skb, int verdict)
{
	struct tcp_skb_cb *tcp;
	struct sock *sk_other;
	int err = -EIO;

	switch (verdict) {
	case __SK_PASS:
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		tcp = TCP_SKB_CB(skb);
		tcp->bpf.flags |= BPF_F_INGRESS;

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Either
		 * way, errors from sk_psock_skb_ingress are handled by
		 * retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			err = sk_psock_skb_ingress_self(psock, skb);
		}
		if (err < 0) {
			skb_queue_tail(&psock->ingress_skb, skb);
			schedule_work(&psock->work);
		}
		break;
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_DROP:
	default:
out_free:
		kfree_skb(skb);
	}
}

static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		kfree_skb(skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = sk_psock_from_strp(strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = sk_psock_bpf_run(psock, prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->parser.saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->parser.strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

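/* read_sock() callback for verdict-only mode (no stream parser). Clones
 * the skb so tcp_read_sock() can eat the original, then runs the verdict
 * program and applies the result.
 */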
static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = orig_len;

	/* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		kfree_skb(skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

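/* Set up the stream parser callbacks for @psock; the parser stays
 * disabled until sk_psock_start_strp() swaps in the data_ready hook.
 */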
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	psock->parser.enabled = false;
	return strp_init(&psock->parser.strp, sk, &cb);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (parser->enabled)
		return;

	parser->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
	parser->enabled = true;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (parser->enabled)
		return;

	parser->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
	parser->enabled = true;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (!parser->enabled)
		return;

	sk->sk_data_ready = parser->saved_data_ready;
	parser->saved_data_ready = NULL;
	strp_stop(&parser->strp);
	parser->enabled = false;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (!parser->enabled)
		return;

	sk->sk_data_ready = parser->saved_data_ready;
	parser->saved_data_ready = NULL;
	parser->enabled = false;
}