1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3 
4 #include <linux/skmsg.h>
5 #include <linux/skbuff.h>
6 #include <linux/scatterlist.h>
7 
8 #include <net/sock.h>
9 #include <net/tcp.h>
10 #include <net/tls.h>
11 
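/* Coalescing into the last sg element is only allowed when the ring is
 * non-empty and elem_first_coalesce lies within the in-use region; the two
 * checks below cover the linear and the wrapped-around ring layouts.
 */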
12 static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
13 {
14 	if (msg->sg.end > msg->sg.start &&
15 	    elem_first_coalesce < msg->sg.end)
16 		return true;
17 
18 	if (msg->sg.end < msg->sg.start &&
19 	    (elem_first_coalesce > msg->sg.start ||
20 	     elem_first_coalesce < msg->sg.end))
21 		return true;
22 
23 	return false;
24 }
25 
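/* Grow @msg so it holds @len bytes in total, allocating from the socket's
 * page frag. New bytes are merged into the last sg element when
 * sk_msg_try_coalesce_ok() allows it, otherwise a new element is appended.
 * On -ENOMEM the message is trimmed back to its original size; -ENOSPC
 * means the sg ring ran out of elements.
 */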
26 int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
27 		 int elem_first_coalesce)
28 {
29 	struct page_frag *pfrag = sk_page_frag(sk);
30 	u32 osize = msg->sg.size;
31 	int ret = 0;
32 
33 	len -= msg->sg.size;
34 	while (len > 0) {
35 		struct scatterlist *sge;
36 		u32 orig_offset;
37 		int use, i;
38 
39 		if (!sk_page_frag_refill(sk, pfrag)) {
40 			ret = -ENOMEM;
41 			goto msg_trim;
42 		}
43 
44 		orig_offset = pfrag->offset;
45 		use = min_t(int, len, pfrag->size - orig_offset);
46 		if (!sk_wmem_schedule(sk, use)) {
47 			ret = -ENOMEM;
48 			goto msg_trim;
49 		}
50 
51 		i = msg->sg.end;
52 		sk_msg_iter_var_prev(i);
53 		sge = &msg->sg.data[i];
54 
55 		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
56 		    sg_page(sge) == pfrag->page &&
57 		    sge->offset + sge->length == orig_offset) {
58 			sge->length += use;
59 		} else {
60 			if (sk_msg_full(msg)) {
61 				ret = -ENOSPC;
62 				break;
63 			}
64 
65 			sge = &msg->sg.data[msg->sg.end];
66 			sg_unmark_end(sge);
67 			sg_set_page(sge, pfrag->page, use, orig_offset);
68 			get_page(pfrag->page);
69 			sk_msg_iter_next(msg, end);
70 		}
71 
72 		sk_mem_charge(sk, use);
73 		msg->sg.size += use;
74 		pfrag->offset += use;
75 		len -= use;
76 	}
77 
78 	return ret;
79 
80 msg_trim:
81 	sk_msg_trim(sk, msg, osize);
82 	return ret;
83 }
84 EXPORT_SYMBOL_GPL(sk_msg_alloc);
85 
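/* Clone @len bytes starting at offset @off of @src into @dst without copying
 * the data: the underlying pages are shared and the socket is charged for
 * the cloned bytes. A region that directly continues the last @dst element
 * on the same page is merged into it. Returns -ENOSPC when @src runs out of
 * data or @dst runs out of sg elements.
 */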
86 int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
87 		 u32 off, u32 len)
88 {
89 	int i = src->sg.start;
90 	struct scatterlist *sge = sk_msg_elem(src, i);
91 	struct scatterlist *sgd = NULL;
92 	u32 sge_len, sge_off;
93 
94 	while (off) {
95 		if (sge->length > off)
96 			break;
97 		off -= sge->length;
98 		sk_msg_iter_var_next(i);
99 		if (i == src->sg.end && off)
100 			return -ENOSPC;
101 		sge = sk_msg_elem(src, i);
102 	}
103 
104 	while (len) {
105 		sge_len = sge->length - off;
106 		if (sge_len > len)
107 			sge_len = len;
108 
109 		if (dst->sg.end)
110 			sgd = sk_msg_elem(dst, dst->sg.end - 1);
111 
112 		if (sgd &&
113 		    (sg_page(sge) == sg_page(sgd)) &&
114 		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
115 			sgd->length += sge_len;
116 			dst->sg.size += sge_len;
117 		} else if (!sk_msg_full(dst)) {
118 			sge_off = sge->offset + off;
119 			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
120 		} else {
121 			return -ENOSPC;
122 		}
123 
124 		off = 0;
125 		len -= sge_len;
126 		sk_mem_charge(sk, sge_len);
127 		sk_msg_iter_var_next(i);
128 		if (i == src->sg.end && len)
129 			return -ENOSPC;
130 		sge = sk_msg_elem(src, i);
131 	}
132 
133 	return 0;
134 }
135 EXPORT_SYMBOL_GPL(sk_msg_clone);
136 
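/* Uncharge @bytes from the socket and consume them from the front of @msg,
 * zeroing every sg element that is fully returned and advancing sg.start.
 */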
137 void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
138 {
139 	int i = msg->sg.start;
140 
141 	do {
142 		struct scatterlist *sge = sk_msg_elem(msg, i);
143 
144 		if (bytes < sge->length) {
145 			sge->length -= bytes;
146 			sge->offset += bytes;
147 			sk_mem_uncharge(sk, bytes);
148 			break;
149 		}
150 
151 		sk_mem_uncharge(sk, sge->length);
152 		bytes -= sge->length;
153 		sge->length = 0;
154 		sge->offset = 0;
155 		sk_msg_iter_var_next(i);
156 	} while (bytes && i != msg->sg.end);
157 	msg->sg.start = i;
158 }
159 EXPORT_SYMBOL_GPL(sk_msg_return_zero);
160 
161 void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
162 {
163 	int i = msg->sg.start;
164 
165 	do {
166 		struct scatterlist *sge = &msg->sg.data[i];
167 		int uncharge = (bytes < sge->length) ? bytes : sge->length;
168 
169 		sk_mem_uncharge(sk, uncharge);
170 		bytes -= uncharge;
171 		sk_msg_iter_var_next(i);
172 	} while (i != msg->sg.end);
173 }
174 EXPORT_SYMBOL_GPL(sk_msg_return);
175 
176 static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
177 			    bool charge)
178 {
179 	struct scatterlist *sge = sk_msg_elem(msg, i);
180 	u32 len = sge->length;
181 
182 	/* When the skb owns the memory we free it via the consume_skb() path. */
183 	if (!msg->skb) {
184 		if (charge)
185 			sk_mem_uncharge(sk, len);
186 		put_page(sg_page(sge));
187 	}
188 	memset(sge, 0, sizeof(*sge));
189 	return len;
190 }
191 
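/* Free every sg element of @msg starting at index @i: drop the page
 * references (unless the attached skb owns the memory), optionally uncharge
 * the socket, release the skb and reinitialize @msg. Returns bytes freed.
 */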
192 static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
193 			 bool charge)
194 {
195 	struct scatterlist *sge = sk_msg_elem(msg, i);
196 	int freed = 0;
197 
198 	while (msg->sg.size) {
199 		msg->sg.size -= sge->length;
200 		freed += sk_msg_free_elem(sk, msg, i, charge);
201 		sk_msg_iter_var_next(i);
202 		sk_msg_check_to_free(msg, i, msg->sg.size);
203 		sge = sk_msg_elem(msg, i);
204 	}
205 	consume_skb(msg->skb);
206 	sk_msg_init(msg);
207 	return freed;
208 }
209 
210 int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
211 {
212 	return __sk_msg_free(sk, msg, msg->sg.start, false);
213 }
214 EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
215 
216 int sk_msg_free(struct sock *sk, struct sk_msg *msg)
217 {
218 	return __sk_msg_free(sk, msg, msg->sg.start, true);
219 }
220 EXPORT_SYMBOL_GPL(sk_msg_free);
221 
222 static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
223 				  u32 bytes, bool charge)
224 {
225 	struct scatterlist *sge;
226 	u32 i = msg->sg.start;
227 
228 	while (bytes) {
229 		sge = sk_msg_elem(msg, i);
230 		if (!sge->length)
231 			break;
232 		if (bytes < sge->length) {
233 			if (charge)
234 				sk_mem_uncharge(sk, bytes);
235 			sge->length -= bytes;
236 			sge->offset += bytes;
237 			msg->sg.size -= bytes;
238 			break;
239 		}
240 
241 		msg->sg.size -= sge->length;
242 		bytes -= sge->length;
243 		sk_msg_free_elem(sk, msg, i, charge);
244 		sk_msg_iter_var_next(i);
245 		sk_msg_check_to_free(msg, i, bytes);
246 	}
247 	msg->sg.start = i;
248 }
249 
250 void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
251 {
252 	__sk_msg_free_partial(sk, msg, bytes, true);
253 }
254 EXPORT_SYMBOL_GPL(sk_msg_free_partial);
255 
256 void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
257 				  u32 bytes)
258 {
259 	__sk_msg_free_partial(sk, msg, bytes, false);
260 }
261 
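/* Shrink @msg so it holds at most @len bytes: whole trailing elements are
 * freed and the last remaining element is shortened. sg.curr and copybreak
 * are pulled back when they would otherwise point past the new end.
 */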
262 void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
263 {
264 	int trim = msg->sg.size - len;
265 	u32 i = msg->sg.end;
266 
267 	if (trim <= 0) {
268 		WARN_ON(trim < 0);
269 		return;
270 	}
271 
272 	sk_msg_iter_var_prev(i);
273 	msg->sg.size = len;
274 	while (msg->sg.data[i].length &&
275 	       trim >= msg->sg.data[i].length) {
276 		trim -= msg->sg.data[i].length;
277 		sk_msg_free_elem(sk, msg, i, true);
278 		sk_msg_iter_var_prev(i);
279 		if (!trim)
280 			goto out;
281 	}
282 
283 	msg->sg.data[i].length -= trim;
284 	sk_mem_uncharge(sk, trim);
285 	/* Adjust copybreak if it falls into the trimmed part of last buf */
286 	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
287 		msg->sg.copybreak = msg->sg.data[i].length;
288 out:
289 	sk_msg_iter_var_next(i);
290 	msg->sg.end = i;
291 
292 	/* If we trim data a full sg elem before the curr pointer, update
293 	 * copybreak and curr so that any future copy operations
294 	 * start at the new copy location.
295 	 * However, trimmed data that has not yet been used in a copy op
296 	 * does not require an update.
297 	 */
298 	if (!msg->sg.size) {
299 		msg->sg.curr = msg->sg.start;
300 		msg->sg.copybreak = 0;
301 	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
302 		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
303 		sk_msg_iter_var_prev(i);
304 		msg->sg.curr = i;
305 		msg->sg.copybreak = msg->sg.data[i].length;
306 	}
307 }
308 EXPORT_SYMBOL_GPL(sk_msg_trim);
309 
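/* Pin user pages from @from and hang them off @msg as sg elements without
 * copying the data, charging the socket for every byte added. Fails with
 * -EFAULT when the sg ring is full or the pages cannot be grabbed; on
 * failure the iterator is reverted so the caller can fall back to copying.
 */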
310 int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
311 			      struct sk_msg *msg, u32 bytes)
312 {
313 	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
314 	const int to_max_pages = MAX_MSG_FRAGS;
315 	struct page *pages[MAX_MSG_FRAGS];
316 	ssize_t orig, copied, use, offset;
317 
318 	orig = msg->sg.size;
319 	while (bytes > 0) {
320 		i = 0;
321 		maxpages = to_max_pages - num_elems;
322 		if (maxpages == 0) {
323 			ret = -EFAULT;
324 			goto out;
325 		}
326 
327 		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
328 					    &offset);
329 		if (copied <= 0) {
330 			ret = -EFAULT;
331 			goto out;
332 		}
333 
334 		bytes -= copied;
335 		msg->sg.size += copied;
336 
337 		while (copied) {
338 			use = min_t(int, copied, PAGE_SIZE - offset);
339 			sg_set_page(&msg->sg.data[msg->sg.end],
340 				    pages[i], use, offset);
341 			sg_unmark_end(&msg->sg.data[msg->sg.end]);
342 			sk_mem_charge(sk, use);
343 
344 			offset = 0;
345 			copied -= use;
346 			sk_msg_iter_next(msg, end);
347 			num_elems++;
348 			i++;
349 		}
350 		/* When zerocopy is mixed with sk_msg_*copy* operations we
351 		 * may have a copybreak set; in that case clear it and prefer
352 		 * the zerocopy remainder when possible.
353 		 */
354 		msg->sg.copybreak = 0;
355 		msg->sg.curr = msg->sg.end;
356 	}
357 out:
358 	/* Revert the iov_iter updates; msg will need to be trimmed later if it
359 	 * also needs to be cleared.
360 	 */
361 	if (ret)
362 		iov_iter_revert(from, msg->sg.size - orig);
363 	return ret;
364 }
365 EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
366 
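/* Copy @bytes from @from into sg elements that @msg already owns, starting
 * at the sg.curr element and the copybreak offset within it. Returns a
 * negative errno when there is no room (-ENOSPC) or the copy faults
 * (-EFAULT).
 */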
367 int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
368 			     struct sk_msg *msg, u32 bytes)
369 {
370 	int ret = -ENOSPC, i = msg->sg.curr;
371 	struct scatterlist *sge;
372 	u32 copy, buf_size;
373 	void *to;
374 
375 	do {
376 		sge = sk_msg_elem(msg, i);
377 		/* This is possible if a trim operation shrunk the buffer */
378 		if (msg->sg.copybreak >= sge->length) {
379 			msg->sg.copybreak = 0;
380 			sk_msg_iter_var_next(i);
381 			if (i == msg->sg.end)
382 				break;
383 			sge = sk_msg_elem(msg, i);
384 		}
385 
386 		buf_size = sge->length - msg->sg.copybreak;
387 		copy = (buf_size > bytes) ? bytes : buf_size;
388 		to = sg_virt(sge) + msg->sg.copybreak;
389 		msg->sg.copybreak += copy;
390 		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
391 			ret = copy_from_iter_nocache(to, copy, from);
392 		else
393 			ret = copy_from_iter(to, copy, from);
394 		if (ret != copy) {
395 			ret = -EFAULT;
396 			goto out;
397 		}
398 		bytes -= copy;
399 		if (!bytes)
400 			break;
401 		msg->sg.copybreak = 0;
402 		sk_msg_iter_var_next(i);
403 	} while (i != msg->sg.end);
404 out:
405 	msg->sg.curr = i;
406 	return ret;
407 }
408 EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
409 
410 /* Receive sk_msg from psock->ingress_msg to @msg. */
411 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
412 		   int len, int flags)
413 {
414 	struct iov_iter *iter = &msg->msg_iter;
415 	int peek = flags & MSG_PEEK;
416 	struct sk_msg *msg_rx;
417 	int i, copied = 0;
418 
419 	msg_rx = sk_psock_peek_msg(psock);
420 	while (copied != len) {
421 		struct scatterlist *sge;
422 
423 		if (unlikely(!msg_rx))
424 			break;
425 
426 		i = msg_rx->sg.start;
427 		do {
428 			struct page *page;
429 			int copy;
430 
431 			sge = sk_msg_elem(msg_rx, i);
432 			copy = sge->length;
433 			page = sg_page(sge);
434 			if (copied + copy > len)
435 				copy = len - copied;
436 			copy = copy_page_to_iter(page, sge->offset, copy, iter);
437 			if (!copy) {
438 				copied = copied ? copied : -EFAULT;
439 				goto out;
440 			}
441 
442 			copied += copy;
443 			if (likely(!peek)) {
444 				sge->offset += copy;
445 				sge->length -= copy;
446 				if (!msg_rx->skb)
447 					sk_mem_uncharge(sk, copy);
448 				msg_rx->sg.size -= copy;
449 
450 				if (!sge->length) {
451 					sk_msg_iter_var_next(i);
452 					if (!msg_rx->skb)
453 						put_page(page);
454 				}
455 			} else {
456 				/* Let's not optimize the peek case: if copy_page_to_iter
457 				 * didn't copy the entire length, just break.
458 				 */
459 				if (copy != sge->length)
460 					goto out;
461 				sk_msg_iter_var_next(i);
462 			}
463 
464 			if (copied == len)
465 				break;
466 		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));
467 
468 		if (unlikely(peek)) {
469 			msg_rx = sk_psock_next_msg(psock, msg_rx);
470 			if (!msg_rx)
471 				break;
472 			continue;
473 		}
474 
475 		msg_rx->sg.start = i;
476 		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
477 			msg_rx = sk_psock_dequeue_msg(psock);
478 			kfree_sk_msg(msg_rx);
479 		}
480 		msg_rx = sk_psock_peek_msg(psock);
481 	}
482 out:
483 	return copied;
484 }
485 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
486 
487 bool sk_msg_is_readable(struct sock *sk)
488 {
489 	struct sk_psock *psock;
490 	bool empty = true;
491 
492 	rcu_read_lock();
493 	psock = sk_psock(sk);
494 	if (likely(psock))
495 		empty = list_empty(&psock->ingress_msg);
496 	rcu_read_unlock();
497 	return !empty;
498 }
499 EXPORT_SYMBOL_GPL(sk_msg_is_readable);
500 
501 static struct sk_msg *alloc_sk_msg(gfp_t gfp)
502 {
503 	struct sk_msg *msg;
504 
505 	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
506 	if (unlikely(!msg))
507 		return NULL;
508 	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
509 	return msg;
510 }
511 
512 static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
513 						  struct sk_buff *skb)
514 {
515 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
516 		return NULL;
517 
518 	if (!sk_rmem_schedule(sk, skb, skb->truesize))
519 		return NULL;
520 
521 	return alloc_sk_msg(GFP_KERNEL);
522 }
523 
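/* Map @len bytes of @skb starting at @off into @msg's sg ring, attach the
 * skb to the message, queue it on the psock ingress list and wake up the
 * receiver. Returns the number of bytes queued or a negative errno.
 */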
524 static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
525 					u32 off, u32 len,
526 					struct sk_psock *psock,
527 					struct sock *sk,
528 					struct sk_msg *msg)
529 {
530 	int num_sge, copied;
531 
532 	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
533 	if (num_sge < 0) {
534 		/* skb linearize may fail with ENOMEM, but let's simply try again
535 		 * later if this happens. Under memory pressure we don't want to
536 		 * drop the skb. We need to linearize the skb so that the mapping
537 		 * in skb_to_sgvec cannot error.
538 		 */
539 		if (skb_linearize(skb))
540 			return -EAGAIN;
541 
542 		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
543 		if (unlikely(num_sge < 0))
544 			return num_sge;
545 	}
546 
547 	copied = len;
548 	msg->sg.start = 0;
549 	msg->sg.size = copied;
550 	msg->sg.end = num_sge;
551 	msg->skb = skb;
552 
553 	sk_psock_queue_msg(psock, msg);
554 	sk_psock_data_ready(sk, psock);
555 	return copied;
556 }
557 
558 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
559 				     u32 off, u32 len);
560 
561 static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
562 				u32 off, u32 len)
563 {
564 	struct sock *sk = psock->sk;
565 	struct sk_msg *msg;
566 	int err;
567 
568 	/* If we are receiving on the same sock, skb->sk is already assigned;
569 	 * skip memory accounting and owner transition since they are already
570 	 * set correctly.
571 	 */
572 	if (unlikely(skb->sk == sk))
573 		return sk_psock_skb_ingress_self(psock, skb, off, len);
574 	msg = sk_psock_create_ingress_msg(sk, skb);
575 	if (!msg)
576 		return -EAGAIN;
577 
578 	/* This will transition ownership of the data from the socket where
579 	 * the BPF program was run initiating the redirect to the socket
580 	 * we will eventually receive this data on. The data will be released
581 	 * by consume_skb() found in __tcp_bpf_recvmsg() after it has been copied
582 	 * into user buffers.
583 	 */
584 	skb_set_owner_r(skb, sk);
585 	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
586 	if (err < 0)
587 		kfree(msg);
588 	return err;
589 }
590 
591 /* Puts an skb on the ingress queue of the socket already assigned to the
592  * skb. In this case we do not need to check memory limits or skb_set_owner_r
593  * because the skb is already accounted for here.
594  */
595 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
596 				     u32 off, u32 len)
597 {
598 	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
599 	struct sock *sk = psock->sk;
600 	int err;
601 
602 	if (unlikely(!msg))
603 		return -EAGAIN;
604 	skb_set_owner_r(skb, sk);
605 	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
606 	if (err < 0)
607 		kfree(msg);
608 	return err;
609 }
610 
611 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
612 			       u32 off, u32 len, bool ingress)
613 {
614 	int err = 0;
615 
616 	if (!ingress) {
617 		if (!sock_writeable(psock->sk))
618 			return -EAGAIN;
619 		return skb_send_sock(psock->sk, skb, off, len);
620 	}
621 	skb_get(skb);
622 	err = sk_psock_skb_ingress(psock, skb, off, len);
623 	if (err < 0)
624 		kfree_skb(skb);
625 	return err;
626 }
627 
628 static void sk_psock_skb_state(struct sk_psock *psock,
629 			       struct sk_psock_work_state *state,
630 			       int len, int off)
631 {
632 	spin_lock_bh(&psock->ingress_lock);
633 	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
634 		state->len = len;
635 		state->off = off;
636 	}
637 	spin_unlock_bh(&psock->ingress_lock);
638 }
639 
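/* Delayed-work handler draining psock->ingress_skb: each skb is either sent
 * out (egress) or converted into an ingress sk_msg. On -EAGAIN the current
 * length/offset is stashed in work_state and the work is rescheduled; any
 * other failure reports an error on the psock and disables TX.
 */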
640 static void sk_psock_backlog(struct work_struct *work)
641 {
642 	struct delayed_work *dwork = to_delayed_work(work);
643 	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
644 	struct sk_psock_work_state *state = &psock->work_state;
645 	struct sk_buff *skb = NULL;
646 	u32 len = 0, off = 0;
647 	bool ingress;
648 	int ret;
649 
650 	mutex_lock(&psock->work_mutex);
651 	if (unlikely(state->len)) {
652 		len = state->len;
653 		off = state->off;
654 	}
655 
656 	while ((skb = skb_peek(&psock->ingress_skb))) {
657 		len = skb->len;
658 		off = 0;
659 		if (skb_bpf_strparser(skb)) {
660 			struct strp_msg *stm = strp_msg(skb);
661 
662 			off = stm->offset;
663 			len = stm->full_len;
664 		}
665 		ingress = skb_bpf_ingress(skb);
666 		skb_bpf_redirect_clear(skb);
667 		do {
668 			ret = -EIO;
669 			if (!sock_flag(psock->sk, SOCK_DEAD))
670 				ret = sk_psock_handle_skb(psock, skb, off,
671 							  len, ingress);
672 			if (ret <= 0) {
673 				if (ret == -EAGAIN) {
674 					sk_psock_skb_state(psock, state, len, off);
675 
676 					/* Delay slightly to prioritize any
677 					 * other work that might be here.
678 					 */
679 					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
680 						schedule_delayed_work(&psock->work, 1);
681 					goto end;
682 				}
683 				/* Hard errors break pipe and stop xmit. */
684 				sk_psock_report_error(psock, ret ? -ret : EPIPE);
685 				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
686 				goto end;
687 			}
688 			off += ret;
689 			len -= ret;
690 		} while (len);
691 
692 		skb = skb_dequeue(&psock->ingress_skb);
693 		kfree_skb(skb);
694 	}
695 end:
696 	mutex_unlock(&psock->work_mutex);
697 }
698 
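/* Allocate a psock for @sk, save the socket's original proto callbacks, set
 * up the ingress queues and backlog work, and attach the psock via
 * sk_user_data. Returns an ERR_PTR when a ULP is in use, sk_user_data is
 * already taken, or the allocation fails.
 */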
699 struct sk_psock *sk_psock_init(struct sock *sk, int node)
700 {
701 	struct sk_psock *psock;
702 	struct proto *prot;
703 
704 	write_lock_bh(&sk->sk_callback_lock);
705 
706 	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
707 		psock = ERR_PTR(-EINVAL);
708 		goto out;
709 	}
710 
711 	if (sk->sk_user_data) {
712 		psock = ERR_PTR(-EBUSY);
713 		goto out;
714 	}
715 
716 	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
717 	if (!psock) {
718 		psock = ERR_PTR(-ENOMEM);
719 		goto out;
720 	}
721 
722 	prot = READ_ONCE(sk->sk_prot);
723 	psock->sk = sk;
724 	psock->eval = __SK_NONE;
725 	psock->sk_proto = prot;
726 	psock->saved_unhash = prot->unhash;
727 	psock->saved_destroy = prot->destroy;
728 	psock->saved_close = prot->close;
729 	psock->saved_write_space = sk->sk_write_space;
730 
731 	INIT_LIST_HEAD(&psock->link);
732 	spin_lock_init(&psock->link_lock);
733 
734 	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
735 	mutex_init(&psock->work_mutex);
736 	INIT_LIST_HEAD(&psock->ingress_msg);
737 	spin_lock_init(&psock->ingress_lock);
738 	skb_queue_head_init(&psock->ingress_skb);
739 
740 	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
741 	refcount_set(&psock->refcnt, 1);
742 
743 	__rcu_assign_sk_user_data_with_flags(sk, psock,
744 					     SK_USER_DATA_NOCOPY |
745 					     SK_USER_DATA_PSOCK);
746 	sock_hold(sk);
747 
748 out:
749 	write_unlock_bh(&sk->sk_callback_lock);
750 	return psock;
751 }
752 EXPORT_SYMBOL_GPL(sk_psock_init);
753 
754 struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
755 {
756 	struct sk_psock_link *link;
757 
758 	spin_lock_bh(&psock->link_lock);
759 	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
760 					list);
761 	if (link)
762 		list_del(&link->list);
763 	spin_unlock_bh(&psock->link_lock);
764 	return link;
765 }
766 
767 static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
768 {
769 	struct sk_msg *msg, *tmp;
770 
771 	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
772 		list_del(&msg->list);
773 		sk_msg_free(psock->sk, msg);
774 		kfree(msg);
775 	}
776 }
777 
778 static void __sk_psock_zap_ingress(struct sk_psock *psock)
779 {
780 	struct sk_buff *skb;
781 
782 	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
783 		skb_bpf_redirect_clear(skb);
784 		sock_drop(psock->sk, skb);
785 	}
786 	__sk_psock_purge_ingress_msg(psock);
787 }
788 
789 static void sk_psock_link_destroy(struct sk_psock *psock)
790 {
791 	struct sk_psock_link *link, *tmp;
792 
793 	list_for_each_entry_safe(link, tmp, &psock->link, list) {
794 		list_del(&link->list);
795 		sk_psock_free_link(link);
796 	}
797 }
798 
799 void sk_psock_stop(struct sk_psock *psock)
800 {
801 	spin_lock_bh(&psock->ingress_lock);
802 	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
803 	sk_psock_cork_free(psock);
804 	spin_unlock_bh(&psock->ingress_lock);
805 }
806 
807 static void sk_psock_done_strp(struct sk_psock *psock);
808 
809 static void sk_psock_destroy(struct work_struct *work)
810 {
811 	struct sk_psock *psock = container_of(to_rcu_work(work),
812 					      struct sk_psock, rwork);
813 	/* No sk_callback_lock since already detached. */
814 
815 	sk_psock_done_strp(psock);
816 
817 	cancel_delayed_work_sync(&psock->work);
818 	__sk_psock_zap_ingress(psock);
819 	mutex_destroy(&psock->work_mutex);
820 
821 	psock_progs_drop(&psock->progs);
822 
823 	sk_psock_link_destroy(psock);
824 	sk_psock_cork_free(psock);
825 
826 	if (psock->sk_redir)
827 		sock_put(psock->sk_redir);
828 	sock_put(psock->sk);
829 	kfree(psock);
830 }
831 
832 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
833 {
834 	write_lock_bh(&sk->sk_callback_lock);
835 	sk_psock_restore_proto(sk, psock);
836 	rcu_assign_sk_user_data(sk, NULL);
837 	if (psock->progs.stream_parser)
838 		sk_psock_stop_strp(sk, psock);
839 	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
840 		sk_psock_stop_verdict(sk, psock);
841 	write_unlock_bh(&sk->sk_callback_lock);
842 
843 	sk_psock_stop(psock);
844 
845 	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
846 	queue_rcu_work(system_wq, &psock->rwork);
847 }
848 EXPORT_SYMBOL_GPL(sk_psock_drop);
849 
850 static int sk_psock_map_verd(int verdict, bool redir)
851 {
852 	switch (verdict) {
853 	case SK_PASS:
854 		return redir ? __SK_REDIRECT : __SK_PASS;
855 	case SK_DROP:
856 	default:
857 		break;
858 	}
859 
860 	return __SK_DROP;
861 }
862 
863 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
864 			 struct sk_msg *msg)
865 {
866 	struct bpf_prog *prog;
867 	int ret;
868 
869 	rcu_read_lock();
870 	prog = READ_ONCE(psock->progs.msg_parser);
871 	if (unlikely(!prog)) {
872 		ret = __SK_PASS;
873 		goto out;
874 	}
875 
876 	sk_msg_compute_data_pointers(msg);
877 	msg->sk = sk;
878 	ret = bpf_prog_run_pin_on_cpu(prog, msg);
879 	ret = sk_psock_map_verd(ret, msg->sk_redir);
880 	psock->apply_bytes = msg->apply_bytes;
881 	if (ret == __SK_REDIRECT) {
882 		if (psock->sk_redir) {
883 			sock_put(psock->sk_redir);
884 			psock->sk_redir = NULL;
885 		}
886 		if (!msg->sk_redir) {
887 			ret = __SK_DROP;
888 			goto out;
889 		}
890 		psock->redir_ingress = sk_msg_to_ingress(msg);
891 		psock->sk_redir = msg->sk_redir;
892 		sock_hold(psock->sk_redir);
893 	}
894 out:
895 	rcu_read_unlock();
896 	return ret;
897 }
898 EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
899 
900 static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
901 {
902 	struct sk_psock *psock_other;
903 	struct sock *sk_other;
904 
905 	sk_other = skb_bpf_redirect_fetch(skb);
906 	/* This error means the BPF program is buggy: it returned a redirect
907 	 * verdict but then didn't set a redirect socket.
908 	 */
909 	if (unlikely(!sk_other)) {
910 		skb_bpf_redirect_clear(skb);
911 		sock_drop(from->sk, skb);
912 		return -EIO;
913 	}
914 	psock_other = sk_psock(sk_other);
915 	/* This error indicates the socket is being torn down or had another
916 	 * error that caused the pipe to break. We can't send a packet on
917 	 * a socket that is in this state so we drop the skb.
918 	 */
919 	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
920 		skb_bpf_redirect_clear(skb);
921 		sock_drop(from->sk, skb);
922 		return -EIO;
923 	}
924 	spin_lock_bh(&psock_other->ingress_lock);
925 	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
926 		spin_unlock_bh(&psock_other->ingress_lock);
927 		skb_bpf_redirect_clear(skb);
928 		sock_drop(from->sk, skb);
929 		return -EIO;
930 	}
931 
932 	skb_queue_tail(&psock_other->ingress_skb, skb);
933 	schedule_delayed_work(&psock_other->work, 0);
934 	spin_unlock_bh(&psock_other->ingress_lock);
935 	return 0;
936 }
937 
938 static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
939 				       struct sk_psock *from, int verdict)
940 {
941 	switch (verdict) {
942 	case __SK_REDIRECT:
943 		sk_psock_skb_redirect(from, skb);
944 		break;
945 	case __SK_PASS:
946 	case __SK_DROP:
947 	default:
948 		break;
949 	}
950 }
951 
952 int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
953 {
954 	struct bpf_prog *prog;
955 	int ret = __SK_PASS;
956 
957 	rcu_read_lock();
958 	prog = READ_ONCE(psock->progs.stream_verdict);
959 	if (likely(prog)) {
960 		skb->sk = psock->sk;
961 		skb_dst_drop(skb);
962 		skb_bpf_redirect_clear(skb);
963 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
964 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
965 		skb->sk = NULL;
966 	}
967 	sk_psock_tls_verdict_apply(skb, psock, ret);
968 	rcu_read_unlock();
969 	return ret;
970 }
971 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
972 
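/* Apply the BPF verdict to @skb: __SK_PASS feeds it into the local ingress
 * message queue (deferring to the backlog workqueue when needed),
 * __SK_REDIRECT hands it to the target psock, anything else drops it.
 */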
973 static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
974 				  int verdict)
975 {
976 	struct sock *sk_other;
977 	int err = 0;
978 	u32 len, off;
979 
980 	switch (verdict) {
981 	case __SK_PASS:
982 		err = -EIO;
983 		sk_other = psock->sk;
984 		if (sock_flag(sk_other, SOCK_DEAD) ||
985 		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
986 			goto out_free;
987 
988 		skb_bpf_set_ingress(skb);
989 
990 		/* If the queue is empty then we can submit directly
991 		 * into the msg queue. If it's not empty we have to
992 		 * queue the work, otherwise we may get out-of-order data.
993 		 * Any sk_psock_skb_ingress errors will be handled by
994 		 * retrying later from the workqueue.
995 		 */
996 		if (skb_queue_empty(&psock->ingress_skb)) {
997 			len = skb->len;
998 			off = 0;
999 			if (skb_bpf_strparser(skb)) {
1000 				struct strp_msg *stm = strp_msg(skb);
1001 
1002 				off = stm->offset;
1003 				len = stm->full_len;
1004 			}
1005 			err = sk_psock_skb_ingress_self(psock, skb, off, len);
1006 		}
1007 		if (err < 0) {
1008 			spin_lock_bh(&psock->ingress_lock);
1009 			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
1010 				skb_queue_tail(&psock->ingress_skb, skb);
1011 				schedule_delayed_work(&psock->work, 0);
1012 				err = 0;
1013 			}
1014 			spin_unlock_bh(&psock->ingress_lock);
1015 			if (err < 0)
1016 				goto out_free;
1017 		}
1018 		break;
1019 	case __SK_REDIRECT:
1020 		tcp_eat_skb(psock->sk, skb);
1021 		err = sk_psock_skb_redirect(psock, skb);
1022 		break;
1023 	case __SK_DROP:
1024 	default:
1025 out_free:
1026 		skb_bpf_redirect_clear(skb);
1027 		tcp_eat_skb(psock->sk, skb);
1028 		sock_drop(psock->sk, skb);
1029 	}
1030 
1031 	return err;
1032 }
1033 
1034 static void sk_psock_write_space(struct sock *sk)
1035 {
1036 	struct sk_psock *psock;
1037 	void (*write_space)(struct sock *sk) = NULL;
1038 
1039 	rcu_read_lock();
1040 	psock = sk_psock(sk);
1041 	if (likely(psock)) {
1042 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1043 			schedule_delayed_work(&psock->work, 0);
1044 		write_space = psock->saved_write_space;
1045 	}
1046 	rcu_read_unlock();
1047 	if (write_space)
1048 		write_space(sk);
1049 }
1050 
1051 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1052 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
1053 {
1054 	struct sk_psock *psock;
1055 	struct bpf_prog *prog;
1056 	int ret = __SK_DROP;
1057 	struct sock *sk;
1058 
1059 	rcu_read_lock();
1060 	sk = strp->sk;
1061 	psock = sk_psock(sk);
1062 	if (unlikely(!psock)) {
1063 		sock_drop(sk, skb);
1064 		goto out;
1065 	}
1066 	prog = READ_ONCE(psock->progs.stream_verdict);
1067 	if (likely(prog)) {
1068 		skb->sk = sk;
1069 		skb_dst_drop(skb);
1070 		skb_bpf_redirect_clear(skb);
1071 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1072 		skb_bpf_set_strparser(skb);
1073 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1074 		skb->sk = NULL;
1075 	}
1076 	sk_psock_verdict_apply(psock, skb, ret);
1077 out:
1078 	rcu_read_unlock();
1079 }
1080 
1081 static int sk_psock_strp_read_done(struct strparser *strp, int err)
1082 {
1083 	return err;
1084 }
1085 
1086 static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1087 {
1088 	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1089 	struct bpf_prog *prog;
1090 	int ret = skb->len;
1091 
1092 	rcu_read_lock();
1093 	prog = READ_ONCE(psock->progs.stream_parser);
1094 	if (likely(prog)) {
1095 		skb->sk = psock->sk;
1096 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1097 		skb->sk = NULL;
1098 	}
1099 	rcu_read_unlock();
1100 	return ret;
1101 }
1102 
1103 /* Called with socket lock held. */
1104 static void sk_psock_strp_data_ready(struct sock *sk)
1105 {
1106 	struct sk_psock *psock;
1107 
1108 	rcu_read_lock();
1109 	psock = sk_psock(sk);
1110 	if (likely(psock)) {
1111 		if (tls_sw_has_ctx_rx(sk)) {
1112 			psock->saved_data_ready(sk);
1113 		} else {
1114 			write_lock_bh(&sk->sk_callback_lock);
1115 			strp_data_ready(&psock->strp);
1116 			write_unlock_bh(&sk->sk_callback_lock);
1117 		}
1118 	}
1119 	rcu_read_unlock();
1120 }
1121 
1122 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1123 {
1124 	int ret;
1125 
1126 	static const struct strp_callbacks cb = {
1127 		.rcv_msg	= sk_psock_strp_read,
1128 		.read_sock_done	= sk_psock_strp_read_done,
1129 		.parse_msg	= sk_psock_strp_parse,
1130 	};
1131 
1132 	ret = strp_init(&psock->strp, sk, &cb);
1133 	if (!ret)
1134 		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
1135 
1136 	return ret;
1137 }
1138 
1139 void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1140 {
1141 	if (psock->saved_data_ready)
1142 		return;
1143 
1144 	psock->saved_data_ready = sk->sk_data_ready;
1145 	sk->sk_data_ready = sk_psock_strp_data_ready;
1146 	sk->sk_write_space = sk_psock_write_space;
1147 }
1148 
1149 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1150 {
1151 	psock_set_prog(&psock->progs.stream_parser, NULL);
1152 
1153 	if (!psock->saved_data_ready)
1154 		return;
1155 
1156 	sk->sk_data_ready = psock->saved_data_ready;
1157 	psock->saved_data_ready = NULL;
1158 	strp_stop(&psock->strp);
1159 }
1160 
1161 static void sk_psock_done_strp(struct sk_psock *psock)
1162 {
1163 	/* Parser has been stopped */
1164 	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
1165 		strp_done(&psock->strp);
1166 }
1167 #else
1168 static void sk_psock_done_strp(struct sk_psock *psock)
1169 {
1170 }
1171 #endif /* CONFIG_BPF_STREAM_PARSER */
1172 
1173 static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
1174 {
1175 	struct sk_psock *psock;
1176 	struct bpf_prog *prog;
1177 	int ret = __SK_DROP;
1178 	int len = skb->len;
1179 
1180 	rcu_read_lock();
1181 	psock = sk_psock(sk);
1182 	if (unlikely(!psock)) {
1183 		len = 0;
1184 		tcp_eat_skb(sk, skb);
1185 		sock_drop(sk, skb);
1186 		goto out;
1187 	}
1188 	prog = READ_ONCE(psock->progs.stream_verdict);
1189 	if (!prog)
1190 		prog = READ_ONCE(psock->progs.skb_verdict);
1191 	if (likely(prog)) {
1192 		skb_dst_drop(skb);
1193 		skb_bpf_redirect_clear(skb);
1194 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1195 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1196 	}
1197 	ret = sk_psock_verdict_apply(psock, skb, ret);
1198 	if (ret < 0)
1199 		len = ret;
1200 out:
1201 	rcu_read_unlock();
1202 	return len;
1203 }
1204 
1205 static void sk_psock_verdict_data_ready(struct sock *sk)
1206 {
1207 	struct socket *sock = sk->sk_socket;
1208 	int copied;
1209 
1210 	if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
1211 		return;
1212 	copied = sock->ops->read_skb(sk, sk_psock_verdict_recv);
1213 	if (copied >= 0) {
1214 		struct sk_psock *psock;
1215 
1216 		rcu_read_lock();
1217 		psock = sk_psock(sk);
1218 		if (psock) {
1219 			read_lock_bh(&sk->sk_callback_lock);
1220 			sk_psock_data_ready(sk, psock);
1221 			read_unlock_bh(&sk->sk_callback_lock);
1222 		}
1223 		rcu_read_unlock();
1224 	}
1225 }
1226 
1227 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1228 {
1229 	if (psock->saved_data_ready)
1230 		return;
1231 
1232 	psock->saved_data_ready = sk->sk_data_ready;
1233 	sk->sk_data_ready = sk_psock_verdict_data_ready;
1234 	sk->sk_write_space = sk_psock_write_space;
1235 }
1236 
1237 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1238 {
1239 	psock_set_prog(&psock->progs.stream_verdict, NULL);
1240 	psock_set_prog(&psock->progs.skb_verdict, NULL);
1241 
1242 	if (!psock->saved_data_ready)
1243 		return;
1244 
1245 	sk->sk_data_ready = psock->saved_data_ready;
1246 	psock->saved_data_ready = NULL;
1247 }
1248