1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3
4 #include <linux/skmsg.h>
5 #include <linux/skbuff.h>
6 #include <linux/scatterlist.h>
7
8 #include <net/sock.h>
9 #include <net/tcp.h>
10 #include <net/tls.h>
11 #include <trace/events/sock.h>
12
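/* msg->sg.data is used as a small ring of scatterlist entries: sg.start and
 * sg.end index the occupied region and may wrap around NR_MSG_FRAG_IDS.
 * Coalescing into the tail element is only OK once the tail lies at or after
 * elem_first_coalesce, so sk_msg_alloc() never grows an element that predates
 * the current operation. Rough example (hypothetical indices): with start = 2,
 * end = 5 and elem_first_coalesce = 4 the tail is data[4] and may be extended;
 * with elem_first_coalesce = 5 a fresh slot must be used instead.
 */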
13 static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
14 {
15 if (msg->sg.end > msg->sg.start &&
16 elem_first_coalesce < msg->sg.end)
17 return true;
18
19 if (msg->sg.end < msg->sg.start &&
20 (elem_first_coalesce > msg->sg.start ||
21 elem_first_coalesce < msg->sg.end))
22 return true;
23
24 return false;
25 }
26
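/* Grow @msg with page_frag backed memory until msg->sg.size reaches @len,
 * charging @sk for every byte added. New bytes are merged into the tail
 * element whenever sk_msg_try_coalesce_ok() allows it, otherwise a fresh
 * ring slot is used. On -ENOMEM the msg is trimmed back to its original
 * size; on -ENOSPC (ring full) whatever was added so far is kept.
 *
 * Rough sketch of a hypothetical sendmsg-style caller (simplified, no
 * error handling):
 *
 *	if (!sk_msg_alloc(sk, msg, msg->sg.size + copy, msg->sg.end))
 *		sk_msg_memcopy_from_iter(sk, &m->msg_iter, msg, copy);
 */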
27 int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
28 int elem_first_coalesce)
29 {
30 struct page_frag *pfrag = sk_page_frag(sk);
31 u32 osize = msg->sg.size;
32 int ret = 0;
33
34 len -= msg->sg.size;
35 while (len > 0) {
36 struct scatterlist *sge;
37 u32 orig_offset;
38 int use, i;
39
40 if (!sk_page_frag_refill(sk, pfrag)) {
41 ret = -ENOMEM;
42 goto msg_trim;
43 }
44
45 orig_offset = pfrag->offset;
46 use = min_t(int, len, pfrag->size - orig_offset);
47 if (!sk_wmem_schedule(sk, use)) {
48 ret = -ENOMEM;
49 goto msg_trim;
50 }
51
52 i = msg->sg.end;
53 sk_msg_iter_var_prev(i);
54 sge = &msg->sg.data[i];
55
56 if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
57 sg_page(sge) == pfrag->page &&
58 sge->offset + sge->length == orig_offset) {
59 sge->length += use;
60 } else {
61 if (sk_msg_full(msg)) {
62 ret = -ENOSPC;
63 break;
64 }
65
66 sge = &msg->sg.data[msg->sg.end];
67 sg_unmark_end(sge);
68 sg_set_page(sge, pfrag->page, use, orig_offset);
69 get_page(pfrag->page);
70 sk_msg_iter_next(msg, end);
71 }
72
73 sk_mem_charge(sk, use);
74 msg->sg.size += use;
75 pfrag->offset += use;
76 len -= use;
77 }
78
79 return ret;
80
81 msg_trim:
82 sk_msg_trim(sk, msg, osize);
83 return ret;
84 }
85 EXPORT_SYMBOL_GPL(sk_msg_alloc);
86
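/* Append the byte range [@off, @off + @len) of @src to @dst without copying
 * data: a source element is merged into the tail of @dst when the pages are
 * virtually contiguous, otherwise it is added as a new element, and @sk is
 * charged for the cloned bytes. Returns -ENOSPC when @src does not cover the
 * requested range or @dst runs out of ring slots.
 */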
87 int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
88 u32 off, u32 len)
89 {
90 int i = src->sg.start;
91 struct scatterlist *sge = sk_msg_elem(src, i);
92 struct scatterlist *sgd = NULL;
93 u32 sge_len, sge_off;
94
95 while (off) {
96 if (sge->length > off)
97 break;
98 off -= sge->length;
99 sk_msg_iter_var_next(i);
100 if (i == src->sg.end && off)
101 return -ENOSPC;
102 sge = sk_msg_elem(src, i);
103 }
104
105 while (len) {
106 sge_len = sge->length - off;
107 if (sge_len > len)
108 sge_len = len;
109
110 if (dst->sg.end)
111 sgd = sk_msg_elem(dst, dst->sg.end - 1);
112
113 if (sgd &&
114 (sg_page(sge) == sg_page(sgd)) &&
115 (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
116 sgd->length += sge_len;
117 dst->sg.size += sge_len;
118 } else if (!sk_msg_full(dst)) {
119 sge_off = sge->offset + off;
120 sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
121 } else {
122 return -ENOSPC;
123 }
124
125 off = 0;
126 len -= sge_len;
127 sk_mem_charge(sk, sge_len);
128 sk_msg_iter_var_next(i);
129 if (i == src->sg.end && len)
130 return -ENOSPC;
131 sge = sk_msg_elem(src, i);
132 }
133
134 return 0;
135 }
136 EXPORT_SYMBOL_GPL(sk_msg_clone);
137
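/* Consume @bytes from the front of @msg and uncharge them from @sk: fully
 * consumed elements are zeroed and sg.start is advanced, while a partially
 * consumed element keeps its remainder. sk_msg_return() below only drops
 * the memory charge and leaves the elements untouched.
 */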
138 void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
139 {
140 int i = msg->sg.start;
141
142 do {
143 struct scatterlist *sge = sk_msg_elem(msg, i);
144
145 if (bytes < sge->length) {
146 sge->length -= bytes;
147 sge->offset += bytes;
148 sk_mem_uncharge(sk, bytes);
149 break;
150 }
151
152 sk_mem_uncharge(sk, sge->length);
153 bytes -= sge->length;
154 sge->length = 0;
155 sge->offset = 0;
156 sk_msg_iter_var_next(i);
157 } while (bytes && i != msg->sg.end);
158 msg->sg.start = i;
159 }
160 EXPORT_SYMBOL_GPL(sk_msg_return_zero);
161
162 void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
163 {
164 int i = msg->sg.start;
165
166 do {
167 struct scatterlist *sge = &msg->sg.data[i];
168 int uncharge = (bytes < sge->length) ? bytes : sge->length;
169
170 sk_mem_uncharge(sk, uncharge);
171 bytes -= uncharge;
172 sk_msg_iter_var_next(i);
173 } while (i != msg->sg.end);
174 }
175 EXPORT_SYMBOL_GPL(sk_msg_return);
176
177 static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
178 bool charge)
179 {
180 struct scatterlist *sge = sk_msg_elem(msg, i);
181 u32 len = sge->length;
182
183 	/* When the skb owns the memory we free it via the consume_skb() path. */
184 if (!msg->skb) {
185 if (charge)
186 sk_mem_uncharge(sk, len);
187 put_page(sg_page(sge));
188 }
189 memset(sge, 0, sizeof(*sge));
190 return len;
191 }
192
193 static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
194 bool charge)
195 {
196 struct scatterlist *sge = sk_msg_elem(msg, i);
197 int freed = 0;
198
199 while (msg->sg.size) {
200 msg->sg.size -= sge->length;
201 freed += sk_msg_free_elem(sk, msg, i, charge);
202 sk_msg_iter_var_next(i);
203 sk_msg_check_to_free(msg, i, msg->sg.size);
204 sge = sk_msg_elem(msg, i);
205 }
206 consume_skb(msg->skb);
207 sk_msg_init(msg);
208 return freed;
209 }
210
211 int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
212 {
213 return __sk_msg_free(sk, msg, msg->sg.start, false);
214 }
215 EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
216
217 int sk_msg_free(struct sock *sk, struct sk_msg *msg)
218 {
219 return __sk_msg_free(sk, msg, msg->sg.start, true);
220 }
221 EXPORT_SYMBOL_GPL(sk_msg_free);
222
223 static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
224 u32 bytes, bool charge)
225 {
226 struct scatterlist *sge;
227 u32 i = msg->sg.start;
228
229 while (bytes) {
230 sge = sk_msg_elem(msg, i);
231 if (!sge->length)
232 break;
233 if (bytes < sge->length) {
234 if (charge)
235 sk_mem_uncharge(sk, bytes);
236 sge->length -= bytes;
237 sge->offset += bytes;
238 msg->sg.size -= bytes;
239 break;
240 }
241
242 msg->sg.size -= sge->length;
243 bytes -= sge->length;
244 sk_msg_free_elem(sk, msg, i, charge);
245 sk_msg_iter_var_next(i);
246 sk_msg_check_to_free(msg, i, bytes);
247 }
248 msg->sg.start = i;
249 }
250
251 void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
252 {
253 __sk_msg_free_partial(sk, msg, bytes, true);
254 }
255 EXPORT_SYMBOL_GPL(sk_msg_free_partial);
256
257 void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
258 u32 bytes)
259 {
260 __sk_msg_free_partial(sk, msg, bytes, false);
261 }
262
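/* Shrink @msg so that msg->sg.size becomes @len: whole tail elements beyond
 * the new size are freed, the last remaining element is shortened, and
 * sg.curr/copybreak are pulled back if they point into trimmed data. Growing
 * is not supported; a @len larger than the current size only triggers a WARN.
 */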
263 void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
264 {
265 int trim = msg->sg.size - len;
266 u32 i = msg->sg.end;
267
268 if (trim <= 0) {
269 WARN_ON(trim < 0);
270 return;
271 }
272
273 sk_msg_iter_var_prev(i);
274 msg->sg.size = len;
275 while (msg->sg.data[i].length &&
276 trim >= msg->sg.data[i].length) {
277 trim -= msg->sg.data[i].length;
278 sk_msg_free_elem(sk, msg, i, true);
279 sk_msg_iter_var_prev(i);
280 if (!trim)
281 goto out;
282 }
283
284 msg->sg.data[i].length -= trim;
285 sk_mem_uncharge(sk, trim);
286 /* Adjust copybreak if it falls into the trimmed part of last buf */
287 if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
288 msg->sg.copybreak = msg->sg.data[i].length;
289 out:
290 sk_msg_iter_var_next(i);
291 msg->sg.end = i;
292
293 	/* If we trim data before the curr pointer by a full sg elem, update
294 	 * copybreak and curr so that any future copy operations start at the
295 	 * new copy location.
296 	 * However, trimmed data that has not yet been used in a copy op does
297 	 * not require an update.
298 	 */
299 if (!msg->sg.size) {
300 msg->sg.curr = msg->sg.start;
301 msg->sg.copybreak = 0;
302 } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
303 sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
304 sk_msg_iter_var_prev(i);
305 msg->sg.curr = i;
306 msg->sg.copybreak = msg->sg.data[i].length;
307 }
308 }
309 EXPORT_SYMBOL_GPL(sk_msg_trim);
310
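/* Pin user pages from @from with iov_iter_get_pages2() and link them into
 * @msg as zerocopy elements, charging @sk for the bytes added. On failure
 * the iterator is reverted, but elements already linked in stay in @msg
 * until the caller trims or frees it.
 */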
311 int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
312 struct sk_msg *msg, u32 bytes)
313 {
314 int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
315 const int to_max_pages = MAX_MSG_FRAGS;
316 struct page *pages[MAX_MSG_FRAGS];
317 ssize_t orig, copied, use, offset;
318
319 orig = msg->sg.size;
320 while (bytes > 0) {
321 i = 0;
322 maxpages = to_max_pages - num_elems;
323 if (maxpages == 0) {
324 ret = -EFAULT;
325 goto out;
326 }
327
328 copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
329 &offset);
330 if (copied <= 0) {
331 ret = -EFAULT;
332 goto out;
333 }
334
335 bytes -= copied;
336 msg->sg.size += copied;
337
338 while (copied) {
339 use = min_t(int, copied, PAGE_SIZE - offset);
340 sg_set_page(&msg->sg.data[msg->sg.end],
341 pages[i], use, offset);
342 sg_unmark_end(&msg->sg.data[msg->sg.end]);
343 sk_mem_charge(sk, use);
344
345 offset = 0;
346 copied -= use;
347 sk_msg_iter_next(msg, end);
348 num_elems++;
349 i++;
350 }
351 		/* When zerocopy is mixed with sk_msg_*copy* operations we may
352 		 * have a copybreak set; in that case clear it and prefer the
353 		 * zerocopy remainder when possible.
354 		 */
355 msg->sg.copybreak = 0;
356 msg->sg.curr = msg->sg.end;
357 }
358 out:
359 /* Revert iov_iter updates, msg will need to use 'trim' later if it
360 * also needs to be cleared.
361 */
362 if (ret)
363 iov_iter_revert(from, msg->sg.size - orig);
364 return ret;
365 }
366 EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
367
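/* Copy @bytes from @from into memory already allocated to @msg (e.g. via
 * sk_msg_alloc()), starting at sg.curr/copybreak and advancing both as the
 * data lands. The caller is expected to have sized @msg for @bytes first;
 * a faulting copy returns -EFAULT.
 */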
368 int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
369 struct sk_msg *msg, u32 bytes)
370 {
371 int ret = -ENOSPC, i = msg->sg.curr;
372 struct scatterlist *sge;
373 u32 copy, buf_size;
374 void *to;
375
376 do {
377 sge = sk_msg_elem(msg, i);
378 /* This is possible if a trim operation shrunk the buffer */
379 if (msg->sg.copybreak >= sge->length) {
380 msg->sg.copybreak = 0;
381 sk_msg_iter_var_next(i);
382 if (i == msg->sg.end)
383 break;
384 sge = sk_msg_elem(msg, i);
385 }
386
387 buf_size = sge->length - msg->sg.copybreak;
388 copy = (buf_size > bytes) ? bytes : buf_size;
389 to = sg_virt(sge) + msg->sg.copybreak;
390 msg->sg.copybreak += copy;
391 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
392 ret = copy_from_iter_nocache(to, copy, from);
393 else
394 ret = copy_from_iter(to, copy, from);
395 if (ret != copy) {
396 ret = -EFAULT;
397 goto out;
398 }
399 bytes -= copy;
400 if (!bytes)
401 break;
402 msg->sg.copybreak = 0;
403 sk_msg_iter_var_next(i);
404 } while (i != msg->sg.end);
405 out:
406 msg->sg.curr = i;
407 return ret;
408 }
409 EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
410
411 /* Receive sk_msg from psock->ingress_msg to @msg. */
412 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
413 int len, int flags)
414 {
415 struct iov_iter *iter = &msg->msg_iter;
416 int peek = flags & MSG_PEEK;
417 struct sk_msg *msg_rx;
418 int i, copied = 0;
419
420 msg_rx = sk_psock_peek_msg(psock);
421 while (copied != len) {
422 struct scatterlist *sge;
423
424 if (unlikely(!msg_rx))
425 break;
426
427 i = msg_rx->sg.start;
428 do {
429 struct page *page;
430 int copy;
431
432 sge = sk_msg_elem(msg_rx, i);
433 copy = sge->length;
434 page = sg_page(sge);
435 if (copied + copy > len)
436 copy = len - copied;
437 if (copy)
438 copy = copy_page_to_iter(page, sge->offset, copy, iter);
439 if (!copy) {
440 copied = copied ? copied : -EFAULT;
441 goto out;
442 }
443
444 copied += copy;
445 if (likely(!peek)) {
446 sge->offset += copy;
447 sge->length -= copy;
448 if (!msg_rx->skb) {
449 sk_mem_uncharge(sk, copy);
450 atomic_sub(copy, &sk->sk_rmem_alloc);
451 }
452 msg_rx->sg.size -= copy;
453
454 if (!sge->length) {
455 sk_msg_iter_var_next(i);
456 if (!msg_rx->skb)
457 put_page(page);
458 }
459 } else {
460 				/* Let's not optimize the peek case: if copy_page_to_iter
461 				 * didn't copy the entire length, just break out.
462 				 */
463 if (copy != sge->length)
464 goto out;
465 sk_msg_iter_var_next(i);
466 }
467
468 if (copied == len)
469 break;
470 } while ((i != msg_rx->sg.end) && !sg_is_last(sge));
471
472 if (unlikely(peek)) {
473 msg_rx = sk_psock_next_msg(psock, msg_rx);
474 if (!msg_rx)
475 break;
476 continue;
477 }
478
479 msg_rx->sg.start = i;
480 if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
481 msg_rx = sk_psock_dequeue_msg(psock);
482 kfree_sk_msg(msg_rx);
483 }
484 msg_rx = sk_psock_peek_msg(psock);
485 }
486 out:
487 return copied;
488 }
489 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
490
491 bool sk_msg_is_readable(struct sock *sk)
492 {
493 struct sk_psock *psock;
494 bool empty = true;
495
496 rcu_read_lock();
497 psock = sk_psock(sk);
498 if (likely(psock))
499 empty = list_empty(&psock->ingress_msg);
500 rcu_read_unlock();
501 return !empty;
502 }
503 EXPORT_SYMBOL_GPL(sk_msg_is_readable);
504
505 static struct sk_msg *alloc_sk_msg(gfp_t gfp)
506 {
507 struct sk_msg *msg;
508
509 msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
510 if (unlikely(!msg))
511 return NULL;
512 sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
513 return msg;
514 }
515
516 static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
517 struct sk_buff *skb)
518 {
519 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
520 return NULL;
521
522 if (!sk_rmem_schedule(sk, skb, skb->truesize))
523 return NULL;
524
525 return alloc_sk_msg(GFP_KERNEL);
526 }
527
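/* Map @len bytes of @skb starting at @off into @msg with skb_to_sgvec(),
 * queue the msg on the psock ingress list and wake the receiver. The skb
 * itself is kept (optionally with an extra reference) so that the pages
 * remain valid until the msg has been consumed.
 */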
528 static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
529 u32 off, u32 len,
530 struct sk_psock *psock,
531 struct sock *sk,
532 struct sk_msg *msg,
533 bool take_ref)
534 {
535 int num_sge, copied;
536
537 /* skb_to_sgvec will fail when the total number of fragments in
538 * frag_list and frags exceeds MAX_MSG_FRAGS. For example, the
539 * caller may aggregate multiple skbs.
540 */
541 num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
542 if (num_sge < 0) {
543 		/* skb_linearize may fail with ENOMEM, but let's simply try again
544 		 * later if this happens. Under memory pressure we don't want to
545 		 * drop the skb. We need to linearize the skb so that the mapping
546 		 * in skb_to_sgvec cannot error.
547 * Note that skb_linearize requires the skb not to be shared.
548 */
549 if (skb_linearize(skb))
550 return -EAGAIN;
551
552 num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
553 if (unlikely(num_sge < 0))
554 return num_sge;
555 }
556
557 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
558 psock->ingress_bytes += len;
559 #endif
560 copied = len;
561 msg->sg.start = 0;
562 msg->sg.size = copied;
563 msg->sg.end = num_sge;
564 msg->skb = take_ref ? skb_get(skb) : skb;
565
566 sk_psock_queue_msg(psock, msg);
567 sk_psock_data_ready(sk, psock);
568 return copied;
569 }
570
571 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
572 u32 off, u32 len, bool take_ref);
573
574 static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
575 u32 off, u32 len)
576 {
577 struct sock *sk = psock->sk;
578 struct sk_msg *msg;
579 int err;
580
581 	/* If we are receiving on the same sock, skb->sk is already assigned;
582 	 * skip memory accounting and the owner transition since they are
583 	 * already set correctly.
584 	 */
585 if (unlikely(skb->sk == sk))
586 return sk_psock_skb_ingress_self(psock, skb, off, len, true);
587 msg = sk_psock_create_ingress_msg(sk, skb);
588 if (!msg)
589 return -EAGAIN;
590
591 	/* This will transition ownership of the data from the socket where
592 	 * the BPF program was run initiating the redirect to the socket
593 	 * we will eventually receive this data on. The data will be released
594 	 * from the consume_skb() path in __tcp_bpf_recvmsg() after it has
595 	 * been copied into user buffers.
596 	 */
597 skb_set_owner_r(skb, sk);
598 err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true);
599 if (err < 0)
600 kfree(msg);
601 return err;
602 }
603
604 /* Puts an skb on the ingress queue of the socket already assigned to the
605 * skb. In this case we do not need to check memory limits or skb_set_owner_r
606 * because the skb is already accounted for here.
607 */
608 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
609 u32 off, u32 len, bool take_ref)
610 {
611 struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
612 struct sock *sk = psock->sk;
613 int err;
614
615 if (unlikely(!msg))
616 return -EAGAIN;
617 skb_set_owner_r(skb, sk);
618 err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref);
619 if (err < 0)
620 kfree(msg);
621 return err;
622 }
623
624 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
625 u32 off, u32 len, bool ingress)
626 {
627 if (!ingress) {
628 if (!sock_writeable(psock->sk))
629 return -EAGAIN;
630 return skb_send_sock(psock->sk, skb, off, len);
631 }
632
633 return sk_psock_skb_ingress(psock, skb, off, len);
634 }
635
636 static void sk_psock_skb_state(struct sk_psock *psock,
637 struct sk_psock_work_state *state,
638 int len, int off)
639 {
640 spin_lock_bh(&psock->ingress_lock);
641 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
642 state->len = len;
643 state->off = off;
644 }
645 spin_unlock_bh(&psock->ingress_lock);
646 }
647
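/* Work item that drains psock->ingress_skb: each skb is either transmitted
 * with skb_send_sock() or fed into the local ingress path, resuming from any
 * partial (len, off) state left behind by a previous run. -EAGAIN reschedules
 * the work with a small delay; any other error breaks the pipe and is
 * reported on the psock.
 */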
648 static void sk_psock_backlog(struct work_struct *work)
649 {
650 struct delayed_work *dwork = to_delayed_work(work);
651 struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
652 struct sk_psock_work_state *state = &psock->work_state;
653 struct sk_buff *skb = NULL;
654 u32 len = 0, off = 0;
655 bool ingress;
656 int ret;
657
658 /* If sk is quickly removed from the map and then added back, the old
659 * psock should not be scheduled, because there are now two psocks
660 * pointing to the same sk.
661 */
662 if (!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
663 return;
664
665 /* Increment the psock refcnt to synchronize with close(fd) path in
666 * sock_map_close(), ensuring we wait for backlog thread completion
667 	 * before sk_socket is freed. If refcnt increment fails, it indicates
668 * sock_map_close() completed with sk_socket potentially already freed.
669 */
670 if (!sk_psock_get(psock->sk))
671 return;
672 mutex_lock(&psock->work_mutex);
673 while ((skb = skb_peek(&psock->ingress_skb))) {
674 len = skb->len;
675 off = 0;
676 if (skb_bpf_strparser(skb)) {
677 struct strp_msg *stm = strp_msg(skb);
678
679 off = stm->offset;
680 len = stm->full_len;
681 }
682
683 /* Resume processing from previous partial state */
684 if (unlikely(state->len)) {
685 len = state->len;
686 off = state->off;
687 }
688
689 ingress = skb_bpf_ingress(skb);
690 skb_bpf_redirect_clear(skb);
691 do {
692 ret = -EIO;
693 if (!sock_flag(psock->sk, SOCK_DEAD))
694 ret = sk_psock_handle_skb(psock, skb, off,
695 len, ingress);
696 if (ret <= 0) {
697 if (ret == -EAGAIN) {
698 sk_psock_skb_state(psock, state, len, off);
699 /* Restore redir info we cleared before */
700 skb_bpf_set_redir(skb, psock->sk, ingress);
701 /* Delay slightly to prioritize any
702 * other work that might be here.
703 */
704 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
705 schedule_delayed_work(&psock->work, 1);
706 goto end;
707 }
708 /* Hard errors break pipe and stop xmit. */
709 sk_psock_report_error(psock, ret ? -ret : EPIPE);
710 sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
711 goto end;
712 }
713 off += ret;
714 len -= ret;
715 } while (len);
716
717 		/* The entire skb was sent, clear the state */
718 sk_psock_skb_state(psock, state, 0, 0);
719 skb = skb_dequeue(&psock->ingress_skb);
720 kfree_skb(skb);
721 }
722 end:
723 mutex_unlock(&psock->work_mutex);
724 sk_psock_put(psock->sk, psock);
725 }
726
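/* Allocate and attach a psock to @sk: the original proto callbacks are saved
 * so sk_psock_drop() can restore them later, and the psock is published via
 * sk_user_data. Fails with -EINVAL if a ULP is already in place and -EBUSY
 * if sk_user_data is already taken.
 */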
727 struct sk_psock *sk_psock_init(struct sock *sk, int node)
728 {
729 struct sk_psock *psock;
730 struct proto *prot;
731
732 write_lock_bh(&sk->sk_callback_lock);
733
734 if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
735 psock = ERR_PTR(-EINVAL);
736 goto out;
737 }
738
739 if (sk->sk_user_data) {
740 psock = ERR_PTR(-EBUSY);
741 goto out;
742 }
743
744 psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
745 if (!psock) {
746 psock = ERR_PTR(-ENOMEM);
747 goto out;
748 }
749
750 prot = READ_ONCE(sk->sk_prot);
751 psock->sk = sk;
752 psock->eval = __SK_NONE;
753 psock->sk_proto = prot;
754 psock->saved_unhash = prot->unhash;
755 psock->saved_destroy = prot->destroy;
756 psock->saved_close = prot->close;
757 psock->saved_write_space = sk->sk_write_space;
758
759 INIT_LIST_HEAD(&psock->link);
760 spin_lock_init(&psock->link_lock);
761
762 INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
763 mutex_init(&psock->work_mutex);
764 INIT_LIST_HEAD(&psock->ingress_msg);
765 spin_lock_init(&psock->ingress_lock);
766 skb_queue_head_init(&psock->ingress_skb);
767
768 sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
769 refcount_set(&psock->refcnt, 1);
770
771 __rcu_assign_sk_user_data_with_flags(sk, psock,
772 SK_USER_DATA_NOCOPY |
773 SK_USER_DATA_PSOCK);
774 sock_hold(sk);
775
776 out:
777 write_unlock_bh(&sk->sk_callback_lock);
778 return psock;
779 }
780 EXPORT_SYMBOL_GPL(sk_psock_init);
781
782 struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
783 {
784 struct sk_psock_link *link;
785
786 spin_lock_bh(&psock->link_lock);
787 link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
788 list);
789 if (link)
790 list_del(&link->list);
791 spin_unlock_bh(&psock->link_lock);
792 return link;
793 }
794
795 static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
796 {
797 struct sk_msg *msg, *tmp;
798
799 list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
800 list_del(&msg->list);
801 if (!msg->skb)
802 atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
803 sk_msg_free(psock->sk, msg);
804 kfree(msg);
805 }
806 }
807
808 static void __sk_psock_zap_ingress(struct sk_psock *psock)
809 {
810 struct sk_buff *skb;
811
812 while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
813 skb_bpf_redirect_clear(skb);
814 sock_drop(psock->sk, skb);
815 }
816 __sk_psock_purge_ingress_msg(psock);
817 }
818
819 static void sk_psock_link_destroy(struct sk_psock *psock)
820 {
821 struct sk_psock_link *link, *tmp;
822
823 list_for_each_entry_safe(link, tmp, &psock->link, list) {
824 list_del(&link->list);
825 sk_psock_free_link(link);
826 }
827 }
828
829 void sk_psock_stop(struct sk_psock *psock)
830 {
831 spin_lock_bh(&psock->ingress_lock);
832 sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
833 sk_psock_cork_free(psock);
834 spin_unlock_bh(&psock->ingress_lock);
835 }
836
837 static void sk_psock_done_strp(struct sk_psock *psock);
838
839 static void sk_psock_destroy(struct work_struct *work)
840 {
841 struct sk_psock *psock = container_of(to_rcu_work(work),
842 struct sk_psock, rwork);
843 /* No sk_callback_lock since already detached. */
844
845 sk_psock_done_strp(psock);
846
847 cancel_delayed_work_sync(&psock->work);
848 __sk_psock_zap_ingress(psock);
849 mutex_destroy(&psock->work_mutex);
850
851 psock_progs_drop(&psock->progs);
852
853 sk_psock_link_destroy(psock);
854 sk_psock_cork_free(psock);
855
856 if (psock->sk_redir)
857 sock_put(psock->sk_redir);
858 if (psock->sk_pair)
859 sock_put(psock->sk_pair);
860 sock_put(psock->sk);
861 kfree(psock);
862 }
863
864 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
865 {
866 write_lock_bh(&sk->sk_callback_lock);
867 sk_psock_restore_proto(sk, psock);
868 rcu_assign_sk_user_data(sk, NULL);
869 if (psock->progs.stream_parser)
870 sk_psock_stop_strp(sk, psock);
871 else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
872 sk_psock_stop_verdict(sk, psock);
873 write_unlock_bh(&sk->sk_callback_lock);
874
875 sk_psock_stop(psock);
876
877 INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
878 queue_rcu_work(system_wq, &psock->rwork);
879 }
880 EXPORT_SYMBOL_GPL(sk_psock_drop);
881
882 static int sk_psock_map_verd(int verdict, bool redir)
883 {
884 switch (verdict) {
885 case SK_PASS:
886 return redir ? __SK_REDIRECT : __SK_PASS;
887 case SK_DROP:
888 default:
889 break;
890 }
891
892 return __SK_DROP;
893 }
894
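/* Run the msg_parser BPF program on @msg and map its return code to
 * __SK_PASS/__SK_REDIRECT/__SK_DROP. For a redirect the target socket chosen
 * by the program is pinned (with a reference) in psock->sk_redir for the
 * caller's send path to use.
 */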
895 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
896 struct sk_msg *msg)
897 {
898 struct bpf_prog *prog;
899 int ret;
900
901 rcu_read_lock();
902 prog = READ_ONCE(psock->progs.msg_parser);
903 if (unlikely(!prog)) {
904 ret = __SK_PASS;
905 goto out;
906 }
907
908 sk_msg_compute_data_pointers(msg);
909 msg->sk = sk;
910 ret = bpf_prog_run_pin_on_cpu(prog, msg);
911 ret = sk_psock_map_verd(ret, msg->sk_redir);
912 psock->apply_bytes = msg->apply_bytes;
913 if (ret == __SK_REDIRECT) {
914 if (psock->sk_redir) {
915 sock_put(psock->sk_redir);
916 psock->sk_redir = NULL;
917 }
918 if (!msg->sk_redir) {
919 ret = __SK_DROP;
920 goto out;
921 }
922 psock->redir_ingress = sk_msg_to_ingress(msg);
923 psock->sk_redir = msg->sk_redir;
924 sock_hold(psock->sk_redir);
925 }
926 out:
927 rcu_read_unlock();
928 return ret;
929 }
930 EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
931
932 static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
933 {
934 struct sk_psock *psock_other;
935 struct sock *sk_other;
936
937 sk_other = skb_bpf_redirect_fetch(skb);
938 	/* This error indicates a buggy BPF program: it returned a redirect
939 	 * verdict but then didn't set a redirect socket.
940 	 */
941 if (unlikely(!sk_other)) {
942 skb_bpf_redirect_clear(skb);
943 sock_drop(from->sk, skb);
944 return -EIO;
945 }
946 psock_other = sk_psock(sk_other);
947 /* This error indicates the socket is being torn down or had another
948 * error that caused the pipe to break. We can't send a packet on
949 * a socket that is in this state so we drop the skb.
950 */
951 if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
952 skb_bpf_redirect_clear(skb);
953 sock_drop(from->sk, skb);
954 return -EIO;
955 }
956 spin_lock_bh(&psock_other->ingress_lock);
957 if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
958 spin_unlock_bh(&psock_other->ingress_lock);
959 skb_bpf_redirect_clear(skb);
960 sock_drop(from->sk, skb);
961 return -EIO;
962 }
963
964 skb_queue_tail(&psock_other->ingress_skb, skb);
965 schedule_delayed_work(&psock_other->work, 0);
966 spin_unlock_bh(&psock_other->ingress_lock);
967 return 0;
968 }
969
970 static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
971 struct sk_psock *from, int verdict)
972 {
973 switch (verdict) {
974 case __SK_REDIRECT:
975 sk_psock_skb_redirect(from, skb);
976 break;
977 case __SK_PASS:
978 case __SK_DROP:
979 default:
980 break;
981 }
982 }
983
984 int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
985 {
986 struct bpf_prog *prog;
987 int ret = __SK_PASS;
988
989 rcu_read_lock();
990 prog = READ_ONCE(psock->progs.stream_verdict);
991 if (likely(prog)) {
992 skb->sk = psock->sk;
993 skb_dst_drop(skb);
994 skb_bpf_redirect_clear(skb);
995 ret = bpf_prog_run_pin_on_cpu(prog, skb);
996 ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
997 skb->sk = NULL;
998 }
999 sk_psock_tls_verdict_apply(skb, psock, ret);
1000 rcu_read_unlock();
1001 return ret;
1002 }
1003 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
1004
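/* Apply a verdict to @skb: __SK_PASS queues it onto the local ingress path
 * (directly when the backlog is empty, via the workqueue otherwise),
 * __SK_REDIRECT hands it to the target psock, and anything else drops it.
 */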
1005 static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
1006 int verdict)
1007 {
1008 struct sock *sk_other;
1009 int err = 0;
1010 u32 len, off;
1011
1012 switch (verdict) {
1013 case __SK_PASS:
1014 err = -EIO;
1015 sk_other = psock->sk;
1016 if (sock_flag(sk_other, SOCK_DEAD) ||
1017 !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1018 goto out_free;
1019
1020 skb_bpf_set_ingress(skb);
1021
1022 		/* If the queue is empty then we can submit directly
1023 		 * into the msg queue. If it's not empty we have to
1024 		 * queue work, otherwise we may get OOO data. And if
1025 		 * sk_psock_skb_ingress errors, that will be handled
1026 		 * by retrying later from the workqueue.
1027 		 */
1028 if (skb_queue_empty(&psock->ingress_skb)) {
1029 len = skb->len;
1030 off = 0;
1031 if (skb_bpf_strparser(skb)) {
1032 struct strp_msg *stm = strp_msg(skb);
1033
1034 off = stm->offset;
1035 len = stm->full_len;
1036 }
1037 err = sk_psock_skb_ingress_self(psock, skb, off, len, false);
1038 }
1039 if (err < 0) {
1040 spin_lock_bh(&psock->ingress_lock);
1041 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
1042 skb_queue_tail(&psock->ingress_skb, skb);
1043 schedule_delayed_work(&psock->work, 0);
1044 err = 0;
1045 }
1046 spin_unlock_bh(&psock->ingress_lock);
1047 if (err < 0)
1048 goto out_free;
1049 }
1050 break;
1051 case __SK_REDIRECT:
1052 tcp_eat_skb(psock->sk, skb);
1053 err = sk_psock_skb_redirect(psock, skb);
1054 break;
1055 case __SK_DROP:
1056 default:
1057 out_free:
1058 skb_bpf_redirect_clear(skb);
1059 tcp_eat_skb(psock->sk, skb);
1060 sock_drop(psock->sk, skb);
1061 }
1062
1063 return err;
1064 }
1065
1066 static void sk_psock_write_space(struct sock *sk)
1067 {
1068 struct sk_psock *psock;
1069 void (*write_space)(struct sock *sk) = NULL;
1070
1071 rcu_read_lock();
1072 psock = sk_psock(sk);
1073 if (likely(psock)) {
1074 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1075 schedule_delayed_work(&psock->work, 0);
1076 write_space = psock->saved_write_space;
1077 }
1078 rcu_read_unlock();
1079 if (write_space)
1080 write_space(sk);
1081 }
1082
1083 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
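/* Stream parser integration: sk_psock_strp_parse() runs the stream_parser
 * program to find message boundaries, and sk_psock_strp_read() runs the
 * stream_verdict program on each parsed message (see the callbacks wired up
 * in sk_psock_init_strp() below).
 */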
1084 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
1085 {
1086 struct sk_psock *psock;
1087 struct bpf_prog *prog;
1088 int ret = __SK_DROP;
1089 struct sock *sk;
1090
1091 rcu_read_lock();
1092 sk = strp->sk;
1093 psock = sk_psock(sk);
1094 if (unlikely(!psock)) {
1095 sock_drop(sk, skb);
1096 goto out;
1097 }
1098 prog = READ_ONCE(psock->progs.stream_verdict);
1099 if (likely(prog)) {
1100 skb->sk = sk;
1101 skb_dst_drop(skb);
1102 skb_bpf_redirect_clear(skb);
1103 ret = bpf_prog_run_pin_on_cpu(prog, skb);
1104 skb_bpf_set_strparser(skb);
1105 ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1106 skb->sk = NULL;
1107 }
1108 sk_psock_verdict_apply(psock, skb, ret);
1109 out:
1110 rcu_read_unlock();
1111 }
1112
1113 static int sk_psock_strp_read_done(struct strparser *strp, int err)
1114 {
1115 return err;
1116 }
1117
1118 static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1119 {
1120 struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1121 struct bpf_prog *prog;
1122 int ret = skb->len;
1123
1124 rcu_read_lock();
1125 prog = READ_ONCE(psock->progs.stream_parser);
1126 if (likely(prog)) {
1127 skb->sk = psock->sk;
1128 ret = bpf_prog_run_pin_on_cpu(prog, skb);
1129 skb->sk = NULL;
1130 }
1131 rcu_read_unlock();
1132 return ret;
1133 }
1134
1135 /* Called with socket lock held. */
1136 static void sk_psock_strp_data_ready(struct sock *sk)
1137 {
1138 struct sk_psock *psock;
1139
1140 trace_sk_data_ready(sk);
1141
1142 rcu_read_lock();
1143 psock = sk_psock(sk);
1144 if (likely(psock)) {
1145 if (tls_sw_has_ctx_rx(sk)) {
1146 psock->saved_data_ready(sk);
1147 } else {
1148 read_lock_bh(&sk->sk_callback_lock);
1149 strp_data_ready(&psock->strp);
1150 read_unlock_bh(&sk->sk_callback_lock);
1151 }
1152 }
1153 rcu_read_unlock();
1154 }
1155
1156 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1157 {
1158 int ret;
1159
1160 static const struct strp_callbacks cb = {
1161 .rcv_msg = sk_psock_strp_read,
1162 .read_sock_done = sk_psock_strp_read_done,
1163 .parse_msg = sk_psock_strp_parse,
1164 };
1165
1166 ret = strp_init(&psock->strp, sk, &cb);
1167 if (!ret)
1168 sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
1169
1170 if (sk_is_tcp(sk)) {
1171 psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
1172 psock->copied_seq = tcp_sk(sk)->copied_seq;
1173 }
1174 return ret;
1175 }
1176
1177 void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1178 {
1179 if (psock->saved_data_ready)
1180 return;
1181
1182 psock->saved_data_ready = sk->sk_data_ready;
1183 sk->sk_data_ready = sk_psock_strp_data_ready;
1184 sk->sk_write_space = sk_psock_write_space;
1185 }
1186
1187 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1188 {
1189 psock_set_prog(&psock->progs.stream_parser, NULL);
1190
1191 if (!psock->saved_data_ready)
1192 return;
1193
1194 sk->sk_data_ready = psock->saved_data_ready;
1195 psock->saved_data_ready = NULL;
1196 strp_stop(&psock->strp);
1197 }
1198
1199 static void sk_psock_done_strp(struct sk_psock *psock)
1200 {
1201 /* Parser has been stopped */
1202 if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
1203 strp_done(&psock->strp);
1204 }
1205 #else
1206 static void sk_psock_done_strp(struct sk_psock *psock)
1207 {
1208 }
1209 #endif /* CONFIG_BPF_STREAM_PARSER */
1210
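/* Verdict-only receive path (no stream parser): ->read_skb() feeds each skb
 * to sk_psock_verdict_recv(), which runs the stream/skb verdict program and
 * applies the result via sk_psock_verdict_apply().
 */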
1211 static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
1212 {
1213 struct sk_psock *psock;
1214 struct bpf_prog *prog;
1215 int ret = __SK_DROP;
1216 int len = skb->len;
1217
1218 rcu_read_lock();
1219 psock = sk_psock(sk);
1220 if (unlikely(!psock)) {
1221 len = 0;
1222 tcp_eat_skb(sk, skb);
1223 sock_drop(sk, skb);
1224 goto out;
1225 }
1226 prog = READ_ONCE(psock->progs.stream_verdict);
1227 if (!prog)
1228 prog = READ_ONCE(psock->progs.skb_verdict);
1229 if (likely(prog)) {
1230 skb_dst_drop(skb);
1231 skb_bpf_redirect_clear(skb);
1232 ret = bpf_prog_run_pin_on_cpu(prog, skb);
1233 ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1234 }
1235 ret = sk_psock_verdict_apply(psock, skb, ret);
1236 if (ret < 0)
1237 len = ret;
1238 out:
1239 rcu_read_unlock();
1240 return len;
1241 }
1242
1243 static void sk_psock_verdict_data_ready(struct sock *sk)
1244 {
1245 struct socket *sock = sk->sk_socket;
1246 const struct proto_ops *ops;
1247 int copied;
1248
1249 trace_sk_data_ready(sk);
1250
1251 if (unlikely(!sock))
1252 return;
1253 ops = READ_ONCE(sock->ops);
1254 if (!ops || !ops->read_skb)
1255 return;
1256 copied = ops->read_skb(sk, sk_psock_verdict_recv);
1257 if (copied >= 0) {
1258 struct sk_psock *psock;
1259
1260 rcu_read_lock();
1261 psock = sk_psock(sk);
1262 if (psock)
1263 sk_psock_data_ready(sk, psock);
1264 rcu_read_unlock();
1265 }
1266 }
1267
1268 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1269 {
1270 if (psock->saved_data_ready)
1271 return;
1272
1273 psock->saved_data_ready = sk->sk_data_ready;
1274 sk->sk_data_ready = sk_psock_verdict_data_ready;
1275 sk->sk_write_space = sk_psock_write_space;
1276 }
1277
1278 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1279 {
1280 psock_set_prog(&psock->progs.stream_verdict, NULL);
1281 psock_set_prog(&psock->progs.skb_verdict, NULL);
1282
1283 if (!psock->saved_data_ready)
1284 return;
1285
1286 sk->sk_data_ready = psock->saved_data_ready;
1287 psock->saved_data_ready = NULL;
1288 }
1289