// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

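/* Helpers that bracket a test-run loop: bpf_test_timer_enter() takes the RCU
 * read lock and disables preemption or migration depending on t->mode,
 * bpf_test_timer_continue() accounts the elapsed time and honours pending
 * signals and rescheduling requests between iterations, and
 * bpf_test_timer_leave() undoes the enter step. Callers drive the loop as:
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		<run the program once>
 *	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
 *	bpf_test_timer_leave(&t);
 */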
static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

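/* Run @prog @repeat times against @ctx, reporting the average per-run time in
 * @time and the last run's return code in @retval. Per-cgroup storage is
 * allocated up front so programs using cgroup storage maps see valid storage.
 */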
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	do {
		ret = bpf_cgroup_storage_set(storage);
		if (ret)
			break;

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		bpf_cgroup_storage_unset();

	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

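/* Copy the test output back to user space: the (possibly clamped) data
 * buffer, its full size, the program's return value and the measured
 * duration. -ENOSPC is reported when the user buffer was too small even
 * though the copies themselves succeeded.
 */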
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. 7+ arguments can be supported
 * in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

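/* Allocate the test packet buffer and copy the user-supplied input into it,
 * leaving @headroom before and @tailroom after the packet data. The requested
 * size plus head- and tailroom is bounded by PAGE_SIZE, and the input must be
 * at least an Ethernet header long.
 */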
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	size = SKB_DATA_ALIGN(size);
	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

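/* Test runner for fentry/fexit and fmod_ret programs. Instead of feeding in a
 * packet, the in-kernel bpf_fentry_test*()/bpf_modify_return_test() functions
 * above are called so the attached program gets exercised, and the result is
 * packed into a single u32 for user space:
 *
 *	retval = (side_effect << 16) | ret;
 *
 * e.g. a BPF_MODIFY_RETURN run where the attached program does not override
 * the return value yields ret == 4 (1 + 3) and side_effect == 1 (the *b += 1
 * write was observed), i.e. retval == 0x10004.
 */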
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

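/* Raw tracepoint programs are run once, synchronously, optionally pinned to a
 * specific CPU via BPF_F_TEST_RUN_ON_CPU, with the user-supplied ctx_in
 * buffer passed verbatim as the program context.
 */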
struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = BPF_PROG_RUN(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!info.ctx)
			return -ENOMEM;
		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu comes from user
		 * space, let's do an extra quick check to filter out an
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

out:
	kfree(info.ctx);
	return err;
}

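/* Copy the user-supplied context (ctx_in) into a zeroed kernel buffer of
 * @max_size bytes. Returns NULL when no context was given, or an ERR_PTR on
 * failure. bpf_check_uarg_tail_zero() rejects inputs with non-zero bytes
 * beyond @max_size, so newer user space running on an older kernel fails
 * loudly instead of being silently truncated.
 */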
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

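/* Mirror of bpf_ctx_init() for output: copy the (possibly modified) context
 * back to ctx_out, clamping to ctx_size_out and reporting -ENOSPC when the
 * user buffer was too small.
 */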
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from,to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

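/* Populate a real skb from the user-visible struct __sk_buff. Only a handful
 * of __sk_buff fields may be set for a test run; every gap between the
 * allowed fields is checked with range_is_zero() so that setting any other
 * field is rejected with -EINVAL rather than silently ignored.
 */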
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

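/* Minimal protocol backing the dummy socket attached to test skbs, so that
 * programs which dereference skb->sk see a valid struct sock.
 */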
static struct proto bpf_dummy_proto = {
	.name	  = "bpf_dummy",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

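/* BPF_PROG_TEST_RUN handler for skb-based program types: build an skb around
 * the user-supplied packet, optionally seed it from a struct __sk_buff
 * context, run the program, and copy the resulting packet and context back.
 *
 * A minimal user-space sketch, assuming libbpf's bpf_prog_test_run_opts();
 * the prog_fd and buffer names are illustrative only:
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = pkt, .data_size_in = sizeof(pkt),
 *		.data_out = out, .data_size_out = sizeof(out),
 *		.repeat = 1,
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on success, opts.retval, opts.duration and opts.data_size_out
 *	// reflect what bpf_test_finish() copies back below.
 */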
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

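/* BPF_PROG_TEST_RUN handler for XDP programs: the packet is laid out in a
 * page-sized buffer with XDP_PACKET_HEADROOM in front and skb_shared_info
 * tailroom behind, mimicking how (most) drivers build their xdp_buff, and the
 * loopback device's first RX queue provides the rxq context.
 */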
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	u32 max_data_sz;
	void *data;
	int ret;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;
	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	/* XDP has extra tailroom as (most) drivers use full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + headroom;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;
	xdp.frame_sz = headroom + max_data_sz + tailroom;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;
	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}

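/* Only the flags field of a user-supplied struct bpf_flow_keys may be set;
 * everything else must be zero, mirroring the __sk_buff checks above.
 */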
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

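/* BPF_PROG_TEST_RUN handler for flow dissector programs: the packet is parsed
 * repeatedly via bpf_flow_dissect() under the NO_PREEMPT test timer, and the
 * resulting struct bpf_flow_keys is returned to user space as data_out.
 */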
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

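/* BPF_PROG_TEST_RUN handler for sk_lookup programs: the user context supplies
 * the lookup tuple, the program runs through a one-element bpf_prog_array via
 * BPF_PROG_SK_LOOKUP_RUN_ARRAY, and the cookie of the selected socket, if
 * any, is reported back through the context.
 */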
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = (__force __be16)user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}