// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

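/* Free one storage of each cgroup storage type in @storages. */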
static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

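/* Look up or allocate one storage of each type used by @prog for the
 * (cgroup, attach type) pair.  Storages that had to be freshly allocated
 * are also recorded in @new_storages, so that the caller can free exactly
 * those on a later error without touching pre-existing, shared ones.
 */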
static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;

	unsigned int atype;

	mutex_lock(&cgroup_mutex);

	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
		struct list_head *progs = &cgrp->bpf.progs[atype];
		struct bpf_prog_list *pl, *pltmp;

		list_for_each_entry_safe(pl, pltmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum cgroup_bpf_attach_type atype)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[atype];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[atype]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[atype]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[atype], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum cgroup_bpf_attach_type atype,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
	/* has to use macro instead of const int, since compiler thinks
	 * that array below is variable length
	 */
#define NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

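/* Recompute and swap in the effective prog arrays of @cgrp and all of its
 * descendants after the set of attached programs has changed.  All new
 * arrays are allocated up front (into bpf.inactive), so the switch-over
 * cannot fail halfway through the hierarchy.
 */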
static int update_effective_progs(struct cgroup *cgrp,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, atype, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

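/* Find the bpf_prog_list entry that an attach request operates on.
 * Returns NULL when a new entry should be allocated, the entry to be
 * replaced when an existing program is being replaced, or an ERR_PTR()
 * when the request is invalid (duplicate prog or link, or replacement
 * target not attached to this cgroup).
 */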
static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (!hierarchy_allows_attach(cgrp, atype))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[atype] = saved_flags;

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup;

	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: A new program to attach in place of the link's current one
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	bool found = false;

	atype = to_cgroup_bpf_attach_type(link->type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, atype, link);
	bpf_prog_put(old_prog);
	return 0;
}

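/* bpf_link's update_prog callback: swap the program backing a cgroup link
 * under cgroup_mutex.  Fails with -ENOLINK if the link was already
 * auto-detached by a dying cgroup and with -EPERM if @old_prog doesn't
 * match the currently attached program.
 */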
static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

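/* Find the bpf_prog_list entry that a detach request refers to, or an
 * ERR_PTR() on failure.  In legacy (non-multi) mode the first entry is
 * returned regardless of @prog for backward compatibility; in multi mode
 * an exact prog or link match is required.
 */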
static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * purge_effective_progs() - After compute_effective_progs fails to alloc new
 *                           cgrp->bpf.inactive table we can recover by
 *                           recomputing the array in place.
 *
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @atype: Type of detach operation
 */
static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
				  struct bpf_cgroup_link *link,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	/* recompute effective prog array in place */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link or prog in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->prog == prog && pl->link == link)
					goto found;
				pos++;
			}
		}

		/* no link or prog match, skip the cgroup of this layer */
		continue;
found:
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));

		/* Remove the program from the array */
		WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
			  "Failed to purge a prog from array at index %d", pos);
	}
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	u32 flags;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	if (update_effective_progs(cgrp, atype)) {
		/* if update of effective array failed, replace the prog
		 * with a dummy prog
		 */
		pl->prog = old_prog;
		pl->link = link;
		purge_effective_progs(cgrp, old_prog, link, atype);
	}

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[atype] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
	return 0;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
	struct list_head *progs;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;
	u32 flags;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

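/* Entry point for BPF_PROG_ATTACH on a cgroup: resolves the target cgroup
 * and the optional BPF_F_REPLACE program from the FDs in @attr, then
 * delegates to cgroup_bpf_attach().
 */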
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

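/* Entry point for BPF_PROG_DETACH on a cgroup.  An invalid program FD is
 * tolerated (prog stays NULL) to support legacy detach-by-type for
 * non-multi attachments.
 */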
int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	struct cgroup *cg;

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	cg = cg_link->cgroup;
	cg_link->cgroup = NULL;

	mutex_unlock(&cgroup_mutex);

	cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
	bpf_cgroup_link_release(link);

	return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.detach = bpf_cgroup_link_detach,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
				link->type, BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS (0)	- continue with packet output
 *   NET_XMIT_DROP    (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN      (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (atype == CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb,
					    __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, bpf_prog_run);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       the sockaddr provided by user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent a network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
					  bpf_prog_run, flags);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 *            sk with connection information (IP addresses, etc.) May not contain
 *            cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
				    bpf_prog_run);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
				      bpf_prog_run);
	rcu_read_unlock();

	return !allow;
}

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto = cgroup_dev_func_proto,
	.is_valid_access = cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, bpf_prog_run);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}

#ifdef CONFIG_NET
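/* Cheap, RCU-protected check whether any program is attached for
 * @attach_type, used to skip the sockopt hooks entirely in the common
 * case.
 */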
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum cgroup_bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

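/* Set up ctx->optval/optval_end for the BPF program: use the on-stack
 * buf->data when the (PAGE_SIZE-clamped) optlen fits, otherwise fall back
 * to a kzalloc'ed buffer.  Returns the usable length or a negative errno.
 */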
static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
			     struct bpf_sockopt_buf *buf)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	if (max_optlen <= sizeof(buf->data)) {
		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
		 * bytes avoid the cost of kzalloc.
		 */
		ctx->optval = buf->data;
		ctx->optval_end = ctx->optval + max_optlen;
		return max_optlen;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
			     struct bpf_sockopt_buf *buf)
{
	if (ctx->optval == buf->data)
		return;
	kfree(ctx->optval);
}

static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
				  struct bpf_sockopt_buf *buf)
{
	return ctx->optval != buf->data;
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_SETSOCKOPT],
				    &ctx, bpf_prog_run);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		if (*optlen > PAGE_SIZE && ctx.optlen >= 0) {
			pr_info_once("bpf setsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
				     ctx.optlen, max_optlen);
			ret = 0;
			goto out;
		}
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			/* We've used bpf_sockopt_kern->buf as an intermediary
			 * storage, but the BPF program indicates that we need
			 * to pass this data to the kernel setsockopt handler.
			 * No way to export on-stack buf, have to allocate a
			 * new buffer.
			 */
			if (!sockopt_buf_allocated(&ctx, &buf)) {
				void *p = kmalloc(ctx.optlen, GFP_USER);

				if (!p) {
					ret = -ENOMEM;
					goto out;
				}
				memcpy(p, ctx.optval, ctx.optlen);
				*kernel_optval = p;
			} else {
				*kernel_optval = ctx.optval;
			}
			/* export and don't free sockopt buf */
			return 0;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int orig_optlen;
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_GETSOCKOPT))
		return retval;

	orig_optlen = max_optlen;
	ctx.optlen = max_optlen;

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen < 0) {
			ret = -EFAULT;
			goto out;
		}
		orig_optlen = ctx.optlen;

		if (copy_from_user(ctx.optval, optval,
				   min(ctx.optlen, max_optlen)) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
				    &ctx, bpf_prog_run);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
		if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
			pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
				     ctx.optlen, max_optlen);
			ret = retval;
			goto out;
		}
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (ctx.optlen != 0) {
		if (optval && copy_to_user(optval, ctx.optval, ctx.optlen)) {
			ret = -EFAULT;
			goto out;
		}
		if (put_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
		.optlen = *optlen,
		.optval = optval,
		.optval_end = optval + *optlen,
	};
	int ret;

	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
	 * user data back into BPF buffer when retval != 0. This is
	 * done as an optimization to avoid extra copy, assuming
	 * kernel won't populate the data in case of an error.
	 * Here we always pass the data and memset() should
	 * be called if that data shouldn't be "exported".
	 */

	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
				    &ctx, bpf_prog_run);
	if (!ret)
		return -EPERM;

	if (ctx.optlen > *optlen)
		return -EFAULT;

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval)
		return -EFAULT;

	/* BPF programs can shrink the buffer, export the modifications.
	 */
	if (ctx.optlen != 0)
		*optlen = ctx.optlen;

	return ctx.retval;
}
#endif

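/* Recursively copy the path components of a sysctl directory (parents
 * first) into *bufp, advancing the buffer and remaining length and
 * terminating each component with '/'.  Returns the number of bytes
 * copied or a negative errno.
 */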
static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func = bpf_sysctl_get_name,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_ANYTHING,
};

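/* Copy a sysctl value into a BPF-provided buffer, always NUL-terminating
 * it.  Returns the copied length, -E2BIG when @dst is too small (a
 * truncated copy is still made), or -EINVAL when there is no source value
 * (@dst is zeroed).
 */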
copy_sysctl_value(char * dst,size_t dst_len,char * src,size_t src_len)1744 static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
1745 size_t src_len)
1746 {
1747 if (!dst)
1748 return -EINVAL;
1749
1750 if (!dst_len)
1751 return -E2BIG;
1752
1753 if (!src || !src_len) {
1754 memset(dst, 0, dst_len);
1755 return -EINVAL;
1756 }
1757
1758 memcpy(dst, src, min(dst_len, src_len));
1759
1760 if (dst_len > src_len) {
1761 memset(dst + src_len, '\0', dst_len - src_len);
1762 return src_len;
1763 }
1764
1765 dst[dst_len - 1] = '\0';
1766
1767 return -E2BIG;
1768 }
1769
BPF_CALL_3(bpf_sysctl_get_current_value,struct bpf_sysctl_kern *,ctx,char *,buf,size_t,buf_len)1770 BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
1771 char *, buf, size_t, buf_len)
1772 {
1773 return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
1774 }
1775
1776 static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
1777 .func = bpf_sysctl_get_current_value,
1778 .gpl_only = false,
1779 .ret_type = RET_INTEGER,
1780 .arg1_type = ARG_PTR_TO_CTX,
1781 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1782 .arg3_type = ARG_CONST_SIZE,
1783 };
1784
BPF_CALL_3(bpf_sysctl_get_new_value,struct bpf_sysctl_kern *,ctx,char *,buf,size_t,buf_len)1785 BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
1786 size_t, buf_len)
1787 {
1788 if (!ctx->write) {
1789 if (buf && buf_len)
1790 memset(buf, '\0', buf_len);
1791 return -EINVAL;
1792 }
1793 return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
1794 }
1795
1796 static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
1797 .func = bpf_sysctl_get_new_value,
1798 .gpl_only = false,
1799 .ret_type = RET_INTEGER,
1800 .arg1_type = ARG_PTR_TO_CTX,
1801 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1802 .arg3_type = ARG_CONST_SIZE,
1803 };

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};
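
/* Usage sketch (hypothetical program, not part of this file): cap the
 * value a writer may store.  bpf_strtoul() parses the proposed string;
 * if it exceeds the cap, bpf_sysctl_set_new_value() replaces it before
 * the kernel's proc handler ever sees it.
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_clamp(struct bpf_sysctl *ctx)
 *	{
 *		char val[16] = {};
 *		unsigned long v;
 *
 *		if (!ctx->write)
 *			return 1;
 *		if (bpf_sysctl_get_new_value(ctx, val, sizeof(val)) < 0)
 *			return 0;
 *		if (bpf_strtoul(val, sizeof(val), 0, &v) < 0)
 *			return 0;
 *		if (v > 1024)
 *			bpf_sysctl_set_new_value(ctx, "1024", 4);
 *		return 1;
 *	}
 */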

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}
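
/* In other words (illustration, not code from this file): the verifier
 * permits narrow reads such as a one-byte load of ctx->write, rejects any
 * store to it, and requires stores to ctx->file_pos to be exactly four
 * bytes wide, while reads of file_pos may again be narrowed.
 */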

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer, so it has to be dereferenced: first
		 * load the pointer, then access the value behind it.  For
		 * stores an additional temporary register is needed, since
		 * neither src_reg nor dst_reg may be clobbered: treg is
		 * picked from R9 downwards until it collides with neither,
		 * and its previous contents are spilled to tmp_reg around
		 * the access.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};
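
/* Attaching (illustration from userspace, assuming libbpf): a loaded
 * program of type BPF_PROG_TYPE_CGROUP_SYSCTL is wired to a cgroup with
 *
 *	bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SYSCTL, 0);
 *
 * after which it runs for every sysctl access by tasks in that cgroup.
 */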

#ifdef CONFIG_NET
BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
{
	const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;

	return net->net_cookie;
}

static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
	.func		= bpf_get_netns_cookie_sockopt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX_OR_NULL,
};
#endif
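
/* Usage sketch (hypothetical sockopt program): bpf_get_netns_cookie(ctx)
 * returns a stable identifier for the socket's network namespace, e.g. for
 * keying a map of per-netns policies; per the ARG_PTR_TO_CTX_OR_NULL proto
 * above, passing NULL yields init_net's cookie instead.
 */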

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_get_netns_cookie:
		return &bpf_get_netns_cookie_sockopt_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_setsockopt_proto;
		return NULL;
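	/* Note that bpf_sk_getsockopt(), like bpf_sk_setsockopt() above, is
	 * only offered to BPF_CGROUP_SETSOCKOPT programs; getsockopt
	 * programs get NULL here as well.
	 */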
	case BPF_FUNC_getsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_getsockopt_proto;
		return NULL;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			fallthrough;
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}
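
/* Usage sketch (hypothetical BPF_CGROUP_GETSOCKOPT program): because
 * optval/optval_end carry PTR_TO_PACKET(_END) register types, the
 * verifier demands the usual bounds check before any dereference:
 *
 *	SEC("cgroup/getsockopt")
 *	int getsockopt_audit(struct bpf_sockopt *ctx)
 *	{
 *		__u8 *optval = ctx->optval;
 *		__u8 *optval_end = ctx->optval_end;
 *
 *		if (optval + 1 > optval_end)
 *			return 1;	// out of bounds, pass through as-is
 *		// safe to read or write optval[0] here ...
 *		ctx->retval = 0;	// writable only on the getsockopt hook
 *		return 1;
 *	}
 */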

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))
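
/* For instance, CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk) expands to
 *
 *	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, sk),
 *		    si->dst_reg, si->src_reg,
 *		    offsetof(struct bpf_sockopt_kern, sk))
 *
 * i.e. a single load of the corresponding bpf_sockopt_kern field.
 */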

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for the sockopt argument: the data is allocated
	 * with kzalloc(), so it is already zeroed and safe for direct
	 * writes without any prologue.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};
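
/* Attaching (illustration from userspace, assuming libbpf): sockopt
 * programs use the same call as above with the corresponding attach
 * types, e.g.
 *
 *	bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_GETSOCKOPT, 0);
 *	bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SETSOCKOPT, 0);
 */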