/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
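
/* Sketch (not part of the original header): the attach and detach paths
 * are expected to toggle this key, e.g.
 *
 *	static_branch_inc(&cgroup_bpf_enabled_key);	// first prog attached
 *	static_branch_dec(&cgroup_bpf_enabled_key);	// last prog detached
 *
 * so that the BPF_CGROUP_RUN_* macros below compile down to a patched-out
 * jump and cost nothing while no cgroup-bpf program is attached.
 */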

#define BPF_CGROUP_STORAGE_NEST_MAX	8

struct bpf_cgroup_storage_info {
	struct task_struct *task;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

/* On each CPU, allow at most BPF_CGROUP_STORAGE_NEST_MAX tasks to use
 * bpf cgroup storage simultaneously.
 */
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* progs attached to this cgroup and their attach flags.
	 * When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list holds
	 * zero or one element; with BPF_F_ALLOW_MULTI it can hold up to
	 * BPF_CGROUP_MAX_PROGS entries.
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* list of cgroup shared storages */
	struct list_head storages;

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};
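
/* Minimal userspace sketch (illustrative, not part of this header): with
 * libbpf, a caller holding an already-loaded prog_fd and an open cgroup
 * directory fd could attach in multi-program mode like so:
 *
 *	#include <bpf/bpf.h>
 *
 *	int err = bpf_prog_attach(prog_fd, cgroup_fd,
 *				  BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
 *
 * With BPF_F_ALLOW_MULTI, further programs may be attached to the same
 * (cgroup, type) pair, up to BPF_CGROUP_MAX_PROGS; with flags == 0 the
 * progs list holds at most one program, as described above.
 */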

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp,
		      struct bpf_prog *prog, struct bpf_prog *replace_prog,
		      struct bpf_cgroup_link *link, enum bpf_attach_type type,
		      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
					 *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;
	int i, err = 0;

	preempt_disable();
	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
			continue;

		this_cpu_write(bpf_cgroup_storage_info[i].task, current);
		for_each_cgroup_storage_type(stype)
			this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
				       storage[stype]);
		goto out;
	}
	err = -EBUSY;
	WARN_ON_ONCE(1);

out:
	preempt_enable();
	return err;
}

static inline void bpf_cgroup_storage_unset(void)
{
	int i;

	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
		return;
	}
}
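
/* Illustrative pairing sketch (not part of this header): the prog-array
 * runners in <linux/bpf.h> are expected to bracket each program run with
 * these helpers so that the bpf_get_local_storage() helper can find the
 * storage belonging to the current task:
 *
 *	if (unlikely(bpf_cgroup_storage_set(item->cgroup_storage)))
 *		break;				// all NEST_MAX per-cpu slots busy
 *	ret = BPF_PROG_RUN(prog, ctx);
 *	bpf_cgroup_storage_unset();
 *
 * "item" and "ctx" above are hypothetical names used for illustration.
 */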

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
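
/* Call-site sketch (hypothetical function names, for illustration only):
 * a transmit path that has resolved the owning socket would gate the skb
 * on the cgroup egress programs like so:
 *
 *	static int example_xmit(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 *
 *		if (ret) {
 *			kfree_skb(skb);
 *			return ret;
 *		}
 *		return do_transmit(skb);	// hypothetical
 *	}
 */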

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
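
/* Call-site sketch (hypothetical, for illustration): socket creation and
 * release paths run the sk-based hooks once the socket exists, e.g.
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);	// at create time
 *	if (err) {
 *		sk_common_release(sk);		// hypothetical cleanup choice
 *		return err;
 *	}
 *
 * The POST_BIND variants run after a successful bind(), once the local
 * address and port of the socket are known.
 */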

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
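
/* Call-site sketch (hypothetical wrapper, for illustration only): a
 * connect path would run the cgroup hook on the user-supplied address
 * before the protocol's own connect, e.g.
 *
 *	static int example_connect(struct sock *sk, struct sockaddr *uaddr,
 *				   int addr_len)
 *	{
 *		int err = BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
 *
 *		if (err)
 *			return err;
 *		return proto_connect(sk, uaddr, addr_len);	// hypothetical
 *	}
 *
 * The _LOCK variants take and release the socket lock around the run, so
 * they must not be called with the socket lock already held.
 */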

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock in syncookie mode, where no
 * listener-sk is attached as rsk_listener.  In that case the caller
 * holds the (unlocked) listener-sk, sets sock_ops->sk to the req_sk,
 * and calls this SOCK_OPS"_SK" macro with the listener-sk so that the
 * cgroup-bpf progs of the listener-sk are run.
 *
 * Syncookie mode or not, calling bpf_setsockopt on a listener-sk does
 * not make sense anyway, so passing 'sock_ops->sk == req_sk' to the
 * bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		\
							 sock_ops,	\
							 BPF_CGROUP_SOCK_OPS); \
	__ret;								\
})
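
/* Usage sketch for the _SK variant (hypothetical, for illustration): in a
 * syncookie-style path the caller supplies the listener socket explicitly
 * while sock_ops.sk carries the request_sock:
 *
 *	struct bpf_sock_ops_kern sock_ops = { ... };
 *
 *	sock_ops.sk = req_to_sk(req);		// not a fullsock
 *	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, listener_sk);
 *
 * "req" and "listener_sk" are hypothetical names; req_to_sk() is the
 * request_sock-to-sock accessor from <net/request_sock.h>.
 */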

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})
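
/* Usage sketch (illustrative): a device-access check would map the device
 * node and the requested access onto this hook, e.g.
 *
 *	rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
 *	if (rc)
 *		return -EPERM;
 *
 * where "type" distinguishes block vs. char devices and "access" is the
 * read/write/mknod mask checked by the attached BPF_CGROUP_DEVICE progs.
 */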

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)  \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos,        \
						       BPF_CGROUP_SYSCTL);     \
	__ret;								       \
})
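
/* Usage sketch (illustrative; loosely modeled on the procfs sysctl path):
 * the hook sees the ctl_table entry and may veto the access or rewrite
 * the buffer before a write is applied or a read is returned:
 *
 *	error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write,
 *					   &kbuf, &count, &ppos);
 *	if (error)
 *		goto out;		// hypothetical error path
 *
 * "kbuf" is a kernel copy of the user buffer; the hook may replace it,
 * which is why a char ** is passed.
 */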

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		get_user(__ret, optlen);				       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_getsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen, max_optlen, \
							   retval);	       \
	__ret;								       \
})
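
/* Wiring sketch (illustrative; loosely follows the getsockopt syscall
 * path): the caller first snapshots the user-supplied optlen as the upper
 * bound, lets the protocol fill optval, then gives cgroup-bpf a chance to
 * inspect or rewrite the result:
 *
 *	int max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 *
 *	err = do_proto_getsockopt(sock, level, optname, optval, optlen);
 *	err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
 *					     optval, optlen, max_optlen,
 *					     err);
 *
 * do_proto_getsockopt() is a hypothetical stand-in for the protocol
 * handler; the macro yields the (possibly overridden) error code.
 */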

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
static inline void bpf_cgroup_storage_unset(void) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
					void *key, void *value, u64 flags)
{
	return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */