1 R"********(
2 /*
3  * Copyright (c) 2015 PLUMgrid, Inc.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 #ifndef __BPF_HELPERS_H
18 #define __BPF_HELPERS_H
19 
20 /* Before bpf_helpers.h is included, uapi bpf.h has been
21  * included, which references linux/types.h. This will bring
22  * in asm_volatile_goto definition if permitted based on
23  * compiler setup and kernel configs.
24  *
25  * clang does not support "asm volatile goto" yet.
26  * So redefine asm_volatile_goto to some invalid asm code.
27  * If asm_volatile_goto is actually used by the bpf program,
28  * a compilation error will appear.
29  */
30 #ifdef asm_volatile_goto
31 #undef asm_volatile_goto
32 #define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
33 #endif
34 
35 #include <uapi/linux/bpf.h>
36 #include <uapi/linux/if_packet.h>
37 #include <linux/version.h>
38 #include <linux/log2.h>
39 
40 #ifndef CONFIG_BPF_SYSCALL
41 #error "CONFIG_BPF_SYSCALL is undefined, please check your .config or ask your Linux distro to enable this feature"
42 #endif
43 
44 #ifdef PERF_MAX_STACK_DEPTH
45 #define BPF_MAX_STACK_DEPTH PERF_MAX_STACK_DEPTH
46 #else
47 #define BPF_MAX_STACK_DEPTH 127
48 #endif
49 
50 /* helper macro to place programs, maps, license in
51  * different sections in elf_bpf file. Section names
52  * are interpreted by elf_bpf loader
53  */
54 #define SEC(NAME) __attribute__((section(NAME), used))
55 
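/* Illustrative sketch (not part of the original header): SEC() pins a symbol
 * into a named ELF section so the loader can find it. A classic use from
 * standalone elf_bpf programs is declaring the license; bcc normally emits
 * these sections itself, so this is only an example and the names below are
 * assumptions:
 *
 *   // char _license[] SEC("license") = "GPL";
 *   // SEC("kprobe/some_func")
 *   // int probe_handler(struct pt_regs *ctx) { return 0; }
 */
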
// Changes to the macro require changes in BFrontendAction classes
#define BPF_F_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries, _flags) \
struct _name##_table_t { \
  _key_type key; \
  _leaf_type leaf; \
  _leaf_type * (*lookup) (_key_type *); \
  _leaf_type * (*lookup_or_init) (_key_type *, _leaf_type *); \
  int (*update) (_key_type *, _leaf_type *); \
  int (*insert) (_key_type *, _leaf_type *); \
  int (*delete) (_key_type *); \
  void (*call) (void *, int index); \
  void (*increment) (_key_type, ...); \
  int (*get_stackid) (void *, u64); \
  u32 max_entries; \
  int flags; \
}; \
__attribute__((section("maps/" _table_type))) \
struct _name##_table_t _name = { .flags = (_flags), .max_entries = (_max_entries) }

#define BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries) \
BPF_F_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries, 0)

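/* Illustrative sketch (not part of the original header): BPF_TABLE declares a
 * map together with method-style accessors that the bcc frontend (see the
 * BFrontendAction note above) rewrites into bpf_map_*_elem calls. The names
 * flow_stats and struct stats_t are hypothetical:
 *
 *   // struct stats_t { u64 pkts; u64 bytes; };
 *   // BPF_TABLE("hash", u32, struct stats_t, flow_stats, 1024);
 *   //
 *   // inside a program:
 *   //   struct stats_t zero = {}, *s = flow_stats.lookup_or_init(&key, &zero);
 *   //   if (s) s->pkts++;
 */
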
// define a table same as above but allow it to be referenced by other modules
#define BPF_TABLE_PUBLIC(_table_type, _key_type, _leaf_type, _name, _max_entries) \
BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries); \
__attribute__((section("maps/export"))) \
struct _name##_table_t __##_name

// define a table that is shared across the programs in the same namespace
#define BPF_TABLE_SHARED(_table_type, _key_type, _leaf_type, _name, _max_entries) \
BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries); \
__attribute__((section("maps/shared"))) \
struct _name##_table_t __##_name

// Identifier for the current CPU, used in perf_submit and perf_read.
// Prefers the BPF_F_CURRENT_CPU flag and falls back to calling the helper on older kernels.
// Can be overridden from BCC.
#ifndef CUR_CPU_IDENTIFIER
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
#define CUR_CPU_IDENTIFIER BPF_F_CURRENT_CPU
#else
#define CUR_CPU_IDENTIFIER bpf_get_smp_processor_id()
#endif
#endif

// Table for pushing custom events to userspace via a perf ring buffer
#define BPF_PERF_OUTPUT(_name) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  /* map.perf_submit(ctx, data, data_size) */ \
  int (*perf_submit) (void *, void *, u32); \
  int (*perf_submit_skb) (void *, u32, void *, u32); \
  u32 max_entries; \
}; \
__attribute__((section("maps/perf_output"))) \
struct _name##_table_t _name = { .max_entries = 0 }

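/* Illustrative sketch (not part of the original header): the usual pattern is
 * to declare an output table, fill a local struct, and submit it to
 * userspace. The names events, struct data_t, and trace_entry are
 * hypothetical:
 *
 *   // BPF_PERF_OUTPUT(events);
 *   // struct data_t { u32 pid; char comm[16]; };
 *   //
 *   // int trace_entry(struct pt_regs *ctx) {
 *   //   struct data_t data = {};
 *   //   data.pid = bpf_get_current_pid_tgid() >> 32;
 *   //   bpf_get_current_comm(&data.comm, sizeof(data.comm));
 *   //   events.perf_submit(ctx, &data, sizeof(data));
 *   //   return 0;
 *   // }
 */
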
// Table for reading hw perf cpu counters
#define BPF_PERF_ARRAY(_name, _max_entries) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  /* counter = map.perf_read(index) */ \
  u64 (*perf_read) (int); \
  int (*perf_counter_value) (int, void *, u32); \
  u32 max_entries; \
}; \
__attribute__((section("maps/perf_array"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

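/* Illustrative sketch (not part of the original header): a perf array is
 * typically sized to the number of CPUs and read with the current CPU as the
 * index. The name cycles_cnt is hypothetical, and NUM_CPUS is assumed to be
 * supplied by the userspace loader (e.g. via cflags):
 *
 *   // BPF_PERF_ARRAY(cycles_cnt, NUM_CPUS);
 *   //
 *   // inside a program:
 *   //   u64 cycles = cycles_cnt.perf_read(CUR_CPU_IDENTIFIER);
 */
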
// Table for cgroup file descriptors
#define BPF_CGROUP_ARRAY(_name, _max_entries) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  int (*check_current_task) (int); \
  u32 max_entries; \
}; \
__attribute__((section("maps/cgroup_array"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

#define BPF_HASH1(_name) \
  BPF_TABLE("hash", u64, u64, _name, 10240)
#define BPF_HASH2(_name, _key_type) \
  BPF_TABLE("hash", _key_type, u64, _name, 10240)
#define BPF_HASH3(_name, _key_type, _leaf_type) \
  BPF_TABLE("hash", _key_type, _leaf_type, _name, 10240)
#define BPF_HASH4(_name, _key_type, _leaf_type, _size) \
  BPF_TABLE("hash", _key_type, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_HASHX(_1, _2, _3, _4, NAME, ...) NAME

// Define a hash map, some arguments optional
// BPF_HASH(name, key_type=u64, leaf_type=u64, size=10240)
#define BPF_HASH(...) \
  BPF_HASHX(__VA_ARGS__, BPF_HASH4, BPF_HASH3, BPF_HASH2, BPF_HASH1)(__VA_ARGS__)

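/* Illustrative sketch (not part of the original header): counting events per
 * PID with the default u64 key/leaf types. The names counts and do_count are
 * hypothetical:
 *
 *   // BPF_HASH(counts);
 *   //
 *   // int do_count(struct pt_regs *ctx) {
 *   //   u64 pid = bpf_get_current_pid_tgid() >> 32;
 *   //   counts.increment(pid);
 *   //   return 0;
 *   // }
 */
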
#define BPF_ARRAY1(_name) \
  BPF_TABLE("array", int, u64, _name, 10240)
#define BPF_ARRAY2(_name, _leaf_type) \
  BPF_TABLE("array", int, _leaf_type, _name, 10240)
#define BPF_ARRAY3(_name, _leaf_type, _size) \
  BPF_TABLE("array", int, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_ARRAYX(_1, _2, _3, NAME, ...) NAME

// Define an array map, some arguments optional
// BPF_ARRAY(name, leaf_type=u64, size=10240)
#define BPF_ARRAY(...) \
  BPF_ARRAYX(__VA_ARGS__, BPF_ARRAY3, BPF_ARRAY2, BPF_ARRAY1)(__VA_ARGS__)

#define BPF_PERCPU_ARRAY1(_name)                        \
    BPF_TABLE("percpu_array", int, u64, _name, 10240)
#define BPF_PERCPU_ARRAY2(_name, _leaf_type) \
    BPF_TABLE("percpu_array", int, _leaf_type, _name, 10240)
#define BPF_PERCPU_ARRAY3(_name, _leaf_type, _size) \
    BPF_TABLE("percpu_array", int, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_PERCPU_ARRAYX(_1, _2, _3, NAME, ...) NAME

// Define a per-CPU array map, some arguments optional
// BPF_PERCPU_ARRAY(name, leaf_type=u64, size=10240)
#define BPF_PERCPU_ARRAY(...)                                           \
  BPF_PERCPU_ARRAYX(                                                    \
    __VA_ARGS__, BPF_PERCPU_ARRAY3, BPF_PERCPU_ARRAY2, BPF_PERCPU_ARRAY1) \
           (__VA_ARGS__)

#define BPF_HIST1(_name) \
  BPF_TABLE("histogram", int, u64, _name, 64)
#define BPF_HIST2(_name, _key_type) \
  BPF_TABLE("histogram", _key_type, u64, _name, 64)
#define BPF_HIST3(_name, _key_type, _size) \
  BPF_TABLE("histogram", _key_type, u64, _name, _size)
#define BPF_HISTX(_1, _2, _3, NAME, ...) NAME

// Define a histogram, some arguments optional
// BPF_HISTOGRAM(name, key_type=int, size=64)
#define BPF_HISTOGRAM(...) \
  BPF_HISTX(__VA_ARGS__, BPF_HIST3, BPF_HIST2, BPF_HIST1)(__VA_ARGS__)

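/* Illustrative sketch (not part of the original header): a log2 latency
 * histogram, using bpf_log2l() (defined later in this file) to bucket a
 * nanosecond delta. The names dist and delta_ns are hypothetical:
 *
 *   // BPF_HISTOGRAM(dist);
 *   //
 *   // inside a program, given a computed u64 delta_ns:
 *   //   dist.increment(bpf_log2l(delta_ns));
 */
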
#define BPF_LPM_TRIE1(_name) \
  BPF_F_TABLE("lpm_trie", u64, u64, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE2(_name, _key_type) \
  BPF_F_TABLE("lpm_trie", _key_type, u64, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE3(_name, _key_type, _leaf_type) \
  BPF_F_TABLE("lpm_trie", _key_type, _leaf_type, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE4(_name, _key_type, _leaf_type, _size) \
  BPF_F_TABLE("lpm_trie", _key_type, _leaf_type, _name, _size, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIEX(_1, _2, _3, _4, NAME, ...) NAME

// Define an LPM trie map, some arguments optional
// BPF_LPM_TRIE(name, key_type=u64, leaf_type=u64, size=10240)
#define BPF_LPM_TRIE(...) \
  BPF_LPM_TRIEX(__VA_ARGS__, BPF_LPM_TRIE4, BPF_LPM_TRIE3, BPF_LPM_TRIE2, BPF_LPM_TRIE1)(__VA_ARGS__)

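/* Illustrative sketch (not part of the original header): LPM trie keys must
 * begin with a u32 prefix length followed by the data to match, so a custom
 * key struct is usually supplied. The names routes and struct lpm_key_v4 are
 * hypothetical:
 *
 *   // struct lpm_key_v4 { u32 prefixlen; u32 addr; };
 *   // BPF_LPM_TRIE(routes, struct lpm_key_v4, u64, 1024);
 *   //
 *   // inside a program:
 *   //   struct lpm_key_v4 key = { .prefixlen = 32, .addr = daddr };
 *   //   u64 *val = routes.lookup(&key);
 */
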
struct bpf_stacktrace {
  u64 ip[BPF_MAX_STACK_DEPTH];
};

#define BPF_STACK_TRACE(_name, _max_entries) \
  BPF_TABLE("stacktrace", int, struct bpf_stacktrace, _name, roundup_pow_of_two(_max_entries))

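/* Illustrative sketch (not part of the original header): capture kernel and
 * user stack ids and store them alongside other data. The table name stacks
 * is hypothetical:
 *
 *   // BPF_STACK_TRACE(stacks, 1024);
 *   //
 *   // inside a program:
 *   //   int kstack = stacks.get_stackid(ctx, 0);
 *   //   int ustack = stacks.get_stackid(ctx, BPF_F_USER_STACK);
 *   //   negative return values indicate an error; see bcc_get_stackid below.
 */
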
#define BPF_PROG_ARRAY(_name, _max_entries) \
  BPF_TABLE("prog", u32, u32, _name, _max_entries)

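/* Illustrative sketch (not part of the original header): a program array is
 * used with map.call() (rewritten into a bpf_tail_call by the bcc frontend)
 * to jump to another program. The name prog_table and the index are
 * hypothetical; userspace is expected to populate the slots:
 *
 *   // BPF_PROG_ARRAY(prog_table, 8);
 *   //
 *   // inside a program:
 *   //   prog_table.call(ctx, 2);  // tail-call the program at index 2
 *   //   // execution continues here only if the tail call fails
 */
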
#define BPF_XDP_REDIRECT_MAP(_table_type, _leaf_type, _name, _max_entries) \
struct _name##_table_t { \
  u32 key; \
  _leaf_type leaf; \
  /* xdp_act = map.redirect_map(index, flag) */ \
  u64 (*redirect_map) (int, int); \
  u32 max_entries; \
}; \
__attribute__((section("maps/"_table_type))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

#define BPF_DEVMAP(_name, _max_entries) \
  BPF_XDP_REDIRECT_MAP("devmap", int, _name, _max_entries)

#define BPF_CPUMAP(_name, _max_entries) \
  BPF_XDP_REDIRECT_MAP("cpumap", u32, _name, _max_entries)

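/* Illustrative sketch (not part of the original header): in an XDP program, a
 * devmap can redirect packets to another interface by map index. The names
 * tx_port and xdp_redirector are hypothetical; userspace fills the map with
 * ifindexes:
 *
 *   // BPF_DEVMAP(tx_port, 8);
 *   //
 *   // int xdp_redirector(struct xdp_md *ctx) {
 *   //   return tx_port.redirect_map(0, 0);
 *   // }
 */
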
// packet parsing state machine helpers
#define cursor_advance(_cursor, _len) \
  ({ void *_tmp = _cursor; _cursor += _len; _tmp; })

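/* Illustrative sketch (not part of the original header): cursor_advance
 * returns the current cursor position and then moves it forward, which makes
 * sequential header parsing compact. The struct names follow bcc's proto.h
 * convention but are assumptions here:
 *
 *   // u8 *cursor = 0;
 *   // struct ethernet_t *eth = cursor_advance(cursor, sizeof(*eth));
 *   // struct ip_t *ip = cursor_advance(cursor, sizeof(*ip));
 */
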
#ifdef LINUX_VERSION_CODE_OVERRIDE
unsigned _version SEC("version") = LINUX_VERSION_CODE_OVERRIDE;
#else
unsigned _version SEC("version") = LINUX_VERSION_CODE;
#endif

/* helper functions called from eBPF programs written in C */
static void *(*bpf_map_lookup_elem)(void *map, void *key) =
  (void *) BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem)(void *map, void *key, void *value, u64 flags) =
  (void *) BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem)(void *map, void *key) =
  (void *) BPF_FUNC_map_delete_elem;
static int (*bpf_probe_read)(void *dst, u64 size, const void *unsafe_ptr) =
  (void *) BPF_FUNC_probe_read;
static u64 (*bpf_ktime_get_ns)(void) =
  (void *) BPF_FUNC_ktime_get_ns;
static u32 (*bpf_get_prandom_u32)(void) =
  (void *) BPF_FUNC_get_prandom_u32;
static int (*bpf_trace_printk_)(const char *fmt, u64 fmt_size, ...) =
  (void *) BPF_FUNC_trace_printk;
static int (*bpf_probe_read_str)(void *dst, u64 size, const void *unsafe_ptr) =
  (void *) BPF_FUNC_probe_read_str;
int bpf_trace_printk(const char *fmt, ...) asm("llvm.bpf.extra");
static inline __attribute__((always_inline))
void bpf_tail_call_(u64 map_fd, void *ctx, int index) {
  ((void (*)(void *, u64, int))BPF_FUNC_tail_call)(ctx, map_fd, index);
}
static int (*bpf_clone_redirect)(void *ctx, int ifindex, u32 flags) =
  (void *) BPF_FUNC_clone_redirect;
static u64 (*bpf_get_smp_processor_id)(void) =
  (void *) BPF_FUNC_get_smp_processor_id;
static u64 (*bpf_get_current_pid_tgid)(void) =
  (void *) BPF_FUNC_get_current_pid_tgid;
static u64 (*bpf_get_current_uid_gid)(void) =
  (void *) BPF_FUNC_get_current_uid_gid;
static int (*bpf_get_current_comm)(void *buf, int buf_size) =
  (void *) BPF_FUNC_get_current_comm;
static u64 (*bpf_get_cgroup_classid)(void *ctx) =
  (void *) BPF_FUNC_get_cgroup_classid;
static u64 (*bpf_skb_vlan_push)(void *ctx, u16 proto, u16 vlan_tci) =
  (void *) BPF_FUNC_skb_vlan_push;
static u64 (*bpf_skb_vlan_pop)(void *ctx) =
  (void *) BPF_FUNC_skb_vlan_pop;
static int (*bpf_skb_get_tunnel_key)(void *ctx, void *to, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_get_tunnel_key;
static int (*bpf_skb_set_tunnel_key)(void *ctx, void *from, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_set_tunnel_key;
static u64 (*bpf_perf_event_read)(void *map, u64 flags) =
  (void *) BPF_FUNC_perf_event_read;
static int (*bpf_redirect)(int ifindex, u32 flags) =
  (void *) BPF_FUNC_redirect;
static u32 (*bpf_get_route_realm)(void *ctx) =
  (void *) BPF_FUNC_get_route_realm;
static int (*bpf_perf_event_output)(void *ctx, void *map, u64 index, void *data, u32 size) =
  (void *) BPF_FUNC_perf_event_output;
static int (*bpf_skb_load_bytes)(void *ctx, int offset, void *to, u32 len) =
  (void *) BPF_FUNC_skb_load_bytes;
static int (*bpf_perf_event_read_value)(void *map, u64 flags, void *buf, u32 buf_size) =
  (void *) BPF_FUNC_perf_event_read_value;
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf, u32 buf_size) =
  (void *) BPF_FUNC_perf_prog_read_value;
static int (*bpf_current_task_under_cgroup)(void *map, int index) =
  (void *) BPF_FUNC_current_task_under_cgroup;
static u32 (*bpf_get_socket_cookie)(void *ctx) =
  (void *) BPF_FUNC_get_socket_cookie;
static u64 (*bpf_get_socket_uid)(void *ctx) =
  (void *) BPF_FUNC_get_socket_uid;
static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval, int optlen) =
  (void *) BPF_FUNC_getsockopt;
static int (*bpf_redirect_map)(void *map, int key, int flags) =
  (void *) BPF_FUNC_redirect_map;
static int (*bpf_set_hash)(void *ctx, u32 hash) =
  (void *) BPF_FUNC_set_hash;
static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, int optlen) =
  (void *) BPF_FUNC_setsockopt;
static int (*bpf_skb_adjust_room)(void *ctx, int len_diff, u32 mode, u64 flags) =
  (void *) BPF_FUNC_skb_adjust_room;
static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
  (void *) BPF_FUNC_skb_under_cgroup;
static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
  (void *) BPF_FUNC_sk_redirect_map;
static int (*bpf_sock_map_update)(void *map, void *key, void *value, unsigned long long flags) =
  (void *) BPF_FUNC_sock_map_update;
static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_meta;

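/* Illustrative sketch (not part of the original header): the helper pointers
 * above are called like ordinary functions from program code, e.g. reading a
 * kernel value and timestamping it. some_kernel_ptr is a hypothetical pointer
 * obtained from a probe argument:
 *
 *   // u64 ts = bpf_ktime_get_ns();
 *   // u64 value = 0;
 *   // bpf_probe_read(&value, sizeof(value), some_kernel_ptr);
 *   // bpf_trace_printk("read %llu at %llu\n", value, ts);
 */
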
/* bcc_get_stackid will return a negative value in the case of an error
 *
 * BPF_STACK_TRACE(_name, _size) will allocate space for _size stack traces.
 *  -ENOMEM will be returned when this limit is reached.
 *
 * -EFAULT is typically returned when requesting user-space stack traces (using
 * BPF_F_USER_STACK) for kernel threads. However, a valid stackid may be
 * returned in some cases; consider a tracepoint or kprobe executing in the
 * kernel context. Given this, you can typically ignore -EFAULT errors when
 * retrieving user-space stack traces.
 */
static int (*bcc_get_stackid_)(void *ctx, void *map, u64 flags) =
  (void *) BPF_FUNC_get_stackid;
static inline __attribute__((always_inline))
int bcc_get_stackid(uintptr_t map, void *ctx, u64 flags) {
  return bcc_get_stackid_(ctx, (void *)map, flags);
}

static int (*bpf_csum_diff)(void *from, u64 from_size, void *to, u64 to_size, u64 seed) =
  (void *) BPF_FUNC_csum_diff;
static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, u32 size) =
  (void *) BPF_FUNC_skb_get_tunnel_opt;
static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, u32 size) =
  (void *) BPF_FUNC_skb_set_tunnel_opt;
static int (*bpf_skb_change_proto)(void *ctx, u16 proto, u64 flags) =
  (void *) BPF_FUNC_skb_change_proto;
static int (*bpf_skb_change_type)(void *ctx, u32 type) =
  (void *) BPF_FUNC_skb_change_type;
static u32 (*bpf_get_hash_recalc)(void *ctx) =
  (void *) BPF_FUNC_get_hash_recalc;
static u64 (*bpf_get_current_task)(void) =
  (void *) BPF_FUNC_get_current_task;
static int (*bpf_probe_write_user)(void *dst, void *src, u32 size) =
  (void *) BPF_FUNC_probe_write_user;
static int (*bpf_skb_change_tail)(void *ctx, u32 new_len, u64 flags) =
  (void *) BPF_FUNC_skb_change_tail;
static int (*bpf_skb_pull_data)(void *ctx, u32 len) =
  (void *) BPF_FUNC_skb_pull_data;
static int (*bpf_csum_update)(void *ctx, u16 csum) =
  (void *) BPF_FUNC_csum_update;
static int (*bpf_set_hash_invalid)(void *ctx) =
  (void *) BPF_FUNC_set_hash_invalid;
static int (*bpf_get_numa_node_id)(void) =
  (void *) BPF_FUNC_get_numa_node_id;
static int (*bpf_skb_change_head)(void *ctx, u32 len, u64 flags) =
  (void *) BPF_FUNC_skb_change_head;
static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_head;
static int (*bpf_override_return)(void *pt_regs, unsigned long rc) =
  (void *) BPF_FUNC_override_return;
static int (*bpf_sock_ops_cb_flags_set)(void *skops, int flags) =
  (void *) BPF_FUNC_sock_ops_cb_flags_set;
static int (*bpf_msg_redirect_map)(void *msg, void *map, u32 key, u64 flags) =
  (void *) BPF_FUNC_msg_redirect_map;
static int (*bpf_msg_apply_bytes)(void *msg, u32 bytes) =
  (void *) BPF_FUNC_msg_apply_bytes;
static int (*bpf_msg_cork_bytes)(void *msg, u32 bytes) =
  (void *) BPF_FUNC_msg_cork_bytes;
static int (*bpf_msg_pull_data)(void *msg, u32 start, u32 end, u64 flags) =
  (void *) BPF_FUNC_msg_pull_data;
static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
  (void *) BPF_FUNC_bind;
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_tail;
static int (*bpf_skb_get_xfrm_state)(void *ctx, u32 index, void *xfrm_state, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_get_xfrm_state;
static int (*bpf_get_stack)(void *ctx, void *buf, u32 size, u64 flags) =
  (void *) BPF_FUNC_get_stack;
static int (*bpf_skb_load_bytes_relative)(void *ctx, u32 offset, void *to, u32 len, u32 start_header) =
  (void *) BPF_FUNC_skb_load_bytes_relative;
static int (*bpf_fib_lookup)(void *ctx, void *params, int plen, u32 flags) =
  (void *) BPF_FUNC_fib_lookup;
static int (*bpf_sock_hash_update)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sock_hash_update;
static int (*bpf_msg_redirect_hash)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_msg_redirect_hash;
static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sk_redirect_hash;
static int (*bpf_lwt_push_encap)(void *skb, u32 type, void *hdr, u32 len) =
  (void *) BPF_FUNC_lwt_push_encap;
static int (*bpf_lwt_seg6_store_bytes)(void *ctx, u32 offset, const void *from, u32 len) =
  (void *) BPF_FUNC_lwt_seg6_store_bytes;
static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, u32 offset, s32 delta) =
  (void *) BPF_FUNC_lwt_seg6_adjust_srh;
static int (*bpf_lwt_seg6_action)(void *ctx, u32 action, void *param, u32 param_len) =
  (void *) BPF_FUNC_lwt_seg6_action;
static int (*bpf_rc_keydown)(void *ctx, u32 protocol, u64 scancode, u32 toggle) =
  (void *) BPF_FUNC_rc_keydown;
static int (*bpf_rc_repeat)(void *ctx) =
  (void *) BPF_FUNC_rc_repeat;
static u64 (*bpf_skb_cgroup_id)(void *skb) =
  (void *) BPF_FUNC_skb_cgroup_id;
static u64 (*bpf_get_current_cgroup_id)(void) =
  (void *) BPF_FUNC_get_current_cgroup_id;
static u64 (*bpf_skb_ancestor_cgroup_id)(void *skb, int ancestor_level) =
  (void *) BPF_FUNC_skb_ancestor_cgroup_id;
static void * (*bpf_get_local_storage)(void *map, u64 flags) =
  (void *) BPF_FUNC_get_local_storage;
static int (*bpf_sk_select_reuseport)(void *reuse, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sk_select_reuseport;
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
                                             int size, unsigned int netns_id,
                                             unsigned long long flags) =
  (void *) BPF_FUNC_sk_lookup_tcp;
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
                                             int size, unsigned int netns_id,
                                             unsigned long long flags) =
  (void *) BPF_FUNC_sk_lookup_udp;
static int (*bpf_sk_release)(struct bpf_sock *sk) =
  (void *) BPF_FUNC_sk_release;
static int (*bpf_map_push_elem)(void *map, const void *value, u64 flags) =
  (void *) BPF_FUNC_map_push_elem;
static int (*bpf_map_pop_elem)(void *map, void *value) =
  (void *) BPF_FUNC_map_pop_elem;
static int (*bpf_map_peek_elem)(void *map, void *value) =
  (void *) BPF_FUNC_map_peek_elem;
static int (*bpf_msg_push_data)(void *skb, u32 start, u32 len, u64 flags) =
  (void *) BPF_FUNC_msg_push_data;
static int (*bpf_msg_pop_data)(void *msg, u32 start, u32 pop, u64 flags) =
  (void *) BPF_FUNC_msg_pop_data;
static int (*bpf_rc_pointer_rel)(void *ctx, s32 rel_x, s32 rel_y) =
  (void *) BPF_FUNC_rc_pointer_rel;

/* llvm builtin functions that eBPF C program may use to
 * emit BPF_LD_ABS and BPF_LD_IND instructions
 */
struct sk_buff;
unsigned long long load_byte(void *skb,
  unsigned long long off) asm("llvm.bpf.load.byte");
unsigned long long load_half(void *skb,
  unsigned long long off) asm("llvm.bpf.load.half");
unsigned long long load_word(void *skb,
  unsigned long long off) asm("llvm.bpf.load.word");

/* a helper structure used by eBPF C program
 * to describe map attributes to elf_bpf loader
 */
struct bpf_map_def {
  unsigned int type;
  unsigned int key_size;
  unsigned int value_size;
  unsigned int max_entries;
};

static int (*bpf_skb_store_bytes)(void *ctx, unsigned long long off, void *from,
                                  unsigned long long len, unsigned long long flags) =
  (void *) BPF_FUNC_skb_store_bytes;
static int (*bpf_l3_csum_replace)(void *ctx, unsigned long long off, unsigned long long from,
                                  unsigned long long to, unsigned long long flags) =
  (void *) BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(void *ctx, unsigned long long off, unsigned long long from,
                                  unsigned long long to, unsigned long long flags) =
  (void *) BPF_FUNC_l4_csum_replace;

static inline __attribute__((always_inline))
u16 bpf_ntohs(u16 val) {
  /* gcc recognizes this pattern as a rotate and eventually emits a rolw 8 insn */
  return (val << 8) | (val >> 8);
}

static inline __attribute__((always_inline))
u32 bpf_ntohl(u32 val) {
  /* gcc will use bswapsi2 insn */
  return __builtin_bswap32(val);
}

static inline __attribute__((always_inline))
u64 bpf_ntohll(u64 val) {
  /* gcc will use bswapdi2 insn */
  return __builtin_bswap64(val);
}

static inline __attribute__((always_inline))
unsigned __int128 bpf_ntoh128(unsigned __int128 val) {
  return (((unsigned __int128)bpf_ntohll(val) << 64) | (u64)bpf_ntohll(val >> 64));
}

static inline __attribute__((always_inline))
u16 bpf_htons(u16 val) {
  return bpf_ntohs(val);
}

static inline __attribute__((always_inline))
u32 bpf_htonl(u32 val) {
  return bpf_ntohl(val);
}

static inline __attribute__((always_inline))
u64 bpf_htonll(u64 val) {
  return bpf_ntohll(val);
}

static inline __attribute__((always_inline))
unsigned __int128 bpf_hton128(unsigned __int128 val) {
  return bpf_ntoh128(val);
}

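/* Illustrative sketch (not part of the original header): network headers are
 * big-endian, so fields read directly from packet memory are usually
 * converted before comparison. The tcp pointer and field names below are
 * hypothetical:
 *
 *   // if (tcp->dst_port == bpf_htons(80)) { ... }
 *   // u32 host_seq = bpf_ntohl(tcp->seq);
 */
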
static inline __attribute__((always_inline))
u64 load_dword(void *skb, u64 off) {
  return ((u64)load_word(skb, off) << 32) | load_word(skb, off + 4);
}

void bpf_store_byte(void *skb, u64 off, u64 val) asm("llvm.bpf.store.byte");
void bpf_store_half(void *skb, u64 off, u64 val) asm("llvm.bpf.store.half");
void bpf_store_word(void *skb, u64 off, u64 val) asm("llvm.bpf.store.word");
u64 bpf_pseudo_fd(u64, u64) asm("llvm.bpf.pseudo");

static inline void __attribute__((always_inline))
bpf_store_dword(void *skb, u64 off, u64 val) {
  bpf_store_word(skb, off, (u32)val);
  bpf_store_word(skb, off + 4, val >> 32);
}

#define MASK(_n) ((_n) < 64 ? (1ull << (_n)) - 1 : ((u64)-1LL))
#define MASK128(_n) ((_n) < 128 ? ((unsigned __int128)1 << (_n)) - 1 : ((unsigned __int128)-1))

static inline __attribute__((always_inline))
unsigned int bpf_log2(unsigned int v)
{
  unsigned int r;
  unsigned int shift;

  r = (v > 0xFFFF) << 4; v >>= r;
  shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
  shift = (v > 0xF) << 2; v >>= shift; r |= shift;
  shift = (v > 0x3) << 1; v >>= shift; r |= shift;
  r |= (v >> 1);
  return r;
}

static inline __attribute__((always_inline))
unsigned int bpf_log2l(unsigned long v)
{
  unsigned int hi = v >> 32;
  if (hi)
    return bpf_log2(hi) + 32 + 1;
  else
    return bpf_log2(v) + 1;
}

struct bpf_context;

static inline __attribute__((always_inline))
SEC("helpers")
u64 bpf_dext_pkt(void *pkt, u64 off, u64 bofs, u64 bsz) {
  if (bofs == 0 && bsz == 8) {
    return load_byte(pkt, off);
  } else if (bofs + bsz <= 8) {
    return load_byte(pkt, off) >> (8 - (bofs + bsz))  &  MASK(bsz);
  } else if (bofs == 0 && bsz == 16) {
    return load_half(pkt, off);
  } else if (bofs + bsz <= 16) {
    return load_half(pkt, off) >> (16 - (bofs + bsz))  &  MASK(bsz);
  } else if (bofs == 0 && bsz == 32) {
    return load_word(pkt, off);
  } else if (bofs + bsz <= 32) {
    return load_word(pkt, off) >> (32 - (bofs + bsz))  &  MASK(bsz);
  } else if (bofs == 0 && bsz == 64) {
    return load_dword(pkt, off);
  } else if (bofs + bsz <= 64) {
    return load_dword(pkt, off) >> (64 - (bofs + bsz))  &  MASK(bsz);
  }
  return 0;
}

static inline __attribute__((always_inline))
SEC("helpers")
void bpf_dins_pkt(void *pkt, u64 off, u64 bofs, u64 bsz, u64 val) {
  // The load_xxx function does a bswap before returning the short/word/dword,
  // so the value in register will always be host endian. However, the bytes
  // written back need to be in network order.
  if (bofs == 0 && bsz == 8) {
    bpf_skb_store_bytes(pkt, off, &val, 1, 0);
  } else if (bofs + bsz <= 8) {
    u8 v = load_byte(pkt, off);
    v &= ~(MASK(bsz) << (8 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (8 - (bofs + bsz)));
    bpf_skb_store_bytes(pkt, off, &v, 1, 0);
  } else if (bofs == 0 && bsz == 16) {
    u16 v = bpf_htons(val);
    bpf_skb_store_bytes(pkt, off, &v, 2, 0);
  } else if (bofs + bsz <= 16) {
    u16 v = load_half(pkt, off);
    v &= ~(MASK(bsz) << (16 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (16 - (bofs + bsz)));
    v = bpf_htons(v);
    bpf_skb_store_bytes(pkt, off, &v, 2, 0);
  } else if (bofs == 0 && bsz == 32) {
    u32 v = bpf_htonl(val);
    bpf_skb_store_bytes(pkt, off, &v, 4, 0);
  } else if (bofs + bsz <= 32) {
    u32 v = load_word(pkt, off);
    v &= ~(MASK(bsz) << (32 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (32 - (bofs + bsz)));
    v = bpf_htonl(v);
    bpf_skb_store_bytes(pkt, off, &v, 4, 0);
  } else if (bofs == 0 && bsz == 64) {
    u64 v = bpf_htonll(val);
    bpf_skb_store_bytes(pkt, off, &v, 8, 0);
  } else if (bofs + bsz <= 64) {
    u64 v = load_dword(pkt, off);
    v &= ~(MASK(bsz) << (64 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (64 - (bofs + bsz)));
    v = bpf_htonll(v);
    bpf_skb_store_bytes(pkt, off, &v, 8, 0);
  }
}

static inline __attribute__((always_inline))
SEC("helpers")
void * bpf_map_lookup_elem_(uintptr_t map, void *key) {
  return bpf_map_lookup_elem((void *)map, key);
}

static inline __attribute__((always_inline))
SEC("helpers")
int bpf_map_update_elem_(uintptr_t map, void *key, void *value, u64 flags) {
  return bpf_map_update_elem((void *)map, key, value, flags);
}

static inline __attribute__((always_inline))
SEC("helpers")
int bpf_map_delete_elem_(uintptr_t map, void *key) {
  return bpf_map_delete_elem((void *)map, key);
}

static inline __attribute__((always_inline))
SEC("helpers")
int bpf_l3_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
  switch (flags & 0xf) {
    case 2:
      return bpf_l3_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
    case 4:
      return bpf_l3_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
    case 8:
      return bpf_l3_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
    default:
      {}
  }
  return bpf_l3_csum_replace(ctx, off, from, to, flags);
}

static inline __attribute__((always_inline))
SEC("helpers")
int bpf_l4_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
  switch (flags & 0xf) {
    case 2:
      return bpf_l4_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
    case 4:
      return bpf_l4_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
    case 8:
      return bpf_l4_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
    default:
      {}
  }
  return bpf_l4_csum_replace(ctx, off, from, to, flags);
}

int incr_cksum_l3(void *off, u64 oldval, u64 newval) asm("llvm.bpf.extra");
int incr_cksum_l4(void *off, u64 oldval, u64 newval, u64 flags) asm("llvm.bpf.extra");
int bpf_num_cpus() asm("llvm.bpf.extra");

struct pt_regs;
int bpf_usdt_readarg(int argc, struct pt_regs *ctx, void *arg) asm("llvm.bpf.extra");
int bpf_usdt_readarg_p(int argc, struct pt_regs *ctx, void *buf, u64 len) asm("llvm.bpf.extra");

/* Scan the ARCH passed in from ARCH env variable (see kbuild_helper.cc) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s930x)
#define bpf_target_s930x
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#else
#undef bpf_target_defined
#endif

/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390x__)
#define bpf_target_s930x
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__powerpc__)
#define bpf_target_powerpc
#endif
#endif

#if defined(bpf_target_powerpc)
#define PT_REGS_PARM1(ctx)	((ctx)->gpr[3])
#define PT_REGS_PARM2(ctx)	((ctx)->gpr[4])
#define PT_REGS_PARM3(ctx)	((ctx)->gpr[5])
#define PT_REGS_PARM4(ctx)	((ctx)->gpr[6])
#define PT_REGS_PARM5(ctx)	((ctx)->gpr[7])
#define PT_REGS_PARM6(ctx)	((ctx)->gpr[8])
#define PT_REGS_RC(ctx)		((ctx)->gpr[3])
#define PT_REGS_IP(ctx)		((ctx)->nip)
#define PT_REGS_SP(ctx)		((ctx)->gpr[1])
#elif defined(bpf_target_s930x)
#define PT_REGS_PARM1(x) ((x)->gprs[2])
#define PT_REGS_PARM2(x) ((x)->gprs[3])
#define PT_REGS_PARM3(x) ((x)->gprs[4])
#define PT_REGS_PARM4(x) ((x)->gprs[5])
#define PT_REGS_PARM5(x) ((x)->gprs[6])
#define PT_REGS_RET(x) ((x)->gprs[14])
#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)
#elif defined(bpf_target_x86)
#define PT_REGS_PARM1(ctx)	((ctx)->di)
#define PT_REGS_PARM2(ctx)	((ctx)->si)
#define PT_REGS_PARM3(ctx)	((ctx)->dx)
#define PT_REGS_PARM4(ctx)	((ctx)->cx)
#define PT_REGS_PARM5(ctx)	((ctx)->r8)
#define PT_REGS_PARM6(ctx)	((ctx)->r9)
#define PT_REGS_RET(ctx)	((ctx)->sp)
#define PT_REGS_FP(ctx)         ((ctx)->bp) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(ctx)		((ctx)->ax)
#define PT_REGS_IP(ctx)		((ctx)->ip)
#define PT_REGS_SP(ctx)		((ctx)->sp)
#elif defined(bpf_target_arm64)
#define PT_REGS_PARM1(x)	((x)->regs[0])
#define PT_REGS_PARM2(x)	((x)->regs[1])
#define PT_REGS_PARM3(x)	((x)->regs[2])
#define PT_REGS_PARM4(x)	((x)->regs[3])
#define PT_REGS_PARM5(x)	((x)->regs[4])
#define PT_REGS_PARM6(x)	((x)->regs[5])
#define PT_REGS_RET(x)		((x)->regs[30])
#define PT_REGS_FP(x)		((x)->regs[29]) /*  Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x)		((x)->regs[0])
#define PT_REGS_SP(x)		((x)->sp)
#define PT_REGS_IP(x)		((x)->pc)
#else
#error "bcc does not support this platform yet"
#endif

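/* Illustrative sketch (not part of the original header): the PT_REGS_* macros
 * give portable access to registers in kprobe/kretprobe handlers. The probe
 * name and argument interpretation below are hypothetical:
 *
 *   // int kprobe__some_kernel_func(struct pt_regs *ctx) {
 *   //   u64 arg1 = PT_REGS_PARM1(ctx);
 *   //   bpf_trace_printk("arg1=%llu\n", arg1);
 *   //   return 0;
 *   // }
 *   //
 *   // in a kretprobe, PT_REGS_RC(ctx) holds the return value.
 */
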
#define lock_xadd(ptr, val) ((void)__sync_fetch_and_add(ptr, val))

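/* Illustrative sketch (not part of the original header): lock_xadd performs
 * an atomic add, useful when several CPUs may update the same (non-per-CPU)
 * map slot concurrently. The table name pkt_count is hypothetical:
 *
 *   // u64 *cnt = pkt_count.lookup(&key);
 *   // if (cnt) lock_xadd(cnt, 1);
 */
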
#define TRACEPOINT_PROBE(category, event) \
int tracepoint__##category##__##event(struct tracepoint__##category##__##event *args)

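/* Illustrative sketch (not part of the original header): TRACEPOINT_PROBE
 * generates the function name and args struct that bcc expects for a
 * tracepoint; the fields of args mirror the tracepoint's format file. For
 * example, for the random:urandom_read tracepoint (field name taken from its
 * format file):
 *
 *   // TRACEPOINT_PROBE(random, urandom_read) {
 *   //   bpf_trace_printk("%d\n", args->got_bits);
 *   //   return 0;
 *   // }
 */
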
#define RAW_TRACEPOINT_PROBE(event) \
int raw_tracepoint__##event(struct bpf_raw_tracepoint_args *ctx)

#define TP_DATA_LOC_READ_CONST(dst, field, length)                        \
        do {                                                              \
            unsigned short __offset = args->data_loc_##field & 0xFFFF;    \
            bpf_probe_read((void *)dst, length, (char *)args + __offset); \
        } while (0);

#define TP_DATA_LOC_READ(dst, field)                                        \
        do {                                                                \
            unsigned short __offset = args->data_loc_##field & 0xFFFF;      \
            unsigned short __length = args->data_loc_##field >> 16;         \
            bpf_probe_read((void *)dst, __length, (char *)args + __offset); \
        } while (0);

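/* Illustrative sketch (not part of the original header): tracepoint fields
 * declared as __data_loc (e.g. variable-length strings) store an
 * offset/length pair rather than the data itself; inside a TRACEPOINT_PROBE
 * they can be copied out with the macros above. The buffer and field names
 * below are hypothetical:
 *
 *   // char name[64];
 *   // TP_DATA_LOC_READ_CONST(name, name, sizeof(name));
 */
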
#endif
)********"