1 R"********(
2 /*
3  * Copyright (c) 2015 PLUMgrid, Inc.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 #ifndef __BPF_HELPERS_H
18 #define __BPF_HELPERS_H
19 
20 #include <uapi/linux/bpf.h>
21 #include <uapi/linux/if_packet.h>
22 #include <linux/version.h>
23 #include <linux/log2.h>
24 
25 #ifndef CONFIG_BPF_SYSCALL
26 #error "CONFIG_BPF_SYSCALL is undefined, please check your .config or ask your Linux distro to enable this feature"
27 #endif
28 
29 #ifdef PERF_MAX_STACK_DEPTH
30 #define BPF_MAX_STACK_DEPTH PERF_MAX_STACK_DEPTH
31 #else
32 #define BPF_MAX_STACK_DEPTH 127
33 #endif
34 
/* Helper macro to place programs, maps, and the license in
 * different sections of the elf_bpf file. Section names
 * are interpreted by the elf_bpf loader.
 */
#define SEC(NAME) __attribute__((section(NAME), used))

// Changes to the macro require changes in BFrontendAction classes
#define BPF_F_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries, _flags) \
struct _name##_table_t { \
  _key_type key; \
  _leaf_type leaf; \
  _leaf_type * (*lookup) (_key_type *); \
  _leaf_type * (*lookup_or_init) (_key_type *, _leaf_type *); \
  int (*update) (_key_type *, _leaf_type *); \
  int (*insert) (_key_type *, _leaf_type *); \
  int (*delete) (_key_type *); \
  void (*call) (void *, int index); \
  void (*increment) (_key_type, ...); \
  int (*get_stackid) (void *, u64); \
  u32 max_entries; \
  int flags; \
}; \
__attribute__((section("maps/" _table_type))) \
struct _name##_table_t _name = { .flags = (_flags), .max_entries = (_max_entries) }

#define BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries) \
BPF_F_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries, 0)

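/*
 * Usage sketch (illustrative comment only; `start` and `trace_func` are
 * hypothetical names). Calls made through the generated struct members are
 * rewritten by the bcc frontend (BFrontendAction) into the corresponding
 * bpf_map_*_elem helper calls:
 *
 *   BPF_TABLE("hash", u32, u64, start, 1024);
 *
 *   int trace_func(struct pt_regs *ctx) {
 *     u32 pid = bpf_get_current_pid_tgid() >> 32;
 *     u64 ts = bpf_ktime_get_ns();
 *     start.update(&pid, &ts);        // becomes bpf_map_update_elem()
 *     u64 *tsp = start.lookup(&pid);  // becomes bpf_map_lookup_elem()
 *     if (tsp)
 *       start.delete(&pid);           // becomes bpf_map_delete_elem()
 *     return 0;
 *   }
 */
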
// Define a table, same as above, but allow it to be referenced by other modules
#define BPF_TABLE_PUBLIC(_table_type, _key_type, _leaf_type, _name, _max_entries) \
BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries); \
__attribute__((section("maps/export"))) \
struct _name##_table_t __##_name

// Define a table that is shared across the programs in the same namespace
#define BPF_TABLE_SHARED(_table_type, _key_type, _leaf_type, _name, _max_entries) \
BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries); \
__attribute__((section("maps/shared"))) \
struct _name##_table_t __##_name

// Identifier for the current CPU, used in perf_submit and perf_read.
// Prefers the BPF_F_CURRENT_CPU flag and falls back to calling the helper on older kernels.
// Can be overridden from BCC.
#ifndef CUR_CPU_IDENTIFIER
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
#define CUR_CPU_IDENTIFIER BPF_F_CURRENT_CPU
#else
#define CUR_CPU_IDENTIFIER bpf_get_smp_processor_id()
#endif
#endif

// Table for pushing custom events to userspace via the perf ring buffer
#define BPF_PERF_OUTPUT(_name) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  /* map.perf_submit(ctx, data, data_size) */ \
  int (*perf_submit) (void *, void *, u32); \
  int (*perf_submit_skb) (void *, u32, void *, u32); \
  u32 max_entries; \
}; \
__attribute__((section("maps/perf_output"))) \
struct _name##_table_t _name = { .max_entries = 0 }

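/*
 * Usage sketch (illustrative comment only; `events`, `data_t` and
 * `trace_func` are hypothetical names). perf_submit() pushes one event to the
 * per-CPU perf ring buffer, to be consumed in userspace (e.g. via bcc's
 * open_perf_buffer()):
 *
 *   struct data_t { u32 pid; u64 ts; };
 *   BPF_PERF_OUTPUT(events);
 *
 *   int trace_func(struct pt_regs *ctx) {
 *     struct data_t data = {};
 *     data.pid = bpf_get_current_pid_tgid() >> 32;
 *     data.ts = bpf_ktime_get_ns();
 *     events.perf_submit(ctx, &data, sizeof(data));
 *     return 0;
 *   }
 */
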
// Table for reading hardware perf CPU counters
#define BPF_PERF_ARRAY(_name, _max_entries) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  /* counter = map.perf_read(index) */ \
  u64 (*perf_read) (int); \
  int (*perf_counter_value) (int, void *, u32); \
  u32 max_entries; \
}; \
__attribute__((section("maps/perf_array"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

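/*
 * Usage sketch (illustrative comment only; `cyc`, `MAX_CPUS` and `on_event`
 * are hypothetical, and userspace is assumed to have opened a perf event on
 * each CPU and sized the array to the CPU count before perf_read() is
 * meaningful):
 *
 *   BPF_PERF_ARRAY(cyc, MAX_CPUS);
 *
 *   int on_event(struct pt_regs *ctx) {
 *     u64 cycles = cyc.perf_read(CUR_CPU_IDENTIFIER);
 *     // aggregate cycles somewhere, e.g. in a BPF_HASH
 *     return 0;
 *   }
 */
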
// Table for cgroup file descriptors
#define BPF_CGROUP_ARRAY(_name, _max_entries) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  int (*check_current_task) (int); \
  u32 max_entries; \
}; \
__attribute__((section("maps/cgroup_array"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

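/*
 * Usage sketch (illustrative comment only; `cgrp` and `trace_func` are
 * hypothetical). The cgroup fd at each index is populated from userspace;
 * check_current_task() maps to the current_task_under_cgroup helper and
 * returns a positive value when the current task is in the cgroup stored at
 * that index:
 *
 *   BPF_CGROUP_ARRAY(cgrp, 1);
 *
 *   int trace_func(struct pt_regs *ctx) {
 *     if (cgrp.check_current_task(0) > 0) {
 *       // current task belongs to the filtered cgroup
 *     }
 *     return 0;
 *   }
 */
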
#define BPF_HASH1(_name) \
  BPF_TABLE("hash", u64, u64, _name, 10240)
#define BPF_HASH2(_name, _key_type) \
  BPF_TABLE("hash", _key_type, u64, _name, 10240)
#define BPF_HASH3(_name, _key_type, _leaf_type) \
  BPF_TABLE("hash", _key_type, _leaf_type, _name, 10240)
#define BPF_HASH4(_name, _key_type, _leaf_type, _size) \
  BPF_TABLE("hash", _key_type, _leaf_type, _name, _size)

// helper for default-argument macro dispatch
#define BPF_HASHX(_1, _2, _3, _4, NAME, ...) NAME

// Define a hash map; trailing arguments are optional
// BPF_HASH(name, key_type=u64, leaf_type=u64, size=10240)
#define BPF_HASH(...) \
  BPF_HASHX(__VA_ARGS__, BPF_HASH4, BPF_HASH3, BPF_HASH2, BPF_HASH1)(__VA_ARGS__)

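/*
 * Usage sketch (illustrative comment only; `counts` and `do_count` are
 * hypothetical). With the defaults, BPF_HASH(counts) is equivalent to
 * BPF_TABLE("hash", u64, u64, counts, 10240):
 *
 *   BPF_HASH(counts, u32, u64);
 *
 *   int do_count(struct pt_regs *ctx) {
 *     u32 pid = bpf_get_current_pid_tgid() >> 32;
 *     counts.increment(pid);   // add 1, creating the entry if needed
 *     return 0;
 *   }
 */
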
#define BPF_ARRAY1(_name) \
  BPF_TABLE("array", int, u64, _name, 10240)
#define BPF_ARRAY2(_name, _leaf_type) \
  BPF_TABLE("array", int, _leaf_type, _name, 10240)
#define BPF_ARRAY3(_name, _leaf_type, _size) \
  BPF_TABLE("array", int, _leaf_type, _name, _size)

// helper for default-argument macro dispatch
#define BPF_ARRAYX(_1, _2, _3, NAME, ...) NAME

// Define an array map; trailing arguments are optional
// BPF_ARRAY(name, leaf_type=u64, size=10240)
#define BPF_ARRAY(...) \
  BPF_ARRAYX(__VA_ARGS__, BPF_ARRAY3, BPF_ARRAY2, BPF_ARRAY1)(__VA_ARGS__)

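/*
 * Usage sketch (illustrative comment only; `hits` and `count_hit` are
 * hypothetical). Array keys are ints starting at 0; the NULL check on the
 * lookup result is still required to satisfy the verifier:
 *
 *   BPF_ARRAY(hits, u64, 16);
 *
 *   int count_hit(struct pt_regs *ctx) {
 *     int idx = 0;
 *     u64 *val = hits.lookup(&idx);
 *     if (val)
 *       lock_xadd(val, 1);   // atomic add, see lock_xadd() below
 *     return 0;
 *   }
 */
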
#define BPF_PERCPU_ARRAY1(_name)                        \
    BPF_TABLE("percpu_array", int, u64, _name, 10240)
#define BPF_PERCPU_ARRAY2(_name, _leaf_type) \
    BPF_TABLE("percpu_array", int, _leaf_type, _name, 10240)
#define BPF_PERCPU_ARRAY3(_name, _leaf_type, _size) \
    BPF_TABLE("percpu_array", int, _leaf_type, _name, _size)

// helper for default-argument macro dispatch
#define BPF_PERCPU_ARRAYX(_1, _2, _3, NAME, ...) NAME

// Define a per-CPU array map; trailing arguments are optional
// BPF_PERCPU_ARRAY(name, leaf_type=u64, size=10240)
#define BPF_PERCPU_ARRAY(...)                                           \
  BPF_PERCPU_ARRAYX(                                                    \
    __VA_ARGS__, BPF_PERCPU_ARRAY3, BPF_PERCPU_ARRAY2, BPF_PERCPU_ARRAY1) \
           (__VA_ARGS__)

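/*
 * Usage sketch (illustrative comment only; `stats` and `count_event` are
 * hypothetical). Each CPU gets its own copy of every slot, so in-program
 * updates need no atomics; userspace sums the per-CPU values when reading:
 *
 *   BPF_PERCPU_ARRAY(stats, u64, 8);
 *
 *   int count_event(struct pt_regs *ctx) {
 *     int idx = 0;
 *     u64 *val = stats.lookup(&idx);
 *     if (val)
 *       (*val)++;
 *     return 0;
 *   }
 */
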
#define BPF_HIST1(_name) \
  BPF_TABLE("histogram", int, u64, _name, 64)
#define BPF_HIST2(_name, _key_type) \
  BPF_TABLE("histogram", _key_type, u64, _name, 64)
#define BPF_HIST3(_name, _key_type, _size) \
  BPF_TABLE("histogram", _key_type, u64, _name, _size)
#define BPF_HISTX(_1, _2, _3, NAME, ...) NAME

// Define a histogram; trailing arguments are optional
// BPF_HISTOGRAM(name, key_type=int, size=64)
#define BPF_HISTOGRAM(...) \
  BPF_HISTX(__VA_ARGS__, BPF_HIST3, BPF_HIST2, BPF_HIST1)(__VA_ARGS__)

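/*
 * Usage sketch (illustrative comment only; `dist` and `trace_latency` are
 * hypothetical). A common pattern is a log2 histogram of a measured quantity,
 * using bpf_log2l() defined later in this file:
 *
 *   BPF_HISTOGRAM(dist);
 *
 *   int trace_latency(struct pt_regs *ctx) {
 *     u64 delta_us = 42;                    // stand-in for a measured latency
 *     dist.increment(bpf_log2l(delta_us));  // bump the matching power-of-2 bucket
 *     return 0;
 *   }
 */
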
#define BPF_LPM_TRIE1(_name) \
  BPF_F_TABLE("lpm_trie", u64, u64, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE2(_name, _key_type) \
  BPF_F_TABLE("lpm_trie", _key_type, u64, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE3(_name, _key_type, _leaf_type) \
  BPF_F_TABLE("lpm_trie", _key_type, _leaf_type, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE4(_name, _key_type, _leaf_type, _size) \
  BPF_F_TABLE("lpm_trie", _key_type, _leaf_type, _name, _size, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIEX(_1, _2, _3, _4, NAME, ...) NAME

// Define an LPM trie; trailing arguments are optional
// BPF_LPM_TRIE(name, key_type=u64, leaf_type=u64, size=10240)
#define BPF_LPM_TRIE(...) \
  BPF_LPM_TRIEX(__VA_ARGS__, BPF_LPM_TRIE4, BPF_LPM_TRIE3, BPF_LPM_TRIE2, BPF_LPM_TRIE1)(__VA_ARGS__)

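/*
 * Usage sketch (illustrative comment only; `routes` and `struct lpm_key_t`
 * are hypothetical). LPM trie keys must start with a u32 prefix length,
 * followed by the data to match (here an IPv4 address); lookup() then
 * returns the longest matching prefix that was inserted:
 *
 *   struct lpm_key_t { u32 prefixlen; u32 addr; };
 *   BPF_LPM_TRIE(routes, struct lpm_key_t, u64, 1024);
 *
 *   int classify(struct __sk_buff *skb) {
 *     struct lpm_key_t key = { .prefixlen = 32, .addr = 0 };  // fill addr from the packet
 *     u64 *nh = routes.lookup(&key);
 *     return nh ? 1 : 0;
 *   }
 */
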
struct bpf_stacktrace {
  u64 ip[BPF_MAX_STACK_DEPTH];
};

#define BPF_STACK_TRACE(_name, _max_entries) \
  BPF_TABLE("stacktrace", int, struct bpf_stacktrace, _name, roundup_pow_of_two(_max_entries))

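/*
 * Usage sketch (illustrative comment only; `stack_traces` and `trace_func`
 * are hypothetical). get_stackid() stores the current stack in the table and
 * returns an id that userspace can use to walk it; see the bcc_get_stackid()
 * notes further down for its error semantics:
 *
 *   BPF_STACK_TRACE(stack_traces, 4096);
 *
 *   int trace_func(struct pt_regs *ctx) {
 *     int kstack_id = stack_traces.get_stackid(ctx, 0);
 *     int ustack_id = stack_traces.get_stackid(ctx, BPF_F_USER_STACK);
 *     // store the ids keyed by pid/comm for userspace to resolve
 *     return 0;
 *   }
 */
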
#define BPF_PROG_ARRAY(_name, _max_entries) \
  BPF_TABLE("prog", u32, u32, _name, _max_entries)

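/*
 * Usage sketch (illustrative comment only; `progs` and `dispatch` are
 * hypothetical). The array is populated with program fds from userspace;
 * call() tail-calls into the program at the given index and does not return
 * on success:
 *
 *   BPF_PROG_ARRAY(progs, 8);
 *
 *   int dispatch(struct __sk_buff *skb) {
 *     progs.call(skb, 2);   // tail call into slot 2, if populated
 *     return 0;             // only reached if the tail call fails
 *   }
 */
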
#define BPF_XDP_REDIRECT_MAP(_table_type, _leaf_type, _name, _max_entries) \
struct _name##_table_t { \
  u32 key; \
  _leaf_type leaf; \
  /* xdp_act = map.redirect_map(index, flag) */ \
  u64 (*redirect_map) (int, int); \
  u32 max_entries; \
}; \
__attribute__((section("maps/"_table_type))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

#define BPF_DEVMAP(_name, _max_entries) \
  BPF_XDP_REDIRECT_MAP("devmap", int, _name, _max_entries)

#define BPF_CPUMAP(_name, _max_entries) \
  BPF_XDP_REDIRECT_MAP("cpumap", u32, _name, _max_entries)

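/*
 * Usage sketch (illustrative comment only; `tx_port` and `xdp_prog` are
 * hypothetical, and slot 0 is assumed to hold a target ifindex loaded from
 * userspace). redirect_map() returns XDP_REDIRECT on success, so its result
 * is normally returned straight from the XDP program:
 *
 *   BPF_DEVMAP(tx_port, 8);
 *
 *   int xdp_prog(struct xdp_md *ctx) {
 *     return tx_port.redirect_map(0, 0);
 *   }
 */
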
// packet parsing state machine helpers
#define cursor_advance(_cursor, _len) \
  ({ void *_tmp = _cursor; _cursor += _len; _tmp; })

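/*
 * Usage sketch (illustrative comment only): cursor_advance() returns the
 * current cursor position and then advances it by _len, which keeps protocol
 * parsing linear. The ethernet_t/ip_t structs below come from bcc's proto.h
 * and are an assumption of this example:
 *
 *   #include <bcc/proto.h>
 *
 *   int parse(struct __sk_buff *skb) {
 *     u8 *cursor = 0;
 *     struct ethernet_t *eth = cursor_advance(cursor, sizeof(*eth));
 *     if (eth->type == 0x0800) {
 *       struct ip_t *ip = cursor_advance(cursor, sizeof(*ip));
 *       // inspect ip->nextp, ip->src, ip->dst, ...
 *     }
 *     return 0;
 *   }
 */
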
#ifdef LINUX_VERSION_CODE_OVERRIDE
unsigned _version SEC("version") = LINUX_VERSION_CODE_OVERRIDE;
#else
unsigned _version SEC("version") = LINUX_VERSION_CODE;
#endif

/* helper functions called from eBPF programs written in C */
static void *(*bpf_map_lookup_elem)(void *map, void *key) =
  (void *) BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem)(void *map, void *key, void *value, u64 flags) =
  (void *) BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem)(void *map, void *key) =
  (void *) BPF_FUNC_map_delete_elem;
static int (*bpf_probe_read)(void *dst, u64 size, const void *unsafe_ptr) =
  (void *) BPF_FUNC_probe_read;
static u64 (*bpf_ktime_get_ns)(void) =
  (void *) BPF_FUNC_ktime_get_ns;
static u32 (*bpf_get_prandom_u32)(void) =
  (void *) BPF_FUNC_get_prandom_u32;
static int (*bpf_trace_printk_)(const char *fmt, u64 fmt_size, ...) =
  (void *) BPF_FUNC_trace_printk;
static int (*bpf_probe_read_str)(void *dst, u64 size, const void *unsafe_ptr) =
  (void *) BPF_FUNC_probe_read_str;
int bpf_trace_printk(const char *fmt, ...) asm("llvm.bpf.extra");
static inline __attribute__((always_inline))
void bpf_tail_call_(u64 map_fd, void *ctx, int index) {
  ((void (*)(void *, u64, int))BPF_FUNC_tail_call)(ctx, map_fd, index);
}
static int (*bpf_clone_redirect)(void *ctx, int ifindex, u32 flags) =
  (void *) BPF_FUNC_clone_redirect;
static u64 (*bpf_get_smp_processor_id)(void) =
  (void *) BPF_FUNC_get_smp_processor_id;
static u64 (*bpf_get_current_pid_tgid)(void) =
  (void *) BPF_FUNC_get_current_pid_tgid;
static u64 (*bpf_get_current_uid_gid)(void) =
  (void *) BPF_FUNC_get_current_uid_gid;
static int (*bpf_get_current_comm)(void *buf, int buf_size) =
  (void *) BPF_FUNC_get_current_comm;
static u64 (*bpf_get_cgroup_classid)(void *ctx) =
  (void *) BPF_FUNC_get_cgroup_classid;
static u64 (*bpf_skb_vlan_push)(void *ctx, u16 proto, u16 vlan_tci) =
  (void *) BPF_FUNC_skb_vlan_push;
static u64 (*bpf_skb_vlan_pop)(void *ctx) =
  (void *) BPF_FUNC_skb_vlan_pop;
static int (*bpf_skb_get_tunnel_key)(void *ctx, void *to, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_get_tunnel_key;
static int (*bpf_skb_set_tunnel_key)(void *ctx, void *from, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_set_tunnel_key;
static u64 (*bpf_perf_event_read)(void *map, u64 flags) =
  (void *) BPF_FUNC_perf_event_read;
static int (*bpf_redirect)(int ifindex, u32 flags) =
  (void *) BPF_FUNC_redirect;
static u32 (*bpf_get_route_realm)(void *ctx) =
  (void *) BPF_FUNC_get_route_realm;
static int (*bpf_perf_event_output)(void *ctx, void *map, u64 index, void *data, u32 size) =
  (void *) BPF_FUNC_perf_event_output;
static int (*bpf_skb_load_bytes)(void *ctx, int offset, void *to, u32 len) =
  (void *) BPF_FUNC_skb_load_bytes;
static int (*bpf_perf_event_read_value)(void *map, u64 flags, void *buf, u32 buf_size) =
  (void *) BPF_FUNC_perf_event_read_value;
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf, u32 buf_size) =
  (void *) BPF_FUNC_perf_prog_read_value;
static int (*bpf_current_task_under_cgroup)(void *map, int index) =
  (void *) BPF_FUNC_current_task_under_cgroup;
static u32 (*bpf_get_socket_cookie)(void *ctx) =
  (void *) BPF_FUNC_get_socket_cookie;
static u64 (*bpf_get_socket_uid)(void *ctx) =
  (void *) BPF_FUNC_get_socket_uid;
static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval, int optlen) =
  (void *) BPF_FUNC_getsockopt;
static int (*bpf_redirect_map)(void *map, int key, int flags) =
  (void *) BPF_FUNC_redirect_map;
static int (*bpf_set_hash)(void *ctx, u32 hash) =
  (void *) BPF_FUNC_set_hash;
static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, int optlen) =
  (void *) BPF_FUNC_setsockopt;
static int (*bpf_skb_adjust_room)(void *ctx, int len_diff, u32 mode, u64 flags) =
  (void *) BPF_FUNC_skb_adjust_room;
static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
  (void *) BPF_FUNC_skb_under_cgroup;
static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
  (void *) BPF_FUNC_sk_redirect_map;
static int (*bpf_sock_map_update)(void *map, void *key, void *value, unsigned long long flags) =
  (void *) BPF_FUNC_sock_map_update;
static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_meta;

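/*
 * Usage sketch (illustrative comment only; `trace_open` and its extra
 * argument are hypothetical). These function pointers are how eBPF C code
 * reaches the in-kernel helpers, e.g. safely reading memory and emitting
 * debug output to the trace_pipe; bcc rewrites the variadic
 * bpf_trace_printk() declared above into the raw helper call:
 *
 *   int trace_open(struct pt_regs *ctx, const char *filename) {
 *     char buf[64] = {};
 *     bpf_probe_read_str(buf, sizeof(buf), (void *)filename);
 *     bpf_trace_printk("open: %s\n", buf);
 *     return 0;
 *   }
 */
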
/* bcc_get_stackid will return a negative value in the case of an error.
 *
 * BPF_STACK_TRACE(_name, _size) will allocate space for _size stack traces.
 * -ENOMEM will be returned when this limit is reached.
 *
 * -EFAULT is typically returned when requesting user-space stack traces (using
 * BPF_F_USER_STACK) for kernel threads. However, a valid stackid may be
 * returned in some cases; consider a tracepoint or kprobe executing in the
 * kernel context. Given this, you can typically ignore -EFAULT errors when
 * retrieving user-space stack traces.
 */
static int (*bcc_get_stackid_)(void *ctx, void *map, u64 flags) =
  (void *) BPF_FUNC_get_stackid;
static inline __attribute__((always_inline))
int bcc_get_stackid(uintptr_t map, void *ctx, u64 flags) {
  return bcc_get_stackid_(ctx, (void *)map, flags);
}

static int (*bpf_csum_diff)(void *from, u64 from_size, void *to, u64 to_size, u64 seed) =
  (void *) BPF_FUNC_csum_diff;
static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, u32 size) =
  (void *) BPF_FUNC_skb_get_tunnel_opt;
static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, u32 size) =
  (void *) BPF_FUNC_skb_set_tunnel_opt;
static int (*bpf_skb_change_proto)(void *ctx, u16 proto, u64 flags) =
  (void *) BPF_FUNC_skb_change_proto;
static int (*bpf_skb_change_type)(void *ctx, u32 type) =
  (void *) BPF_FUNC_skb_change_type;
static u32 (*bpf_get_hash_recalc)(void *ctx) =
  (void *) BPF_FUNC_get_hash_recalc;
static u64 (*bpf_get_current_task)(void) =
  (void *) BPF_FUNC_get_current_task;
static int (*bpf_probe_write_user)(void *dst, void *src, u32 size) =
  (void *) BPF_FUNC_probe_write_user;
static int (*bpf_skb_change_tail)(void *ctx, u32 new_len, u64 flags) =
  (void *) BPF_FUNC_skb_change_tail;
static int (*bpf_skb_pull_data)(void *ctx, u32 len) =
  (void *) BPF_FUNC_skb_pull_data;
static int (*bpf_csum_update)(void *ctx, u16 csum) =
  (void *) BPF_FUNC_csum_update;
static int (*bpf_set_hash_invalid)(void *ctx) =
  (void *) BPF_FUNC_set_hash_invalid;
static int (*bpf_get_numa_node_id)(void) =
  (void *) BPF_FUNC_get_numa_node_id;
static int (*bpf_skb_change_head)(void *ctx, u32 len, u64 flags) =
  (void *) BPF_FUNC_skb_change_head;
static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_head;
static int (*bpf_override_return)(void *pt_regs, unsigned long rc) =
  (void *) BPF_FUNC_override_return;
static int (*bpf_sock_ops_cb_flags_set)(void *skops, int flags) =
  (void *) BPF_FUNC_sock_ops_cb_flags_set;
static int (*bpf_msg_redirect_map)(void *msg, void *map, u32 key, u64 flags) =
  (void *) BPF_FUNC_msg_redirect_map;
static int (*bpf_msg_apply_bytes)(void *msg, u32 bytes) =
  (void *) BPF_FUNC_msg_apply_bytes;
static int (*bpf_msg_cork_bytes)(void *msg, u32 bytes) =
  (void *) BPF_FUNC_msg_cork_bytes;
static int (*bpf_msg_pull_data)(void *msg, u32 start, u32 end, u64 flags) =
  (void *) BPF_FUNC_msg_pull_data;
static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
  (void *) BPF_FUNC_bind;
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_tail;
static int (*bpf_skb_get_xfrm_state)(void *ctx, u32 index, void *xfrm_state, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_get_xfrm_state;
static int (*bpf_get_stack)(void *ctx, void *buf, u32 size, u64 flags) =
  (void *) BPF_FUNC_get_stack;
static int (*bpf_skb_load_bytes_relative)(void *ctx, u32 offset, void *to, u32 len, u32 start_header) =
  (void *) BPF_FUNC_skb_load_bytes_relative;
static int (*bpf_fib_lookup)(void *ctx, void *params, int plen, u32 flags) =
  (void *) BPF_FUNC_fib_lookup;
static int (*bpf_sock_hash_update)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sock_hash_update;
static int (*bpf_msg_redirect_hash)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_msg_redirect_hash;
static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sk_redirect_hash;
static int (*bpf_lwt_push_encap)(void *skb, u32 type, void *hdr, u32 len) =
  (void *) BPF_FUNC_lwt_push_encap;
static int (*bpf_lwt_seg6_store_bytes)(void *ctx, u32 offset, const void *from, u32 len) =
  (void *) BPF_FUNC_lwt_seg6_store_bytes;
static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, u32 offset, s32 delta) =
  (void *) BPF_FUNC_lwt_seg6_adjust_srh;
static int (*bpf_lwt_seg6_action)(void *ctx, u32 action, void *param, u32 param_len) =
  (void *) BPF_FUNC_lwt_seg6_action;
static int (*bpf_rc_keydown)(void *ctx, u32 protocol, u64 scancode, u32 toggle) =
  (void *) BPF_FUNC_rc_keydown;
static int (*bpf_rc_repeat)(void *ctx) =
  (void *) BPF_FUNC_rc_repeat;
static u64 (*bpf_skb_cgroup_id)(void *skb) =
  (void *) BPF_FUNC_skb_cgroup_id;
static u64 (*bpf_get_current_cgroup_id)(void) =
  (void *) BPF_FUNC_get_current_cgroup_id;
static u64 (*bpf_skb_ancestor_cgroup_id)(void *skb, int ancestor_level) =
  (void *) BPF_FUNC_skb_ancestor_cgroup_id;
static void * (*bpf_get_local_storage)(void *map, u64 flags) =
  (void *) BPF_FUNC_get_local_storage;
static int (*bpf_sk_select_reuseport)(void *reuse, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sk_select_reuseport;
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
                                             int size, unsigned int netns_id,
                                             unsigned long long flags) =
  (void *) BPF_FUNC_sk_lookup_tcp;
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
                                             int size, unsigned int netns_id,
                                             unsigned long long flags) =
  (void *) BPF_FUNC_sk_lookup_udp;
static int (*bpf_sk_release)(struct bpf_sock *sk) =
  (void *) BPF_FUNC_sk_release;
static int (*bpf_map_push_elem)(void *map, const void *value, u64 flags) =
  (void *) BPF_FUNC_map_push_elem;
static int (*bpf_map_pop_elem)(void *map, void *value) =
  (void *) BPF_FUNC_map_pop_elem;
static int (*bpf_map_peek_elem)(void *map, void *value) =
  (void *) BPF_FUNC_map_peek_elem;
static int (*bpf_msg_push_data)(void *skb, u32 start, u32 len, u64 flags) =
  (void *) BPF_FUNC_msg_push_data;

/* llvm builtin functions that an eBPF C program may use to
 * emit BPF_LD_ABS and BPF_LD_IND instructions
 */
struct sk_buff;
unsigned long long load_byte(void *skb,
  unsigned long long off) asm("llvm.bpf.load.byte");
unsigned long long load_half(void *skb,
  unsigned long long off) asm("llvm.bpf.load.half");
unsigned long long load_word(void *skb,
  unsigned long long off) asm("llvm.bpf.load.word");

/* a helper structure used by an eBPF C program
 * to describe map attributes to the elf_bpf loader
 */
struct bpf_map_def {
  unsigned int type;
  unsigned int key_size;
  unsigned int value_size;
  unsigned int max_entries;
};

static int (*bpf_skb_store_bytes)(void *ctx, unsigned long long off, void *from,
                                  unsigned long long len, unsigned long long flags) =
  (void *) BPF_FUNC_skb_store_bytes;
static int (*bpf_l3_csum_replace)(void *ctx, unsigned long long off, unsigned long long from,
                                  unsigned long long to, unsigned long long flags) =
  (void *) BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(void *ctx, unsigned long long off, unsigned long long from,
                                  unsigned long long to, unsigned long long flags) =
  (void *) BPF_FUNC_l4_csum_replace;

static inline __attribute__((always_inline))
u16 bpf_ntohs(u16 val) {
  /* will be recognized by gcc as a rotate insn and eventually compiled to rolw 8 */
  return (val << 8) | (val >> 8);
}

static inline __attribute__((always_inline))
u32 bpf_ntohl(u32 val) {
  /* gcc will use bswapsi2 insn */
  return __builtin_bswap32(val);
}

static inline __attribute__((always_inline))
u64 bpf_ntohll(u64 val) {
  /* gcc will use bswapdi2 insn */
  return __builtin_bswap64(val);
}

static inline __attribute__((always_inline))
unsigned __int128 bpf_ntoh128(unsigned __int128 val) {
  return (((unsigned __int128)bpf_ntohll(val) << 64) | (u64)bpf_ntohll(val >> 64));
}

static inline __attribute__((always_inline))
u16 bpf_htons(u16 val) {
  return bpf_ntohs(val);
}

static inline __attribute__((always_inline))
u32 bpf_htonl(u32 val) {
  return bpf_ntohl(val);
}

static inline __attribute__((always_inline))
u64 bpf_htonll(u64 val) {
  return bpf_ntohll(val);
}

static inline __attribute__((always_inline))
unsigned __int128 bpf_hton128(unsigned __int128 val) {
  return bpf_ntoh128(val);
}

static inline __attribute__((always_inline))
u64 load_dword(void *skb, u64 off) {
  return ((u64)load_word(skb, off) << 32) | load_word(skb, off + 4);
}

void bpf_store_byte(void *skb, u64 off, u64 val) asm("llvm.bpf.store.byte");
void bpf_store_half(void *skb, u64 off, u64 val) asm("llvm.bpf.store.half");
void bpf_store_word(void *skb, u64 off, u64 val) asm("llvm.bpf.store.word");
u64 bpf_pseudo_fd(u64, u64) asm("llvm.bpf.pseudo");

static inline void __attribute__((always_inline))
bpf_store_dword(void *skb, u64 off, u64 val) {
  bpf_store_word(skb, off, (u32)val);
  bpf_store_word(skb, off + 4, val >> 32);
}

#define MASK(_n) ((_n) < 64 ? (1ull << (_n)) - 1 : ((u64)-1LL))
#define MASK128(_n) ((_n) < 128 ? ((unsigned __int128)1 << (_n)) - 1 : ((unsigned __int128)-1))

static inline __attribute__((always_inline))
unsigned int bpf_log2(unsigned int v)
{
  unsigned int r;
  unsigned int shift;

  r = (v > 0xFFFF) << 4; v >>= r;
  shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
  shift = (v > 0xF) << 2; v >>= shift; r |= shift;
  shift = (v > 0x3) << 1; v >>= shift; r |= shift;
  r |= (v >> 1);
  return r;
}

static inline __attribute__((always_inline))
unsigned int bpf_log2l(unsigned long v)
{
  unsigned int hi = v >> 32;
  if (hi)
    return bpf_log2(hi) + 32 + 1;
  else
    return bpf_log2(v) + 1;
}

struct bpf_context;

static inline __attribute__((always_inline))
SEC("helpers")
u64 bpf_dext_pkt(void *pkt, u64 off, u64 bofs, u64 bsz) {
  if (bofs == 0 && bsz == 8) {
    return load_byte(pkt, off);
  } else if (bofs + bsz <= 8) {
    return load_byte(pkt, off) >> (8 - (bofs + bsz))  &  MASK(bsz);
  } else if (bofs == 0 && bsz == 16) {
    return load_half(pkt, off);
  } else if (bofs + bsz <= 16) {
    return load_half(pkt, off) >> (16 - (bofs + bsz))  &  MASK(bsz);
  } else if (bofs == 0 && bsz == 32) {
    return load_word(pkt, off);
  } else if (bofs + bsz <= 32) {
    return load_word(pkt, off) >> (32 - (bofs + bsz))  &  MASK(bsz);
  } else if (bofs == 0 && bsz == 64) {
    return load_dword(pkt, off);
  } else if (bofs + bsz <= 64) {
    return load_dword(pkt, off) >> (64 - (bofs + bsz))  &  MASK(bsz);
  }
  return 0;
}

static inline __attribute__((always_inline))
SEC("helpers")
void bpf_dins_pkt(void *pkt, u64 off, u64 bofs, u64 bsz, u64 val) {
  // The load_xxx function does a bswap before returning the short/word/dword,
  // so the value in register will always be host endian. However, the bytes
  // written back need to be in network order.
  if (bofs == 0 && bsz == 8) {
    bpf_skb_store_bytes(pkt, off, &val, 1, 0);
  } else if (bofs + bsz <= 8) {
    u8 v = load_byte(pkt, off);
    v &= ~(MASK(bsz) << (8 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (8 - (bofs + bsz)));
    bpf_skb_store_bytes(pkt, off, &v, 1, 0);
  } else if (bofs == 0 && bsz == 16) {
    u16 v = bpf_htons(val);
    bpf_skb_store_bytes(pkt, off, &v, 2, 0);
  } else if (bofs + bsz <= 16) {
    u16 v = load_half(pkt, off);
    v &= ~(MASK(bsz) << (16 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (16 - (bofs + bsz)));
    v = bpf_htons(v);
    bpf_skb_store_bytes(pkt, off, &v, 2, 0);
  } else if (bofs == 0 && bsz == 32) {
    u32 v = bpf_htonl(val);
    bpf_skb_store_bytes(pkt, off, &v, 4, 0);
  } else if (bofs + bsz <= 32) {
    u32 v = load_word(pkt, off);
    v &= ~(MASK(bsz) << (32 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (32 - (bofs + bsz)));
    v = bpf_htonl(v);
    bpf_skb_store_bytes(pkt, off, &v, 4, 0);
  } else if (bofs == 0 && bsz == 64) {
    u64 v = bpf_htonll(val);
    bpf_skb_store_bytes(pkt, off, &v, 8, 0);
  } else if (bofs + bsz <= 64) {
    u64 v = load_dword(pkt, off);
    v &= ~(MASK(bsz) << (64 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (64 - (bofs + bsz)));
    v = bpf_htonll(v);
    bpf_skb_store_bytes(pkt, off, &v, 8, 0);
  }
}

static inline __attribute__((always_inline))
SEC("helpers")
void * bpf_map_lookup_elem_(uintptr_t map, void *key) {
  return bpf_map_lookup_elem((void *)map, key);
}

static inline __attribute__((always_inline))
SEC("helpers")
int bpf_map_update_elem_(uintptr_t map, void *key, void *value, u64 flags) {
  return bpf_map_update_elem((void *)map, key, value, flags);
}

static inline __attribute__((always_inline))
SEC("helpers")
int bpf_map_delete_elem_(uintptr_t map, void *key) {
  return bpf_map_delete_elem((void *)map, key);
}

static inline __attribute__((always_inline))
SEC("helpers")
int bpf_l3_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
  switch (flags & 0xf) {
    case 2:
      return bpf_l3_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
    case 4:
      return bpf_l3_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
    case 8:
      return bpf_l3_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
    default:
      {}
  }
  return bpf_l3_csum_replace(ctx, off, from, to, flags);
}

static inline __attribute__((always_inline))
SEC("helpers")
int bpf_l4_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
  switch (flags & 0xf) {
    case 2:
      return bpf_l4_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
    case 4:
      return bpf_l4_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
    case 8:
      return bpf_l4_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
    default:
      {}
  }
  return bpf_l4_csum_replace(ctx, off, from, to, flags);
}

int incr_cksum_l3(void *off, u64 oldval, u64 newval) asm("llvm.bpf.extra");
int incr_cksum_l4(void *off, u64 oldval, u64 newval, u64 flags) asm("llvm.bpf.extra");
int bpf_num_cpus() asm("llvm.bpf.extra");

struct pt_regs;
int bpf_usdt_readarg(int argc, struct pt_regs *ctx, void *arg) asm("llvm.bpf.extra");
int bpf_usdt_readarg_p(int argc, struct pt_regs *ctx, void *buf, u64 len) asm("llvm.bpf.extra");

/* Scan the ARCH passed in from the ARCH env variable (see kbuild_helper.cc) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s930x)
#define bpf_target_s930x
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#else
#undef bpf_target_defined
#endif

/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390x__)
#define bpf_target_s930x
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__powerpc__)
#define bpf_target_powerpc
#endif
#endif

#if defined(bpf_target_powerpc)
#define PT_REGS_PARM1(ctx)	((ctx)->gpr[3])
#define PT_REGS_PARM2(ctx)	((ctx)->gpr[4])
#define PT_REGS_PARM3(ctx)	((ctx)->gpr[5])
#define PT_REGS_PARM4(ctx)	((ctx)->gpr[6])
#define PT_REGS_PARM5(ctx)	((ctx)->gpr[7])
#define PT_REGS_PARM6(ctx)	((ctx)->gpr[8])
#define PT_REGS_RC(ctx)		((ctx)->gpr[3])
#define PT_REGS_IP(ctx)		((ctx)->nip)
#define PT_REGS_SP(ctx)		((ctx)->gpr[1])
#elif defined(bpf_target_s930x)
#define PT_REGS_PARM1(x) ((x)->gprs[2])
#define PT_REGS_PARM2(x) ((x)->gprs[3])
#define PT_REGS_PARM3(x) ((x)->gprs[4])
#define PT_REGS_PARM4(x) ((x)->gprs[5])
#define PT_REGS_PARM5(x) ((x)->gprs[6])
#define PT_REGS_RET(x) ((x)->gprs[14])
#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)
#elif defined(bpf_target_x86)
#define PT_REGS_PARM1(ctx)	((ctx)->di)
#define PT_REGS_PARM2(ctx)	((ctx)->si)
#define PT_REGS_PARM3(ctx)	((ctx)->dx)
#define PT_REGS_PARM4(ctx)	((ctx)->cx)
#define PT_REGS_PARM5(ctx)	((ctx)->r8)
#define PT_REGS_PARM6(ctx)	((ctx)->r9)
#define PT_REGS_FP(ctx)		((ctx)->bp) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(ctx)		((ctx)->ax)
#define PT_REGS_IP(ctx)		((ctx)->ip)
#define PT_REGS_SP(ctx)		((ctx)->sp)
#elif defined(bpf_target_arm64)
#define PT_REGS_PARM1(x)	((x)->regs[0])
#define PT_REGS_PARM2(x)	((x)->regs[1])
#define PT_REGS_PARM3(x)	((x)->regs[2])
#define PT_REGS_PARM4(x)	((x)->regs[3])
#define PT_REGS_PARM5(x)	((x)->regs[4])
#define PT_REGS_PARM6(x)	((x)->regs[5])
#define PT_REGS_RET(x)		((x)->regs[30])
#define PT_REGS_FP(x)		((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x)		((x)->regs[0])
#define PT_REGS_SP(x)		((x)->sp)
#define PT_REGS_IP(x)		((x)->pc)
#else
#error "bcc does not support this platform yet"
#endif

#define lock_xadd(ptr, val) ((void)__sync_fetch_and_add(ptr, val))

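/*
 * Usage sketch (illustrative comment only; `probe_func` and `retprobe_func`
 * are hypothetical). The PT_REGS_* accessors give portable access to function
 * arguments and return values in kprobes/kretprobes across the architectures
 * handled above:
 *
 *   int probe_func(struct pt_regs *ctx) {
 *     u64 arg1 = PT_REGS_PARM1(ctx);   // first argument of the probed function
 *     // ...
 *     return 0;
 *   }
 *
 *   int retprobe_func(struct pt_regs *ctx) {
 *     int ret = PT_REGS_RC(ctx);       // return value, at kretprobe time
 *     // ...
 *     return 0;
 *   }
 */
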
#define TRACEPOINT_PROBE(category, event) \
int tracepoint__##category##__##event(struct tracepoint__##category##__##event *args)

#define RAW_TRACEPOINT_PROBE(event) \
int raw_tracepoint__##event(struct bpf_raw_tracepoint_args *ctx)

#define TP_DATA_LOC_READ_CONST(dst, field, length)                        \
        do {                                                              \
            unsigned short __offset = args->data_loc_##field & 0xFFFF;    \
            bpf_probe_read((void *)dst, length, (char *)args + __offset); \
        } while (0);

#define TP_DATA_LOC_READ(dst, field)                                        \
        do {                                                                \
            unsigned short __offset = args->data_loc_##field & 0xFFFF;      \
            unsigned short __length = args->data_loc_##field >> 16;         \
            bpf_probe_read((void *)dst, __length, (char *)args + __offset); \
        } while (0);

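/*
 * Usage sketch (illustrative comment only; the tracepoint and field below are
 * assumptions of this example). TRACEPOINT_PROBE expands to a function named
 * tracepoint__<category>__<event> taking bcc's auto-generated args struct;
 * __data_loc string fields are fetched with the TP_DATA_LOC_READ macros:
 *
 *   TRACEPOINT_PROBE(sched, sched_process_exec) {
 *     char fname[128];
 *     TP_DATA_LOC_READ_CONST(fname, filename, sizeof(fname));
 *     bpf_trace_printk("exec: %s\n", fname);
 *     return 0;
 *   }
 */
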
#endif
)********"