R"********(
/*
 * Copyright (c) 2015 PLUMgrid, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __BPF_HELPERS_H
#define __BPF_HELPERS_H

/* In Linux 5.4 asm_inline was introduced, but it's not supported by clang.
 * Redefine it to just asm to enable successful compilation.
 */
#ifdef asm_inline
#undef asm_inline
#define asm_inline asm
#endif

/* Before bpf_helpers.h is included, uapi bpf.h has been
 * included, which references linux/types.h. This may bring
 * in asm_volatile_goto definition if permitted based on
 * compiler setup and kernel configs.
 *
 * clang does not support "asm volatile goto" yet.
 * So redefine asm_volatile_goto to some invalid asm code.
 * If asm_volatile_goto is actually used by the bpf program,
 * a compilation error will appear.
 */
#ifdef asm_volatile_goto
#undef asm_volatile_goto
#endif
#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")

/* In 4.18 and later, when CONFIG_FUNCTION_TRACER is defined, the kernel Makefile adds
 * -DCC_USING_FENTRY. Let's do the same for BPF programs.
 */
#if defined(CONFIG_FUNCTION_TRACER)
#define CC_USING_FENTRY
#endif

#include <uapi/linux/bpf.h>
#include <uapi/linux/if_packet.h>
#include <linux/version.h>
#include <linux/log2.h>
#include <asm/page.h>

#ifndef CONFIG_BPF_SYSCALL
#error "CONFIG_BPF_SYSCALL is undefined, please check your .config or ask your Linux distro to enable this feature"
#endif

#ifdef PERF_MAX_STACK_DEPTH
#define BPF_MAX_STACK_DEPTH PERF_MAX_STACK_DEPTH
#else
#define BPF_MAX_STACK_DEPTH 127
#endif

/* helper macro to place programs, maps, and license in
 * different sections of the elf_bpf file. Section names
 * are interpreted by the elf_bpf loader.
 */
#define BCC_SEC(NAME) __attribute__((section(NAME), used))

#ifdef B_WORKAROUND
#define BCC_SEC_HELPERS BCC_SEC("helpers")
#else
#define BCC_SEC_HELPERS
#endif

// Associate map with its key/value types
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
  struct ____btf_map_##name { \
    type_key key; \
    type_val value; \
  }; \
  struct ____btf_map_##name \
  __attribute__ ((section(".maps." #name), used)) \
    ____btf_map_##name = { }

// Associate map with its key/value types for QUEUE/STACK map types
#define BPF_ANNOTATE_KV_PAIR_QUEUESTACK(name, type_val) \
  struct ____btf_map_##name { \
    type_val value; \
  }; \
  struct ____btf_map_##name \
  __attribute__ ((section(".maps." #name), used)) \
    ____btf_map_##name = { }
// Changes to the macro require changes in BFrontendAction classes
#define BPF_F_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries, _flags) \
struct _name##_table_t { \
  _key_type key; \
  _leaf_type leaf; \
  _leaf_type * (*lookup) (_key_type *); \
  _leaf_type * (*lookup_or_init) (_key_type *, _leaf_type *); \
  _leaf_type * (*lookup_or_try_init) (_key_type *, _leaf_type *); \
  int (*update) (_key_type *, _leaf_type *); \
  int (*insert) (_key_type *, _leaf_type *); \
  int (*delete) (_key_type *); \
  void (*call) (void *, int index); \
  void (*increment) (_key_type, ...); \
  void (*atomic_increment) (_key_type, ...); \
  int (*get_stackid) (void *, u64); \
  void * (*sk_storage_get) (void *, void *, int); \
  int (*sk_storage_delete) (void *); \
  void * (*inode_storage_get) (void *, void *, int); \
  int (*inode_storage_delete) (void *); \
  void * (*task_storage_get) (void *, void *, int); \
  int (*task_storage_delete) (void *); \
  u32 max_entries; \
  int flags; \
}; \
__attribute__((section("maps/" _table_type))) \
struct _name##_table_t _name = { .flags = (_flags), .max_entries = (_max_entries) }; \
BPF_ANNOTATE_KV_PAIR(_name, _key_type, _leaf_type)


// Changes to the macro require changes in BFrontendAction classes
#define BPF_QUEUESTACK(_table_type, _name, _leaf_type, _max_entries, _flags) \
struct _name##_table_t { \
  _leaf_type leaf; \
  int * (*peek) (_leaf_type *); \
  int * (*pop) (_leaf_type *); \
  int * (*push) (_leaf_type *, u64); \
  u32 max_entries; \
  int flags; \
}; \
__attribute__((section("maps/" _table_type))) \
struct _name##_table_t _name = { .flags = (_flags), .max_entries = (_max_entries) }; \
BPF_ANNOTATE_KV_PAIR_QUEUESTACK(_name, _leaf_type)

// define a queue/stack with 3 parameters (_type is supplied by the BPF_QUEUE/BPF_STACK wrappers) and flags defaulted to 0
#define BPF_QUEUE_STACK3(_type, _name, _leaf_type, _max_entries) \
  BPF_QUEUESTACK(_type, _name, _leaf_type, _max_entries, 0)

// define a queue/stack with 4 parameters (_type is supplied by the BPF_QUEUE/BPF_STACK wrappers)
#define BPF_QUEUE_STACK4(_type, _name, _leaf_type, _max_entries, _flags) \
  BPF_QUEUESTACK(_type, _name, _leaf_type, _max_entries, _flags)

// helper for default-variable macro function
#define BPF_QUEUE_STACKX(_1, _2, _3, _4, NAME, ...) NAME

#define BPF_QUEUE(...) \
  BPF_QUEUE_STACKX(__VA_ARGS__, BPF_QUEUE_STACK4, BPF_QUEUE_STACK3)("queue", __VA_ARGS__)

#define BPF_STACK(...) \
  BPF_QUEUE_STACKX(__VA_ARGS__, BPF_QUEUE_STACK4, BPF_QUEUE_STACK3)("stack", __VA_ARGS__)
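/* Illustrative usage of the BPF_QUEUE/BPF_STACK macros above (a sketch, not
 * part of the header; the map name and value type are arbitrary). push() takes
 * the update flags (0 or BPF_EXIST); pop()/peek() fill the supplied pointer:
 *
 *   BPF_QUEUE(events, u64, 1024);
 *
 *   int enqueue_ts(struct pt_regs *ctx) {
 *     u64 ts = bpf_ktime_get_ns();
 *     events.push(&ts, 0);
 *     return 0;
 *   }
 */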
#define BPF_QUEUESTACK_PINNED(_table_type, _name, _leaf_type, _max_entries, _flags, _pinned) \
  BPF_QUEUESTACK(_table_type ":" _pinned, _name, _leaf_type, _max_entries, _flags)

#define BPF_QUEUESTACK_PUBLIC(_table_type, _name, _leaf_type, _max_entries, _flags) \
  BPF_QUEUESTACK(_table_type, _name, _leaf_type, _max_entries, _flags); \
  __attribute__((section("maps/export"))) \
  struct _name##_table_t __##_name

#define BPF_QUEUESTACK_SHARED(_table_type, _name, _leaf_type, _max_entries, _flags) \
  BPF_QUEUESTACK(_table_type, _name, _leaf_type, _max_entries, _flags); \
  __attribute__((section("maps/shared"))) \
  struct _name##_table_t __##_name

#define BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries) \
  BPF_F_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries, 0)

#define BPF_TABLE_PINNED7(_table_type, _key_type, _leaf_type, _name, _max_entries, _pinned, _flags) \
  BPF_F_TABLE(_table_type ":" _pinned, _key_type, _leaf_type, _name, _max_entries, _flags)

#define BPF_TABLE_PINNED6(_table_type, _key_type, _leaf_type, _name, _max_entries, _pinned) \
  BPF_F_TABLE(_table_type ":" _pinned, _key_type, _leaf_type, _name, _max_entries, 0)

#define BPF_TABLE_PINNEDX(_1, _2, _3, _4, _5, _6, _7, NAME, ...) NAME

// Define a pinned table with optional flags argument
#define BPF_TABLE_PINNED(...) \
  BPF_TABLE_PINNEDX(__VA_ARGS__, BPF_TABLE_PINNED7, BPF_TABLE_PINNED6)(__VA_ARGS__)

// define a table same as above but allow it to be referenced by other modules
#define BPF_TABLE_PUBLIC(_table_type, _key_type, _leaf_type, _name, _max_entries) \
  BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries); \
  __attribute__((section("maps/export"))) \
  struct _name##_table_t __##_name

// define a table that is shared across the programs in the same namespace
#define BPF_TABLE_SHARED(_table_type, _key_type, _leaf_type, _name, _max_entries) \
  BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries); \
  __attribute__((section("maps/shared"))) \
  struct _name##_table_t __##_name

// Identifier for current CPU used in perf_submit and perf_read
// Prefer the BPF_F_CURRENT_CPU flag; fall back to calling the helper on older kernels
// Can be overridden from BCC
#ifndef CUR_CPU_IDENTIFIER
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
#define CUR_CPU_IDENTIFIER BPF_F_CURRENT_CPU
#else
#define CUR_CPU_IDENTIFIER bpf_get_smp_processor_id()
#endif
#endif

// Table for pushing custom events to userspace via perf ring buffer
#define BPF_PERF_OUTPUT(_name) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  /* map.perf_submit(ctx, data, data_size) */ \
  int (*perf_submit) (void *, void *, u32); \
  int (*perf_submit_skb) (void *, u32, void *, u32); \
  u32 max_entries; \
}; \
__attribute__((section("maps/perf_output"))) \
struct _name##_table_t _name = { .max_entries = 0 }

// Table for pushing custom events to userspace via ring buffer
#define BPF_RINGBUF_OUTPUT(_name, _num_pages) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  /* map.ringbuf_output(data, data_size, flags) */ \
  int (*ringbuf_output) (void *, u64, u64); \
  /* map.ringbuf_reserve(data_size) */ \
  void* (*ringbuf_reserve) (u64); \
  /* map.ringbuf_discard(data, flags) */ \
  void (*ringbuf_discard) (void *, u64); \
  /* map.ringbuf_submit(data, flags) */ \
  void (*ringbuf_submit) (void *, u64); \
  u32 max_entries; \
}; \
__attribute__((section("maps/ringbuf"))) \
struct _name##_table_t _name = { .max_entries = ((_num_pages) * PAGE_SIZE) }
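/* Illustrative usage of BPF_PERF_OUTPUT and BPF_RINGBUF_OUTPUT above (a
 * sketch, not part of the header; names are arbitrary). perf_submit() copies
 * an event to user space via the perf ring buffer, ringbuf_output() does the
 * same via a BPF ring buffer:
 *
 *   struct event_t { u32 pid; u64 ts; };
 *   BPF_PERF_OUTPUT(events);
 *   BPF_RINGBUF_OUTPUT(ringbuf, 8);     // 8 pages of ring buffer space
 *
 *   int trace_enter(struct pt_regs *ctx) {
 *     struct event_t data = {};
 *     data.pid = bpf_get_current_pid_tgid() >> 32;
 *     data.ts = bpf_ktime_get_ns();
 *     events.perf_submit(ctx, &data, sizeof(data));
 *     ringbuf.ringbuf_output(&data, sizeof(data), 0);
 *     return 0;
 *   }
 */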
// Table for reading hw perf cpu counters
#define BPF_PERF_ARRAY(_name, _max_entries) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  /* counter = map.perf_read(index) */ \
  u64 (*perf_read) (int); \
  int (*perf_counter_value) (int, void *, u32); \
  u32 max_entries; \
}; \
__attribute__((section("maps/perf_array"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

// Table for cgroup file descriptors
#define BPF_CGROUP_ARRAY(_name, _max_entries) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  int (*check_current_task) (int); \
  u32 max_entries; \
}; \
__attribute__((section("maps/cgroup_array"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

#define BPF_HASH1(_name) \
  BPF_TABLE("hash", u64, u64, _name, 10240)
#define BPF_HASH2(_name, _key_type) \
  BPF_TABLE("hash", _key_type, u64, _name, 10240)
#define BPF_HASH3(_name, _key_type, _leaf_type) \
  BPF_TABLE("hash", _key_type, _leaf_type, _name, 10240)
#define BPF_HASH4(_name, _key_type, _leaf_type, _size) \
  BPF_TABLE("hash", _key_type, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_HASHX(_1, _2, _3, _4, NAME, ...) NAME

// Define a hash map, some arguments optional
// BPF_HASH(name, key_type=u64, leaf_type=u64, size=10240)
#define BPF_HASH(...) \
  BPF_HASHX(__VA_ARGS__, BPF_HASH4, BPF_HASH3, BPF_HASH2, BPF_HASH1)(__VA_ARGS__)

#define BPF_PERCPU_HASH1(_name) \
  BPF_TABLE("percpu_hash", u64, u64, _name, 10240)
#define BPF_PERCPU_HASH2(_name, _key_type) \
  BPF_TABLE("percpu_hash", _key_type, u64, _name, 10240)
#define BPF_PERCPU_HASH3(_name, _key_type, _leaf_type) \
  BPF_TABLE("percpu_hash", _key_type, _leaf_type, _name, 10240)
#define BPF_PERCPU_HASH4(_name, _key_type, _leaf_type, _size) \
  BPF_TABLE("percpu_hash", _key_type, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_PERCPU_HASHX(_1, _2, _3, _4, NAME, ...) NAME

// Define a per-CPU hash map, some arguments optional
// BPF_PERCPU_HASH(name, key_type=u64, leaf_type=u64, size=10240)
#define BPF_PERCPU_HASH(...) \
  BPF_PERCPU_HASHX( \
    __VA_ARGS__, BPF_PERCPU_HASH4, BPF_PERCPU_HASH3, BPF_PERCPU_HASH2, BPF_PERCPU_HASH1) \
    (__VA_ARGS__)

#define BPF_ARRAY1(_name) \
  BPF_TABLE("array", int, u64, _name, 10240)
#define BPF_ARRAY2(_name, _leaf_type) \
  BPF_TABLE("array", int, _leaf_type, _name, 10240)
#define BPF_ARRAY3(_name, _leaf_type, _size) \
  BPF_TABLE("array", int, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_ARRAYX(_1, _2, _3, NAME, ...) NAME

// Define an array map, some arguments optional
// BPF_ARRAY(name, leaf_type=u64, size=10240)
#define BPF_ARRAY(...) \
  BPF_ARRAYX(__VA_ARGS__, BPF_ARRAY3, BPF_ARRAY2, BPF_ARRAY1)(__VA_ARGS__)
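/* Illustrative usage of the BPF_HASH macro above (a sketch, not part of the
 * header; names are arbitrary). increment() is the common counting idiom;
 * lookup()/update()/delete() map to the corresponding bpf_map_*_elem helpers:
 *
 *   BPF_HASH(counts, u32, u64);           // key_type=u32, leaf_type=u64
 *
 *   int kprobe__do_nanosleep(struct pt_regs *ctx) {
 *     u32 pid = bpf_get_current_pid_tgid() >> 32;
 *     counts.increment(pid);
 *     u64 *val = counts.lookup(&pid);
 *     if (val)
 *       bpf_trace_printk("pid %d slept %llu times\n", pid, *val);
 *     return 0;
 *   }
 */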
#define BPF_PERCPU_ARRAY1(_name) \
  BPF_TABLE("percpu_array", int, u64, _name, 10240)
#define BPF_PERCPU_ARRAY2(_name, _leaf_type) \
  BPF_TABLE("percpu_array", int, _leaf_type, _name, 10240)
#define BPF_PERCPU_ARRAY3(_name, _leaf_type, _size) \
  BPF_TABLE("percpu_array", int, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_PERCPU_ARRAYX(_1, _2, _3, NAME, ...) NAME

// Define a per-CPU array map, some arguments optional
// BPF_PERCPU_ARRAY(name, leaf_type=u64, size=10240)
#define BPF_PERCPU_ARRAY(...) \
  BPF_PERCPU_ARRAYX( \
    __VA_ARGS__, BPF_PERCPU_ARRAY3, BPF_PERCPU_ARRAY2, BPF_PERCPU_ARRAY1) \
    (__VA_ARGS__)

#define BPF_HIST1(_name) \
  BPF_TABLE("histogram", int, u64, _name, 64)
#define BPF_HIST2(_name, _key_type) \
  BPF_TABLE("histogram", _key_type, u64, _name, 64)
#define BPF_HIST3(_name, _key_type, _size) \
  BPF_TABLE("histogram", _key_type, u64, _name, _size)
#define BPF_HISTX(_1, _2, _3, NAME, ...) NAME

// Define a histogram, some arguments optional
// BPF_HISTOGRAM(name, key_type=int, size=64)
#define BPF_HISTOGRAM(...) \
  BPF_HISTX(__VA_ARGS__, BPF_HIST3, BPF_HIST2, BPF_HIST1)(__VA_ARGS__)

#define BPF_LPM_TRIE1(_name) \
  BPF_F_TABLE("lpm_trie", u64, u64, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE2(_name, _key_type) \
  BPF_F_TABLE("lpm_trie", _key_type, u64, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE3(_name, _key_type, _leaf_type) \
  BPF_F_TABLE("lpm_trie", _key_type, _leaf_type, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE4(_name, _key_type, _leaf_type, _size) \
  BPF_F_TABLE("lpm_trie", _key_type, _leaf_type, _name, _size, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIEX(_1, _2, _3, _4, NAME, ...) NAME

// Define an LPM trie map, some arguments optional
// BPF_LPM_TRIE(name, key_type=u64, leaf_type=u64, size=10240)
#define BPF_LPM_TRIE(...) \
  BPF_LPM_TRIEX(__VA_ARGS__, BPF_LPM_TRIE4, BPF_LPM_TRIE3, BPF_LPM_TRIE2, BPF_LPM_TRIE1)(__VA_ARGS__)
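/* Illustrative usage of BPF_HISTOGRAM above (a sketch, not part of the header;
 * the map name is arbitrary). A power-of-2 histogram using the bpf_log2l()
 * helper defined later in this file:
 *
 *   BPF_HISTOGRAM(dist);
 *
 *   int trace_return(struct pt_regs *ctx) {
 *     u64 delta_us = 0;                   // e.g. computed from saved timestamps
 *     dist.increment(bpf_log2l(delta_us));
 *     return 0;
 *   }
 */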
struct bpf_stacktrace {
  u64 ip[BPF_MAX_STACK_DEPTH];
};

struct bpf_stacktrace_buildid {
  struct bpf_stack_build_id trace[BPF_MAX_STACK_DEPTH];
};

#define BPF_STACK_TRACE(_name, _max_entries) \
  BPF_TABLE("stacktrace", int, struct bpf_stacktrace, _name, roundup_pow_of_two(_max_entries))

#define BPF_STACK_TRACE_BUILDID(_name, _max_entries) \
  BPF_F_TABLE("stacktrace", int, struct bpf_stacktrace_buildid, _name, roundup_pow_of_two(_max_entries), BPF_F_STACK_BUILD_ID)

#define BPF_PROG_ARRAY(_name, _max_entries) \
  BPF_TABLE("prog", u32, u32, _name, _max_entries)

#define BPF_XDP_REDIRECT_MAP(_table_type, _leaf_type, _name, _max_entries) \
struct _name##_table_t { \
  u32 key; \
  _leaf_type leaf; \
  /* xdp_act = map.redirect_map(index, flag) */ \
  u64 (*redirect_map) (int, int); \
  u32 max_entries; \
}; \
__attribute__((section("maps/"_table_type))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

#define BPF_DEVMAP(_name, _max_entries) \
  BPF_XDP_REDIRECT_MAP("devmap", int, _name, _max_entries)

#define BPF_CPUMAP(_name, _max_entries) \
  BPF_XDP_REDIRECT_MAP("cpumap", u32, _name, _max_entries)

#define BPF_XSKMAP(_name, _max_entries) \
struct _name##_table_t { \
  u32 key; \
  int leaf; \
  int * (*lookup) (int *); \
  /* xdp_act = map.redirect_map(index, flag) */ \
  u64 (*redirect_map) (int, int); \
  u32 max_entries; \
}; \
__attribute__((section("maps/xskmap"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

#define BPF_ARRAY_OF_MAPS(_name, _inner_map_name, _max_entries) \
  BPF_TABLE("array_of_maps$" _inner_map_name, int, int, _name, _max_entries)

#define BPF_HASH_OF_MAPS2(_name, _inner_map_name) \
  BPF_TABLE("hash_of_maps$" _inner_map_name, int, int, _name, 10240)
#define BPF_HASH_OF_MAPS3(_name, _key_type, _inner_map_name) \
  BPF_TABLE("hash_of_maps$" _inner_map_name, _key_type, int, _name, 10240)
#define BPF_HASH_OF_MAPS4(_name, _key_type, _inner_map_name, _max_entries) \
  BPF_TABLE("hash_of_maps$" _inner_map_name, _key_type, int, _name, _max_entries)

#define BPF_HASH_OF_MAPSX(_name, _2, _3, _4, NAME, ...) NAME

#define BPF_HASH_OF_MAPS(...) \
  BPF_HASH_OF_MAPSX(__VA_ARGS__, BPF_HASH_OF_MAPS4, BPF_HASH_OF_MAPS3, BPF_HASH_OF_MAPS2)(__VA_ARGS__)
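/* Illustrative usage of BPF_STACK_TRACE above (a sketch, not part of the
 * header; names are arbitrary). get_stackid() records the current stack in the
 * map and returns an id that can be used as a key elsewhere (negative on
 * error, see the bcc_get_stackid comment further below):
 *
 *   BPF_STACK_TRACE(stack_traces, 16384);
 *   BPF_HASH(stack_counts, int, u64);
 *
 *   int trace_alloc(struct pt_regs *ctx) {
 *     int stackid = stack_traces.get_stackid(ctx, BPF_F_USER_STACK);
 *     if (stackid >= 0)
 *       stack_counts.increment(stackid);
 *     return 0;
 *   }
 */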
#define BPF_SK_STORAGE(_name, _leaf_type) \
struct _name##_table_t { \
  int key; \
  _leaf_type leaf; \
  void * (*sk_storage_get) (void *, void *, int); \
  int (*sk_storage_delete) (void *); \
  u32 flags; \
}; \
__attribute__((section("maps/sk_storage"))) \
struct _name##_table_t _name = { .flags = BPF_F_NO_PREALLOC }; \
BPF_ANNOTATE_KV_PAIR(_name, int, _leaf_type)

#define BPF_INODE_STORAGE(_name, _leaf_type) \
struct _name##_table_t { \
  int key; \
  _leaf_type leaf; \
  void * (*inode_storage_get) (void *, void *, int); \
  int (*inode_storage_delete) (void *); \
  u32 flags; \
}; \
__attribute__((section("maps/inode_storage"))) \
struct _name##_table_t _name = { .flags = BPF_F_NO_PREALLOC }; \
BPF_ANNOTATE_KV_PAIR(_name, int, _leaf_type)

#define BPF_TASK_STORAGE(_name, _leaf_type) \
struct _name##_table_t { \
  int key; \
  _leaf_type leaf; \
  void * (*task_storage_get) (void *, void *, int); \
  int (*task_storage_delete) (void *); \
  u32 flags; \
}; \
__attribute__((section("maps/task_storage"))) \
struct _name##_table_t _name = { .flags = BPF_F_NO_PREALLOC }; \
BPF_ANNOTATE_KV_PAIR(_name, int, _leaf_type)

#define BPF_SOCKMAP_COMMON(_name, _max_entries, _kind, _helper_name) \
struct _name##_table_t { \
  u32 key; \
  int leaf; \
  int (*update) (u32 *, int *); \
  int (*delete) (u32 *); \
  /* ret = map.sock_map_update(ctx, key, flag) */ \
  int (* _helper_name) (void *, void *, u64); \
  u32 max_entries; \
}; \
__attribute__((section("maps/" _kind))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }; \
BPF_ANNOTATE_KV_PAIR(_name, u32, int)

#define BPF_SOCKMAP(_name, _max_entries) \
  BPF_SOCKMAP_COMMON(_name, _max_entries, "sockmap", sock_map_update)

#define BPF_SOCKHASH_COMMON(_name, _key_type, _max_entries) \
struct _name##_table_t { \
  _key_type key; \
  int leaf; \
  int (*update) (_key_type *, int *); \
  int (*delete) (_key_type *); \
  int (*sock_hash_update) (void *, void *, u64); \
  int (*msg_redirect_hash) (void *, void *, u64); \
  int (*sk_redirect_hash) (void *, void *, u64); \
  u32 max_entries; \
}; \
__attribute__((section("maps/sockhash"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }; \
BPF_ANNOTATE_KV_PAIR(_name, _key_type, int)

#define BPF_SOCKHASH1(_name) \
  BPF_SOCKHASH_COMMON(_name, u32, 10240)
#define BPF_SOCKHASH2(_name, _key_type) \
  BPF_SOCKHASH_COMMON(_name, _key_type, 10240)
#define BPF_SOCKHASH3(_name, _key_type, _max_entries) \
  BPF_SOCKHASH_COMMON(_name, _key_type, _max_entries)

#define BPF_SOCKHASHX(_1, _2, _3, NAME, ...) NAME
// The key can be a custom struct (e.g. a five-tuple); the value type is always int (a socket).
// BPF_SOCKHASH(name, key_type=u64, size=10240)
#define BPF_SOCKHASH(...) \
  BPF_SOCKHASHX(__VA_ARGS__, BPF_SOCKHASH3, BPF_SOCKHASH2, BPF_SOCKHASH1)(__VA_ARGS__)
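/* Illustrative usage of BPF_SOCKHASH above (a sketch, not part of the header;
 * the key struct and names are arbitrary). A sock_ops program typically adds
 * sockets to the hash and an sk_msg program redirects between them:
 *
 *   struct sock_key { u32 sip; u32 dip; u32 sport; u32 dport; };
 *   BPF_SOCKHASH(sock_hash, struct sock_key);
 *
 *   // in a sock_ops program:  sock_hash.sock_hash_update(skops, &key, BPF_ANY);
 *   // in an sk_msg program:   sock_hash.msg_redirect_hash(msg, &key, BPF_F_INGRESS);
 */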
#define BPF_CGROUP_STORAGE_COMMON(_name, _leaf_type, _kind) \
struct _name##_table_t { \
  struct bpf_cgroup_storage_key key; \
  _leaf_type leaf; \
  _leaf_type * (*lookup) (struct bpf_cgroup_storage_key *); \
  int (*update) (struct bpf_cgroup_storage_key *, _leaf_type *); \
  int (*get_local_storage) (u64); \
}; \
__attribute__((section("maps/" _kind))) \
struct _name##_table_t _name = { 0 }; \
BPF_ANNOTATE_KV_PAIR(_name, struct bpf_cgroup_storage_key, _leaf_type)

#define BPF_CGROUP_STORAGE(_name, _leaf_type) \
  BPF_CGROUP_STORAGE_COMMON(_name, _leaf_type, "cgroup_storage")

#define BPF_PERCPU_CGROUP_STORAGE(_name, _leaf_type) \
  BPF_CGROUP_STORAGE_COMMON(_name, _leaf_type, "percpu_cgroup_storage")

// packet parsing state machine helpers
#define cursor_advance(_cursor, _len) \
  ({ void *_tmp = _cursor; _cursor += _len; _tmp; })
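/* Illustrative usage of cursor_advance() above (a sketch, not part of the
 * header). Walking packet headers in a socket-filter style program; the
 * ethernet_t/ip_t structs are assumed to come from bcc's proto.h:
 *
 *   int parse(struct __sk_buff *skb) {
 *     u8 *cursor = 0;
 *     struct ethernet_t *ethernet = cursor_advance(cursor, sizeof(*ethernet));
 *     if (ethernet->type == 0x0800) {
 *       struct ip_t *ip = cursor_advance(cursor, sizeof(*ip));
 *       // inspect ip->nextp, ip->src, ... here
 *     }
 *     return 0;
 *   }
 */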
#ifdef LINUX_VERSION_CODE_OVERRIDE
unsigned _version BCC_SEC("version") = LINUX_VERSION_CODE_OVERRIDE;
#else
unsigned _version BCC_SEC("version") = LINUX_VERSION_CODE;
#endif

/* helper functions called from eBPF programs written in C */
static void *(*bpf_map_lookup_elem)(void *map, void *key) =
  (void *) BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem)(void *map, void *key, void *value, u64 flags) =
  (void *) BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem)(void *map, void *key) =
  (void *) BPF_FUNC_map_delete_elem;
static int (*bpf_probe_read)(void *dst, u64 size, const void *unsafe_ptr) =
  (void *) BPF_FUNC_probe_read;
static u64 (*bpf_ktime_get_ns)(void) =
  (void *) BPF_FUNC_ktime_get_ns;
static u32 (*bpf_get_prandom_u32)(void) =
  (void *) BPF_FUNC_get_prandom_u32;
static int (*bpf_trace_printk_)(const char *fmt, u64 fmt_size, ...) =
  (void *) BPF_FUNC_trace_printk;
static int (*bpf_probe_read_str)(void *dst, u64 size, const void *unsafe_ptr) =
  (void *) BPF_FUNC_probe_read_str;
int bpf_trace_printk(const char *fmt, ...) asm("llvm.bpf.extra");
static inline __attribute__((always_inline))
void bpf_tail_call_(u64 map_fd, void *ctx, int index) {
  ((void (*)(void *, u64, int))BPF_FUNC_tail_call)(ctx, map_fd, index);
}
static int (*bpf_clone_redirect)(void *ctx, int ifindex, u32 flags) =
  (void *) BPF_FUNC_clone_redirect;
static u64 (*bpf_get_smp_processor_id)(void) =
  (void *) BPF_FUNC_get_smp_processor_id;
static u64 (*bpf_get_current_pid_tgid)(void) =
  (void *) BPF_FUNC_get_current_pid_tgid;
static u64 (*bpf_get_current_uid_gid)(void) =
  (void *) BPF_FUNC_get_current_uid_gid;
static int (*bpf_get_current_comm)(void *buf, int buf_size) =
  (void *) BPF_FUNC_get_current_comm;
static u64 (*bpf_get_cgroup_classid)(void *ctx) =
  (void *) BPF_FUNC_get_cgroup_classid;
static u64 (*bpf_skb_vlan_push)(void *ctx, u16 proto, u16 vlan_tci) =
  (void *) BPF_FUNC_skb_vlan_push;
static u64 (*bpf_skb_vlan_pop)(void *ctx) =
  (void *) BPF_FUNC_skb_vlan_pop;
static int (*bpf_skb_get_tunnel_key)(void *ctx, void *to, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_get_tunnel_key;
static int (*bpf_skb_set_tunnel_key)(void *ctx, void *from, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_set_tunnel_key;
static u64 (*bpf_perf_event_read)(void *map, u64 flags) =
  (void *) BPF_FUNC_perf_event_read;
static int (*bpf_redirect)(int ifindex, u32 flags) =
  (void *) BPF_FUNC_redirect;
static u32 (*bpf_get_route_realm)(void *ctx) =
  (void *) BPF_FUNC_get_route_realm;
static int (*bpf_perf_event_output)(void *ctx, void *map, u64 index, void *data, u32 size) =
  (void *) BPF_FUNC_perf_event_output;
static int (*bpf_skb_load_bytes)(void *ctx, int offset, void *to, u32 len) =
  (void *) BPF_FUNC_skb_load_bytes;
static int (*bpf_perf_event_read_value)(void *map, u64 flags, void *buf, u32 buf_size) =
  (void *) BPF_FUNC_perf_event_read_value;
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf, u32 buf_size) =
  (void *) BPF_FUNC_perf_prog_read_value;
static int (*bpf_current_task_under_cgroup)(void *map, int index) =
  (void *) BPF_FUNC_current_task_under_cgroup;
static u32 (*bpf_get_socket_cookie)(void *ctx) =
  (void *) BPF_FUNC_get_socket_cookie;
static u64 (*bpf_get_socket_uid)(void *ctx) =
  (void *) BPF_FUNC_get_socket_uid;
static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval, int optlen) =
  (void *) BPF_FUNC_getsockopt;
static int (*bpf_redirect_map)(void *map, int key, int flags) =
  (void *) BPF_FUNC_redirect_map;
static int (*bpf_set_hash)(void *ctx, u32 hash) =
  (void *) BPF_FUNC_set_hash;
static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, int optlen) =
  (void *) BPF_FUNC_setsockopt;
static int (*bpf_skb_adjust_room)(void *ctx, int len_diff, u32 mode, u64 flags) =
  (void *) BPF_FUNC_skb_adjust_room;
static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
  (void *) BPF_FUNC_skb_under_cgroup;
static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, int size,
                                              unsigned long long netns_id,
                                              unsigned long long flags) =
  (void *) BPF_FUNC_skc_lookup_tcp;
static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
  (void *) BPF_FUNC_sk_redirect_map;
static int (*bpf_sock_map_update)(void *map, void *key, void *value, unsigned long long flags) =
  (void *) BPF_FUNC_sock_map_update;
static int (*bpf_strtol)(const char *buf, size_t buf_len, u64 flags, long *res) =
  (void *) BPF_FUNC_strtol;
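/* Illustrative use of the task helpers declared above (a sketch, not part of
 * the header). bpf_get_current_pid_tgid() packs the tgid in the upper 32 bits
 * and the thread id in the lower 32 bits; bpf_get_current_comm() copies the
 * current task name into a local buffer:
 *
 *   int trace_event(struct pt_regs *ctx) {
 *     char comm[16];
 *     u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *     bpf_get_current_comm(&comm, sizeof(comm));
 *     bpf_trace_printk("event from %s (tgid %u)\n", comm, tgid);
 *     return 0;
 *   }
 */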
static int (*bpf_strtoul)(const char *buf, size_t buf_len, u64 flags, unsigned long *res) =
  (void *) BPF_FUNC_strtoul;
static int (*bpf_sysctl_get_current_value)(struct bpf_sysctl *ctx, char *buf, size_t buf_len) =
  (void *) BPF_FUNC_sysctl_get_current_value;
static int (*bpf_sysctl_get_name)(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) =
  (void *) BPF_FUNC_sysctl_get_name;
static int (*bpf_sysctl_get_new_value)(struct bpf_sysctl *ctx, char *buf, size_t buf_len) =
  (void *) BPF_FUNC_sysctl_get_new_value;
static int (*bpf_sysctl_set_new_value)(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) =
  (void *) BPF_FUNC_sysctl_set_new_value;
static int (*bpf_tcp_check_syncookie)(void *sk, void *ip, int ip_len, void *tcp,
                                      int tcp_len) =
  (void *) BPF_FUNC_tcp_check_syncookie;
static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_meta;

/* bcc_get_stackid will return a negative value in the case of an error
 *
 * BPF_STACK_TRACE(_name, _size) will allocate space for _size stack traces.
 * -ENOMEM will be returned when this limit is reached.
 *
 * -EFAULT is typically returned when requesting user-space stack traces (using
 * BPF_F_USER_STACK) for kernel threads. However, a valid stackid may be
 * returned in some cases; consider a tracepoint or kprobe executing in the
 * kernel context. Given this, you can typically ignore -EFAULT errors when
 * retrieving user-space stack traces.
 */
static int (*bcc_get_stackid_)(void *ctx, void *map, u64 flags) =
  (void *) BPF_FUNC_get_stackid;
static inline __attribute__((always_inline))
int bcc_get_stackid(uintptr_t map, void *ctx, u64 flags) {
  return bcc_get_stackid_(ctx, (void *)map, flags);
}

static int (*bpf_csum_diff)(void *from, u64 from_size, void *to, u64 to_size, u64 seed) =
  (void *) BPF_FUNC_csum_diff;
static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, u32 size) =
  (void *) BPF_FUNC_skb_get_tunnel_opt;
static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, u32 size) =
  (void *) BPF_FUNC_skb_set_tunnel_opt;
static int (*bpf_skb_change_proto)(void *ctx, u16 proto, u64 flags) =
  (void *) BPF_FUNC_skb_change_proto;
static int (*bpf_skb_change_type)(void *ctx, u32 type) =
  (void *) BPF_FUNC_skb_change_type;
static u32 (*bpf_get_hash_recalc)(void *ctx) =
  (void *) BPF_FUNC_get_hash_recalc;
static u64 (*bpf_get_current_task)(void) =
  (void *) BPF_FUNC_get_current_task;
static int (*bpf_probe_write_user)(void *dst, void *src, u32 size) =
  (void *) BPF_FUNC_probe_write_user;
static int (*bpf_skb_change_tail)(void *ctx, u32 new_len, u64 flags) =
  (void *) BPF_FUNC_skb_change_tail;
static int (*bpf_skb_pull_data)(void *ctx, u32 len) =
  (void *) BPF_FUNC_skb_pull_data;
static int (*bpf_csum_update)(void *ctx, u16 csum) =
  (void *) BPF_FUNC_csum_update;
static int (*bpf_set_hash_invalid)(void *ctx) =
  (void *) BPF_FUNC_set_hash_invalid;
static int (*bpf_get_numa_node_id)(void) =
  (void *) BPF_FUNC_get_numa_node_id;
static int (*bpf_skb_change_head)(void *ctx, u32 len, u64 flags) =
  (void *) BPF_FUNC_skb_change_head;
static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_head;
static int (*bpf_override_return)(void *pt_regs, unsigned long rc) =
  (void *) BPF_FUNC_override_return;
static int (*bpf_sock_ops_cb_flags_set)(void *skops, int flags) =
  (void *) BPF_FUNC_sock_ops_cb_flags_set;
static int (*bpf_msg_redirect_map)(void *msg, void *map, u32 key, u64 flags) =
  (void *) BPF_FUNC_msg_redirect_map;
static int (*bpf_msg_apply_bytes)(void *msg, u32 bytes) =
  (void *) BPF_FUNC_msg_apply_bytes;
static int (*bpf_msg_cork_bytes)(void *msg, u32 bytes) =
  (void *) BPF_FUNC_msg_cork_bytes;
static int (*bpf_msg_pull_data)(void *msg, u32 start, u32 end, u64 flags) =
  (void *) BPF_FUNC_msg_pull_data;
static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
  (void *) BPF_FUNC_bind;
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_tail;
static int (*bpf_skb_get_xfrm_state)(void *ctx, u32 index, void *xfrm_state, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_get_xfrm_state;
static int (*bpf_get_stack)(void *ctx, void *buf, u32 size, u64 flags) =
  (void *) BPF_FUNC_get_stack;
static int (*bpf_skb_load_bytes_relative)(void *ctx, u32 offset, void *to, u32 len, u32 start_header) =
  (void *) BPF_FUNC_skb_load_bytes_relative;
static int (*bpf_fib_lookup)(void *ctx, void *params, int plen, u32 flags) =
  (void *) BPF_FUNC_fib_lookup;
static int (*bpf_sock_hash_update)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sock_hash_update;
static int (*bpf_msg_redirect_hash)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_msg_redirect_hash;
static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sk_redirect_hash;
static int (*bpf_lwt_push_encap)(void *skb, u32 type, void *hdr, u32 len) =
  (void *) BPF_FUNC_lwt_push_encap;
static int (*bpf_lwt_seg6_store_bytes)(void *ctx, u32 offset, const void *from, u32 len) =
  (void *) BPF_FUNC_lwt_seg6_store_bytes;
static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, u32 offset, s32 delta) =
  (void *) BPF_FUNC_lwt_seg6_adjust_srh;
static int (*bpf_lwt_seg6_action)(void *ctx, u32 action, void *param, u32 param_len) =
  (void *) BPF_FUNC_lwt_seg6_action;
static int (*bpf_rc_keydown)(void *ctx, u32 protocol, u64 scancode, u32 toggle) =
  (void *) BPF_FUNC_rc_keydown;
static int (*bpf_rc_repeat)(void *ctx) =
  (void *) BPF_FUNC_rc_repeat;
static u64 (*bpf_skb_cgroup_id)(void *skb) =
  (void *) BPF_FUNC_skb_cgroup_id;
static u64 (*bpf_get_current_cgroup_id)(void) =
  (void *) BPF_FUNC_get_current_cgroup_id;
static u64 (*bpf_skb_ancestor_cgroup_id)(void *skb, int ancestor_level) =
  (void *) BPF_FUNC_skb_ancestor_cgroup_id;
static void * (*bpf_get_local_storage)(void *map, u64 flags) =
  (void *) BPF_FUNC_get_local_storage;
static int (*bpf_sk_select_reuseport)(void *reuse, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sk_select_reuseport;
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
                                             int size, unsigned int netns_id,
                                             unsigned long long flags) =
  (void *) BPF_FUNC_sk_lookup_tcp;
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
                                             int size, unsigned int netns_id,
                                             unsigned long long flags) =
  (void *) BPF_FUNC_sk_lookup_udp;
static int (*bpf_sk_release)(void *sk) =
  (void *) BPF_FUNC_sk_release;
static int (*bpf_map_push_elem)(void *map, const void *value, u64 flags) =
  (void *) BPF_FUNC_map_push_elem;
static int (*bpf_map_pop_elem)(void *map, void *value) =
  (void *) BPF_FUNC_map_pop_elem;
static int (*bpf_map_peek_elem)(void *map, void *value) =
  (void *) BPF_FUNC_map_peek_elem;
static int (*bpf_msg_push_data)(void *skb, u32 start, u32 len, u64 flags) =
  (void *) BPF_FUNC_msg_push_data;
static int (*bpf_msg_pop_data)(void *msg, u32 start, u32 pop, u64 flags) =
  (void *) BPF_FUNC_msg_pop_data;
static int (*bpf_rc_pointer_rel)(void *ctx, s32 rel_x, s32 rel_y) =
  (void *) BPF_FUNC_rc_pointer_rel;
static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
  (void *) BPF_FUNC_spin_lock;
static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
  (void *) BPF_FUNC_spin_unlock;
static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
  (void *) BPF_FUNC_sk_fullsock;
static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
  (void *) BPF_FUNC_tcp_sock;
static int (*bpf_skb_ecn_set_ce)(void *ctx) =
  (void *) BPF_FUNC_skb_ecn_set_ce;
static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
  (void *) BPF_FUNC_get_listener_sock;
static void *(*bpf_sk_storage_get)(void *map, void *sk,
                                   void *value, __u64 flags) =
  (void *) BPF_FUNC_sk_storage_get;
static int (*bpf_sk_storage_delete)(void *map, void *sk) =
  (void *)BPF_FUNC_sk_storage_delete;
static int (*bpf_send_signal)(unsigned sig) = (void *)BPF_FUNC_send_signal;
static long long (*bpf_tcp_gen_syncookie)(void *sk, void *ip,
                                          int ip_len, void *tcp, int tcp_len) =
  (void *) BPF_FUNC_tcp_gen_syncookie;
static int (*bpf_skb_output)(void *ctx, void *map, __u64 flags, void *data,
                             __u64 size) =
  (void *)BPF_FUNC_skb_output;

static int (*bpf_probe_read_user)(void *dst, __u32 size,
                                  const void *unsafe_ptr) =
  (void *)BPF_FUNC_probe_read_user;
static int (*bpf_probe_read_kernel)(void *dst, __u32 size,
                                    const void *unsafe_ptr) =
  (void *)BPF_FUNC_probe_read_kernel;
static int (*bpf_probe_read_user_str)(void *dst, __u32 size,
                                      const void *unsafe_ptr) =
  (void *)BPF_FUNC_probe_read_user_str;
static int (*bpf_probe_read_kernel_str)(void *dst, __u32 size,
                                        const void *unsafe_ptr) =
  (void *)BPF_FUNC_probe_read_kernel_str;
static int (*bpf_tcp_send_ack)(void *tp, __u32 rcv_nxt) =
  (void *)BPF_FUNC_tcp_send_ack;
static int (*bpf_send_signal_thread)(__u32 sig) =
  (void *)BPF_FUNC_send_signal_thread;
static __u64 (*bpf_jiffies64)(void) = (void *)BPF_FUNC_jiffies64;

struct bpf_perf_event_data;
static int (*bpf_read_branch_records)(struct bpf_perf_event_data *ctx, void *buf,
                                      __u32 size, __u64 flags) =
  (void *)BPF_FUNC_read_branch_records;
static int (*bpf_get_ns_current_pid_tgid)(__u64 dev, __u64 ino,
                                          struct bpf_pidns_info *nsdata,
                                          __u32 size) =
  (void *)BPF_FUNC_get_ns_current_pid_tgid;

struct bpf_map;
static int (*bpf_xdp_output)(void *ctx, struct bpf_map *map, __u64 flags,
                             void *data, __u64 size) =
  (void *)BPF_FUNC_xdp_output;
static __u64 (*bpf_get_netns_cookie)(void *ctx) = (void *)BPF_FUNC_get_netns_cookie;
static __u64 (*bpf_get_current_ancestor_cgroup_id)(int ancestor_level) =
  (void *)BPF_FUNC_get_current_ancestor_cgroup_id;

struct sk_buff;
static int (*bpf_sk_assign)(void *skb, void *sk, __u64 flags) =
  (void *)BPF_FUNC_sk_assign;

static __u64 (*bpf_ktime_get_boot_ns)(void) = (void *)BPF_FUNC_ktime_get_boot_ns;

struct seq_file;
static int (*bpf_seq_printf)(struct seq_file *m, const char *fmt, __u32 fmt_size,
                             const void *data, __u32 data_len) =
  (void *)BPF_FUNC_seq_printf;
static int (*bpf_seq_write)(struct seq_file *m, const void *data, __u32 len) =
  (void *)BPF_FUNC_seq_write;

static __u64 (*bpf_sk_cgroup_id)(void *sk) = (void *)BPF_FUNC_sk_cgroup_id;
static __u64 (*bpf_sk_ancestor_cgroup_id)(void *sk, int ancestor_level) =
  (void *)BPF_FUNC_sk_ancestor_cgroup_id;

static int (*bpf_ringbuf_output)(void *ringbuf, void *data, __u64 size, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_output;
static void *(*bpf_ringbuf_reserve)(void *ringbuf, __u64 size, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_reserve;
static void (*bpf_ringbuf_submit)(void *data, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_submit;
static void (*bpf_ringbuf_discard)(void *data, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_discard;
static __u64 (*bpf_ringbuf_query)(void *ringbuf, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_query;

static int (*bpf_csum_level)(struct __sk_buff *skb, __u64 level) =
  (void *)BPF_FUNC_csum_level;

struct tcp6_sock;
struct tcp_sock;
struct tcp_timewait_sock;
struct tcp_request_sock;
struct udp6_sock;
static struct tcp6_sock *(*bpf_skc_to_tcp6_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_tcp6_sock;
static struct tcp_sock *(*bpf_skc_to_tcp_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_tcp_sock;
static struct tcp_timewait_sock *(*bpf_skc_to_tcp_timewait_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_tcp_timewait_sock;
static struct tcp_request_sock *(*bpf_skc_to_tcp_request_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_tcp_request_sock;
static struct udp6_sock *(*bpf_skc_to_udp6_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_udp6_sock;

struct task_struct;
static long (*bpf_get_task_stack)(struct task_struct *task, void *buf,
                                  __u32 size, __u64 flags) =
  (void *)BPF_FUNC_get_task_stack;

struct bpf_sock_ops;
static long (*bpf_load_hdr_opt)(struct bpf_sock_ops *skops, void *searchby_res,
                                u32 len, u64 flags) =
  (void *)BPF_FUNC_load_hdr_opt;
static long (*bpf_store_hdr_opt)(struct bpf_sock_ops *skops, const void *from,
                                 u32 len, u64 flags) =
  (void *)BPF_FUNC_store_hdr_opt;
static long (*bpf_reserve_hdr_opt)(struct bpf_sock_ops *skops, u32 len,
                                   u64 flags) =
  (void *)BPF_FUNC_reserve_hdr_opt;
static void *(*bpf_inode_storage_get)(struct bpf_map *map, void *inode,
                                      void *value, u64 flags) =
  (void *)BPF_FUNC_inode_storage_get;
static int (*bpf_inode_storage_delete)(struct bpf_map *map, void *inode) =
  (void *)BPF_FUNC_inode_storage_delete;
struct path;
static long (*bpf_d_path)(struct path *path, char *buf, u32 sz) =
  (void *)BPF_FUNC_d_path;
static long (*bpf_copy_from_user)(void *dst, u32 size, const void *user_ptr) =
  (void *)BPF_FUNC_copy_from_user;

static long (*bpf_snprintf_btf)(char *str, u32 str_size, struct btf_ptr *ptr,
                                u32 btf_ptr_size, u64 flags) =
  (void *)BPF_FUNC_snprintf_btf;
static long (*bpf_seq_printf_btf)(struct seq_file *m, struct btf_ptr *ptr,
                                  u32 ptr_size, u64 flags) =
  (void *)BPF_FUNC_seq_printf_btf;
static u64 (*bpf_skb_cgroup_classid)(struct sk_buff *skb) =
  (void *)BPF_FUNC_skb_cgroup_classid;
static long (*bpf_redirect_neigh)(u32 ifindex, struct bpf_redir_neigh *params,
                                  u64 flags) =
  (void *)BPF_FUNC_redirect_neigh;
static void * (*bpf_per_cpu_ptr)(const void *percpu_ptr, u32 cpu) =
  (void *)BPF_FUNC_per_cpu_ptr;
static void * (*bpf_this_cpu_ptr)(const void *percpu_ptr) =
  (void *)BPF_FUNC_this_cpu_ptr;
static long (*bpf_redirect_peer)(u32 ifindex, u64 flags) = (void *)BPF_FUNC_redirect_peer;

static void *(*bpf_task_storage_get)(void *map, struct task_struct *task,
                                     void *value, __u64 flags) =
  (void *)BPF_FUNC_task_storage_get;
static long (*bpf_task_storage_delete)(void *map, struct task_struct *task) =
  (void *)BPF_FUNC_task_storage_delete;
static struct task_struct *(*bpf_get_current_task_btf)(void) =
  (void *)BPF_FUNC_get_current_task_btf;
struct linux_binprm;
static long (*bpf_bprm_opts_set)(struct linux_binprm *bprm, __u64 flags) =
  (void *)BPF_FUNC_bprm_opts_set;
static __u64 (*bpf_ktime_get_coarse_ns)(void) = (void *)BPF_FUNC_ktime_get_coarse_ns;
struct inode;
static long (*bpf_ima_inode_hash)(struct inode *inode, void *dst, __u32 size) =
  (void *)BPF_FUNC_ima_inode_hash;
struct file;
static struct socket *(*bpf_sock_from_file)(struct file *file) =
  (void *)BPF_FUNC_sock_from_file;
static long (*bpf_check_mtu)(void *ctx, __u32 ifindex, __u32 *mtu_len,
                             __s32 len_diff, __u64 flags) =
  (void *)BPF_FUNC_check_mtu;
static long (*bpf_for_each_map_elem)(void *map, void *callback_fn,
                                     void *callback_ctx, __u64 flags) =
  (void *)BPF_FUNC_for_each_map_elem;
static long (*bpf_snprintf)(char *str, __u32 str_size, const char *fmt,
                            __u64 *data, __u32 data_len) =
  (void *)BPF_FUNC_snprintf;

static long (*bpf_sys_bpf)(__u32 cmd, void *attr, __u32 attr_size) =
  (void *)BPF_FUNC_sys_bpf;
static long (*bpf_btf_find_by_name_kind)(char *name, int name_sz, __u32 kind, int flags) =
  (void *)BPF_FUNC_btf_find_by_name_kind;
static long (*bpf_sys_close)(__u32 fd) = (void *)BPF_FUNC_sys_close;

struct bpf_timer;
static long (*bpf_timer_init)(struct bpf_timer *timer, void *map, __u64 flags) =
  (void *)BPF_FUNC_timer_init;
static long (*bpf_timer_set_callback)(struct bpf_timer *timer, void *callback_fn) =
  (void *)BPF_FUNC_timer_set_callback;
static long (*bpf_timer_start)(struct bpf_timer *timer, __u64 nsecs, __u64 flags) =
  (void *)BPF_FUNC_timer_start;
static long (*bpf_timer_cancel)(struct bpf_timer *timer) = (void *)BPF_FUNC_timer_cancel;

static __u64 (*bpf_get_func_ip)(void *ctx) = (void *)BPF_FUNC_get_func_ip;
static __u64 (*bpf_get_attach_cookie)(void *ctx) = (void *)BPF_FUNC_get_attach_cookie;
static long (*bpf_task_pt_regs)(struct task_struct *task) = (void *)BPF_FUNC_task_pt_regs;

static long (*bpf_get_branch_snapshot)(void *entries, __u32 size, __u64 flags) =
  (void *)BPF_FUNC_get_branch_snapshot;
static long (*bpf_trace_vprintk)(const char *fmt, __u32 fmt_size, const void *data,
                                 __u32 data_len) =
  (void *)BPF_FUNC_trace_vprintk;
static struct unix_sock *(*bpf_skc_to_unix_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_unix_sock;
static long (*bpf_kallsyms_lookup_name)(const char *name, int name_sz, int flags,
                                        __u64 *res) =
  (void *)BPF_FUNC_kallsyms_lookup_name;
static long (*bpf_find_vma)(struct task_struct *task, __u64 addr, void *callback_fn,
                            void *callback_ctx, __u64 flags) =
  (void *)BPF_FUNC_find_vma;
static long (*bpf_loop)(__u32 nr_loops, void *callback_fn, void *callback_ctx, __u64 flags) =
  (void *)BPF_FUNC_loop;
static long (*bpf_strncmp)(const char *s1, __u32 s1_sz, const char *s2) =
  (void *)BPF_FUNC_strncmp;
static long (*bpf_get_func_arg)(void *ctx, __u32 n, __u64 *value) =
  (void *)BPF_FUNC_get_func_arg;
static long (*bpf_get_func_ret)(void *ctx, __u64 *value) = (void *)BPF_FUNC_get_func_ret;
static long (*bpf_get_func_arg_cnt)(void *ctx) = (void *)BPF_FUNC_get_func_arg_cnt;
static int (*bpf_get_retval)(void) = (void *)BPF_FUNC_get_retval;
static int (*bpf_set_retval)(int retval) = (void *)BPF_FUNC_set_retval;
static __u64 (*bpf_xdp_get_buff_len)(struct xdp_md *xdp_md) = (void *)BPF_FUNC_xdp_get_buff_len;
static long (*bpf_xdp_load_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) =
  (void *)BPF_FUNC_xdp_load_bytes;
static long (*bpf_xdp_store_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) =
  (void *)BPF_FUNC_xdp_store_bytes;
static long (*bpf_copy_from_user_task)(void *dst, __u32 size, const void *user_ptr,
                                       struct task_struct *tsk, __u64 flags) =
  (void *)BPF_FUNC_copy_from_user_task;
static long (*bpf_skb_set_tstamp)(struct __sk_buff *skb, __u64 tstamp, __u32 tstamp_type) =
  (void *)BPF_FUNC_skb_set_tstamp;
static long (*bpf_ima_file_hash)(struct file *file, void *dst, __u32 size) =
  (void *)BPF_FUNC_ima_file_hash;

/* llvm builtin functions that eBPF C program may use to
 * emit BPF_LD_ABS and BPF_LD_IND instructions
 */
unsigned long long load_byte(void *skb,
  unsigned long long off) asm("llvm.bpf.load.byte");
unsigned long long load_half(void *skb,
  unsigned long long off) asm("llvm.bpf.load.half");
unsigned long long load_word(void *skb,
  unsigned long long off) asm("llvm.bpf.load.word");

/* a helper structure used by eBPF C program
 * to describe map attributes to elf_bpf loader
 */
struct bpf_map_def {
  unsigned int type;
  unsigned int key_size;
  unsigned int value_size;
  unsigned int max_entries;
};

static int (*bpf_skb_store_bytes)(void *ctx, unsigned long long off, void *from,
                                  unsigned long long len, unsigned long long flags) =
  (void *) BPF_FUNC_skb_store_bytes;
static int (*bpf_l3_csum_replace)(void *ctx, unsigned long long off, unsigned long long from,
                                  unsigned long long to, unsigned long long flags) =
  (void *) BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(void *ctx, unsigned long long off, unsigned long long from,
                                  unsigned long long to, unsigned long long flags) =
  (void *) BPF_FUNC_l4_csum_replace;

static inline __attribute__((always_inline))
u16 bpf_ntohs(u16 val) {
  /* will be recognized by gcc into rotate insn and eventually rolw 8 */
  return (val << 8) | (val >> 8);
}

static inline __attribute__((always_inline))
u32 bpf_ntohl(u32 val) {
  /* gcc will use bswapsi2 insn */
  return __builtin_bswap32(val);
}

static inline __attribute__((always_inline))
u64 bpf_ntohll(u64 val) {
  /* gcc will use bswapdi2 insn */
  return __builtin_bswap64(val);
}

static inline __attribute__((always_inline))
unsigned __int128 bpf_ntoh128(unsigned __int128 val) {
  return (((unsigned __int128)bpf_ntohll(val) << 64) | (u64)bpf_ntohll(val >> 64));
}

static inline __attribute__((always_inline))
u16 bpf_htons(u16 val) {
  return bpf_ntohs(val);
}

static inline __attribute__((always_inline))
u32 bpf_htonl(u32 val) {
  return bpf_ntohl(val);
}

static inline __attribute__((always_inline))
u64 bpf_htonll(u64 val) {
  return bpf_ntohll(val);
}

static inline __attribute__((always_inline))
unsigned __int128 bpf_hton128(unsigned __int128 val) {
  return bpf_ntoh128(val);
}

static inline __attribute__((always_inline))
u64 load_dword(void *skb, u64 off) {
  return ((u64)load_word(skb, off) << 32) | load_word(skb, off + 4);
}

void bpf_store_byte(void *skb, u64 off, u64 val) asm("llvm.bpf.store.byte");
void bpf_store_half(void *skb, u64 off, u64 val) asm("llvm.bpf.store.half");
void bpf_store_word(void *skb, u64 off, u64 val) asm("llvm.bpf.store.word");
u64 bpf_pseudo_fd(u64, u64) asm("llvm.bpf.pseudo");

static inline void __attribute__((always_inline))
bpf_store_dword(void *skb, u64 off, u64 val) {
  bpf_store_word(skb, off, (u32)val);
  bpf_store_word(skb, off + 4, val >> 32);
}

#define MASK(_n) ((_n) < 64 ? (1ull << (_n)) - 1 : ((u64)-1LL))
#define MASK128(_n) ((_n) < 128 ? ((unsigned __int128)1 << (_n)) - 1 : ((unsigned __int128)-1))

static inline __attribute__((always_inline))
unsigned int bpf_log2(unsigned int v)
{
  unsigned int r;
  unsigned int shift;

  r = (v > 0xFFFF) << 4; v >>= r;
  shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
  shift = (v > 0xF) << 2; v >>= shift; r |= shift;
  shift = (v > 0x3) << 1; v >>= shift; r |= shift;
  r |= (v >> 1);
  return r;
}

static inline __attribute__((always_inline))
unsigned int bpf_log2l(unsigned long v)
{
  unsigned int hi = v >> 32;
  if (hi)
    return bpf_log2(hi) + 32 + 1;
  else
    return bpf_log2(v) + 1;
}

struct bpf_context;

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
u64 bpf_dext_pkt(void *pkt, u64 off, u64 bofs, u64 bsz) {
  if (bofs == 0 && bsz == 8) {
    return load_byte(pkt, off);
  } else if (bofs + bsz <= 8) {
    return load_byte(pkt, off) >> (8 - (bofs + bsz)) & MASK(bsz);
  } else if (bofs == 0 && bsz == 16) {
    return load_half(pkt, off);
  } else if (bofs + bsz <= 16) {
    return load_half(pkt, off) >> (16 - (bofs + bsz)) & MASK(bsz);
  } else if (bofs == 0 && bsz == 32) {
    return load_word(pkt, off);
  } else if (bofs + bsz <= 32) {
    return load_word(pkt, off) >> (32 - (bofs + bsz)) & MASK(bsz);
  } else if (bofs == 0 && bsz == 64) {
    return load_dword(pkt, off);
  } else if (bofs + bsz <= 64) {
    return load_dword(pkt, off) >> (64 - (bofs + bsz)) & MASK(bsz);
  }
  return 0;
}

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
void bpf_dins_pkt(void *pkt, u64 off, u64 bofs, u64 bsz, u64 val) {
  // The load_xxx function does a bswap before returning the short/word/dword,
  // so the value in register will always be host endian. However, the bytes
  // written back need to be in network order.
  if (bofs == 0 && bsz == 8) {
    bpf_skb_store_bytes(pkt, off, &val, 1, 0);
  } else if (bofs + bsz <= 8) {
    u8 v = load_byte(pkt, off);
    v &= ~(MASK(bsz) << (8 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (8 - (bofs + bsz)));
    bpf_skb_store_bytes(pkt, off, &v, 1, 0);
  } else if (bofs == 0 && bsz == 16) {
    u16 v = bpf_htons(val);
    bpf_skb_store_bytes(pkt, off, &v, 2, 0);
  } else if (bofs + bsz <= 16) {
    u16 v = load_half(pkt, off);
    v &= ~(MASK(bsz) << (16 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (16 - (bofs + bsz)));
    v = bpf_htons(v);
    bpf_skb_store_bytes(pkt, off, &v, 2, 0);
  } else if (bofs == 0 && bsz == 32) {
    u32 v = bpf_htonl(val);
    bpf_skb_store_bytes(pkt, off, &v, 4, 0);
  } else if (bofs + bsz <= 32) {
    u32 v = load_word(pkt, off);
    v &= ~(MASK(bsz) << (32 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (32 - (bofs + bsz)));
    v = bpf_htonl(v);
    bpf_skb_store_bytes(pkt, off, &v, 4, 0);
  } else if (bofs == 0 && bsz == 64) {
    u64 v = bpf_htonll(val);
    bpf_skb_store_bytes(pkt, off, &v, 8, 0);
  } else if (bofs + bsz <= 64) {
    u64 v = load_dword(pkt, off);
    v &= ~(MASK(bsz) << (64 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (64 - (bofs + bsz)));
    v = bpf_htonll(v);
    bpf_skb_store_bytes(pkt, off, &v, 8, 0);
  }
}

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
void * bpf_map_lookup_elem_(uintptr_t map, void *key) {
  return bpf_map_lookup_elem((void *)map, key);
}

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
int bpf_map_update_elem_(uintptr_t map, void *key, void *value, u64 flags) {
  return bpf_map_update_elem((void *)map, key, value, flags);
}

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
int bpf_map_delete_elem_(uintptr_t map, void *key) {
  return bpf_map_delete_elem((void *)map, key);
}

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
int bpf_l3_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
  switch (flags & 0xf) {
    case 2:
      return bpf_l3_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
    case 4:
      return bpf_l3_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
    case 8:
      return bpf_l3_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
    default:
      {}
  }
  return bpf_l3_csum_replace(ctx, off, from, to, flags);
}

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
int bpf_l4_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
  switch (flags & 0xf) {
    case 2:
      return bpf_l4_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
    case 4:
      return bpf_l4_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
    case 8:
      return bpf_l4_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
    default:
      {}
  }
  return bpf_l4_csum_replace(ctx, off, from, to, flags);
}

int incr_cksum_l3(void *off, u64 oldval, u64 newval) asm("llvm.bpf.extra");
int incr_cksum_l4(void *off, u64 oldval, u64 newval, u64 flags) asm("llvm.bpf.extra");
int bpf_num_cpus() asm("llvm.bpf.extra");

struct pt_regs;
int bpf_usdt_readarg(int argc, struct pt_regs *ctx, void *arg) asm("llvm.bpf.extra");
int bpf_usdt_readarg_p(int argc, struct pt_regs *ctx, void *buf, u64 len) asm("llvm.bpf.extra");
/* Scan the ARCH passed in from ARCH env variable (see kbuild_helper.cc) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390x)
#define bpf_target_s390x
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
#define bpf_target_mips
#define bpf_target_defined
#else
#undef bpf_target_defined
#endif

/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390x__)
#define bpf_target_s390x
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__powerpc__)
#define bpf_target_powerpc
#elif defined(__mips__)
#define bpf_target_mips
#endif
#endif

#if defined(bpf_target_powerpc)
#define PT_REGS_PARM1(ctx) ((ctx)->gpr[3])
#define PT_REGS_PARM2(ctx) ((ctx)->gpr[4])
#define PT_REGS_PARM3(ctx) ((ctx)->gpr[5])
#define PT_REGS_PARM4(ctx) ((ctx)->gpr[6])
#define PT_REGS_PARM5(ctx) ((ctx)->gpr[7])
#define PT_REGS_PARM6(ctx) ((ctx)->gpr[8])
#define PT_REGS_RC(ctx) ((ctx)->gpr[3])
#define PT_REGS_IP(ctx) ((ctx)->nip)
#define PT_REGS_SP(ctx) ((ctx)->gpr[1])
#elif defined(bpf_target_s390x)
#define PT_REGS_PARM1(x) ((x)->gprs[2])
#define PT_REGS_PARM2(x) ((x)->gprs[3])
#define PT_REGS_PARM3(x) ((x)->gprs[4])
#define PT_REGS_PARM4(x) ((x)->gprs[5])
#define PT_REGS_PARM5(x) ((x)->gprs[6])
#define PT_REGS_RET(x) ((x)->gprs[14])
#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)
#elif defined(bpf_target_x86)
#define PT_REGS_PARM1(ctx) ((ctx)->di)
#define PT_REGS_PARM2(ctx) ((ctx)->si)
#define PT_REGS_PARM3(ctx) ((ctx)->dx)
#define PT_REGS_PARM4(ctx) ((ctx)->cx)
#define PT_REGS_PARM5(ctx) ((ctx)->r8)
#define PT_REGS_PARM6(ctx) ((ctx)->r9)
#define PT_REGS_RET(ctx) ((ctx)->sp)
#define PT_REGS_FP(ctx) ((ctx)->bp) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(ctx) ((ctx)->ax)
#define PT_REGS_IP(ctx) ((ctx)->ip)
#define PT_REGS_SP(ctx) ((ctx)->sp)
#elif defined(bpf_target_arm64)
#define PT_REGS_PARM1(x) ((x)->regs[0])
#define PT_REGS_PARM2(x) ((x)->regs[1])
#define PT_REGS_PARM3(x) ((x)->regs[2])
#define PT_REGS_PARM4(x) ((x)->regs[3])
#define PT_REGS_PARM5(x) ((x)->regs[4])
#define PT_REGS_PARM6(x) ((x)->regs[5])
#define PT_REGS_RET(x) ((x)->regs[30])
#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[0])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->pc)
#elif defined(bpf_target_mips)
#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_PARM6(x) ((x)->regs[9])
#define PT_REGS_RET(x) ((x)->regs[31])
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[2])
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)
#else
#error "bcc does not support this platform yet"
#endif
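/* Illustrative use of the PT_REGS_* accessors above (a sketch, not part of the
 * header). Reading the first argument in a kprobe and the return value in a
 * kretprobe; the probed function is arbitrary:
 *
 *   int probe_entry(struct pt_regs *ctx) {
 *     void *arg1 = (void *)PT_REGS_PARM1(ctx);
 *     // ... stash arg1 keyed by pid/tgid ...
 *     return 0;
 *   }
 *
 *   int probe_return(struct pt_regs *ctx) {
 *     long ret = PT_REGS_RC(ctx);
 *     // ... correlate with the saved entry state ...
 *     return 0;
 *   }
 */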
#if defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) && !defined(__s390x__)
#define PT_REGS_SYSCALL_CTX(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx))
#else
#define PT_REGS_SYSCALL_CTX(ctx) (ctx)
#endif
/* Helpers for syscall params. Pass in a ctx returned from PT_REGS_SYSCALL_CTX.
 */
#define PT_REGS_PARM1_SYSCALL(ctx) PT_REGS_PARM1(ctx)
#define PT_REGS_PARM2_SYSCALL(ctx) PT_REGS_PARM2(ctx)
#define PT_REGS_PARM3_SYSCALL(ctx) PT_REGS_PARM3(ctx)
#if defined(bpf_target_x86)
#define PT_REGS_PARM4_SYSCALL(ctx) ((ctx)->r10) /* for syscall only */
#else
#define PT_REGS_PARM4_SYSCALL(ctx) PT_REGS_PARM4(ctx)
#endif
#define PT_REGS_PARM5_SYSCALL(ctx) PT_REGS_PARM5(ctx)
#ifdef PT_REGS_PARM6
#define PT_REGS_PARM6_SYSCALL(ctx) PT_REGS_PARM6(ctx)
#endif

#define lock_xadd(ptr, val) ((void)__sync_fetch_and_add(ptr, val))

#define TRACEPOINT_PROBE(category, event) \
int tracepoint__##category##__##event(struct tracepoint__##category##__##event *args)

#define RAW_TRACEPOINT_PROBE(event) \
int raw_tracepoint__##event(struct bpf_raw_tracepoint_args *ctx)

/* The BPF_PROG macro allows defining a trampoline function,
 * borrowed from kernel BPF selftest code.
 */
#define ___bpf_concat(a, b) a ## b
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#define ___bpf_narg(...) \
  ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

#define ___bpf_ctx_cast0() ctx
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
#define ___bpf_ctx_cast(args...) \
  ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)

#define BPF_PROG(name, args...) \
int name(unsigned long long *ctx); \
__attribute__((always_inline)) \
static int ____##name(unsigned long long *ctx, ##args); \
int name(unsigned long long *ctx) \
{ \
  int __ret; \
  \
  _Pragma("GCC diagnostic push") \
  _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
  __ret = ____##name(___bpf_ctx_cast(args)); \
  _Pragma("GCC diagnostic pop") \
  return __ret; \
} \
static int ____##name(unsigned long long *ctx, ##args)
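/* Illustrative use of TRACEPOINT_PROBE above (a sketch, not part of the
 * header). bcc generates the tracepoint__<category>__<event> args struct from
 * the tracepoint format; the field below follows sched:sched_process_exec:
 *
 *   TRACEPOINT_PROBE(sched, sched_process_exec) {
 *     bpf_trace_printk("exec pid %d\n", args->pid);
 *     return 0;
 *   }
 */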
#define KFUNC_PROBE(event, args...) \
  BPF_PROG(kfunc__ ## event, ##args)

#define KRETFUNC_PROBE(event, args...) \
  BPF_PROG(kretfunc__ ## event, ##args)

#define KMOD_RET(event, args...) \
  BPF_PROG(kmod_ret__ ## event, ##args)

#define LSM_PROBE(event, args...) \
  BPF_PROG(lsm__ ## event, ##args)

#define BPF_ITER(target) \
  int bpf_iter__ ## target (struct bpf_iter__ ## target *ctx)

#define TP_DATA_LOC_READ_CONST(dst, field, length) \
  do { \
    unsigned short __offset = args->data_loc_##field & 0xFFFF; \
    bpf_probe_read((void *)dst, length, (char *)args + __offset); \
  } while (0)

#define TP_DATA_LOC_READ(dst, field) \
  do { \
    unsigned short __offset = args->data_loc_##field & 0xFFFF; \
    unsigned short __length = args->data_loc_##field >> 16; \
    bpf_probe_read((void *)dst, __length, (char *)args + __offset); \
  } while (0)

#define TP_DATA_LOC_READ_STR(dst, field, length) \
  do { \
    unsigned short __offset = args->data_loc_##field & 0xFFFF; \
    bpf_probe_read_str((void *)dst, length, (char *)args + __offset); \
  } while (0)

#endif
)********"