/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_TRACING_H__
#define __BPF_TRACING_H__

#include "bpf_helpers.h"

/*
 * Determine the target architecture for the BPF program being built.
 * Exactly one bpf_target_<arch> macro gets defined, plus bpf_target_defined,
 * which gates the real PT_REGS_*() accessors further down in this header.
 */

/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390)
#define bpf_target_s390
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm)
#define bpf_target_arm
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_riscv)
#define bpf_target_riscv
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arc)
#define bpf_target_arc
#define bpf_target_defined
#else

/* Fall back to what the compiler says */
#if defined(__x86_64__)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__s390__)
#define bpf_target_s390
#define bpf_target_defined
#elif defined(__arm__)
#define bpf_target_arm
#define bpf_target_defined
#elif defined(__aarch64__)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__mips__)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__powerpc__)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__sparc__)
#define bpf_target_sparc
#define bpf_target_defined
/* only 64-bit riscv is auto-detected from compiler defines */
#elif defined(__riscv) && __riscv_xlen == 64
#define bpf_target_riscv
#define bpf_target_defined
#elif defined(__arc__)
#define bpf_target_arc
#define bpf_target_defined
#endif /* no compiler target */

#endif

#ifndef __BPF_TARGET_MISSING
#define __BPF_TARGET_MISSING "GCC error \"Must specify a BPF target arch via __TARGET_ARCH_xxx\"" 71 #endif 72 73 #if defined(bpf_target_x86) 74 75 #if defined(__KERNEL__) || defined(__VMLINUX_H__) 76 77 #define __PT_PARM1_REG di 78 #define __PT_PARM2_REG si 79 #define __PT_PARM3_REG dx 80 #define __PT_PARM4_REG cx 81 #define __PT_PARM5_REG r8 82 #define __PT_RET_REG sp 83 #define __PT_FP_REG bp 84 #define __PT_RC_REG ax 85 #define __PT_SP_REG sp 86 #define __PT_IP_REG ip 87 /* syscall uses r10 for PARM4 */ 88 #define PT_REGS_PARM4_SYSCALL(x) ((x)->r10) 89 #define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10) 90 91 #else 92 93 #ifdef __i386__ 94 95 #define __PT_PARM1_REG eax 96 #define __PT_PARM2_REG edx 97 #define __PT_PARM3_REG ecx 98 /* i386 kernel is built with -mregparm=3 */ 99 #define __PT_PARM4_REG __unsupported__ 100 #define __PT_PARM5_REG __unsupported__ 101 #define __PT_RET_REG esp 102 #define __PT_FP_REG ebp 103 #define __PT_RC_REG eax 104 #define __PT_SP_REG esp 105 #define __PT_IP_REG eip 106 107 #else /* __i386__ */ 108 109 #define __PT_PARM1_REG rdi 110 #define __PT_PARM2_REG rsi 111 #define __PT_PARM3_REG rdx 112 #define __PT_PARM4_REG rcx 113 #define __PT_PARM5_REG r8 114 #define __PT_RET_REG rsp 115 #define __PT_FP_REG rbp 116 #define __PT_RC_REG rax 117 #define __PT_SP_REG rsp 118 #define __PT_IP_REG rip 119 /* syscall uses r10 for PARM4 */ 120 #define PT_REGS_PARM4_SYSCALL(x) ((x)->r10) 121 #define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10) 122 123 #endif /* __i386__ */ 124 125 #endif /* __KERNEL__ || __VMLINUX_H__ */ 126 127 #elif defined(bpf_target_s390) 128 129 struct pt_regs___s390 { 130 unsigned long orig_gpr2; 131 }; 132 133 /* s390 provides user_pt_regs instead of struct pt_regs to userspace */ 134 #define __PT_REGS_CAST(x) ((const user_pt_regs *)(x)) 135 #define __PT_PARM1_REG gprs[2] 136 #define __PT_PARM2_REG gprs[3] 137 #define __PT_PARM3_REG gprs[4] 138 #define __PT_PARM4_REG gprs[5] 139 #define __PT_PARM5_REG gprs[6] 
140 #define __PT_RET_REG grps[14] 141 #define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */ 142 #define __PT_RC_REG gprs[2] 143 #define __PT_SP_REG gprs[15] 144 #define __PT_IP_REG psw.addr 145 #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x) 146 #define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2) 147 148 #elif defined(bpf_target_arm) 149 150 #define __PT_PARM1_REG uregs[0] 151 #define __PT_PARM2_REG uregs[1] 152 #define __PT_PARM3_REG uregs[2] 153 #define __PT_PARM4_REG uregs[3] 154 #define __PT_PARM5_REG uregs[4] 155 #define __PT_RET_REG uregs[14] 156 #define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */ 157 #define __PT_RC_REG uregs[0] 158 #define __PT_SP_REG uregs[13] 159 #define __PT_IP_REG uregs[12] 160 161 #elif defined(bpf_target_arm64) 162 163 struct pt_regs___arm64 { 164 unsigned long orig_x0; 165 }; 166 167 /* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */ 168 #define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x)) 169 #define __PT_PARM1_REG regs[0] 170 #define __PT_PARM2_REG regs[1] 171 #define __PT_PARM3_REG regs[2] 172 #define __PT_PARM4_REG regs[3] 173 #define __PT_PARM5_REG regs[4] 174 #define __PT_PARM6_REG regs[5] 175 #define __PT_RET_REG regs[30] 176 #define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */ 177 #define __PT_RC_REG regs[0] 178 #define __PT_SP_REG sp 179 #define __PT_IP_REG pc 180 #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x) 181 #define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0) 182 183 #elif defined(bpf_target_mips) 184 185 #define __PT_PARM1_REG regs[4] 186 #define __PT_PARM2_REG regs[5] 187 #define __PT_PARM3_REG regs[6] 188 #define __PT_PARM4_REG regs[7] 189 #define __PT_PARM5_REG regs[8] 190 #define __PT_RET_REG regs[31] 191 #define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */ 192 #define __PT_RC_REG 
regs[2] 193 #define __PT_SP_REG regs[29] 194 #define __PT_IP_REG cp0_epc 195 196 #elif defined(bpf_target_powerpc) 197 198 #define __PT_PARM1_REG gpr[3] 199 #define __PT_PARM2_REG gpr[4] 200 #define __PT_PARM3_REG gpr[5] 201 #define __PT_PARM4_REG gpr[6] 202 #define __PT_PARM5_REG gpr[7] 203 #define __PT_RET_REG regs[31] 204 #define __PT_FP_REG __unsupported__ 205 #define __PT_RC_REG gpr[3] 206 #define __PT_SP_REG sp 207 #define __PT_IP_REG nip 208 /* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */ 209 #define PT_REGS_SYSCALL_REGS(ctx) ctx 210 211 #elif defined(bpf_target_sparc) 212 213 #define __PT_PARM1_REG u_regs[UREG_I0] 214 #define __PT_PARM2_REG u_regs[UREG_I1] 215 #define __PT_PARM3_REG u_regs[UREG_I2] 216 #define __PT_PARM4_REG u_regs[UREG_I3] 217 #define __PT_PARM5_REG u_regs[UREG_I4] 218 #define __PT_RET_REG u_regs[UREG_I7] 219 #define __PT_FP_REG __unsupported__ 220 #define __PT_RC_REG u_regs[UREG_I0] 221 #define __PT_SP_REG u_regs[UREG_FP] 222 /* Should this also be a bpf_target check for the sparc case? */ 223 #if defined(__arch64__) 224 #define __PT_IP_REG tpc 225 #else 226 #define __PT_IP_REG pc 227 #endif 228 229 #elif defined(bpf_target_riscv) 230 231 #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x)) 232 #define __PT_PARM1_REG a0 233 #define __PT_PARM2_REG a1 234 #define __PT_PARM3_REG a2 235 #define __PT_PARM4_REG a3 236 #define __PT_PARM5_REG a4 237 #define __PT_PARM6_REG a5 238 #define __PT_RET_REG ra 239 #define __PT_FP_REG s0 240 #define __PT_RC_REG a0 241 #define __PT_SP_REG sp 242 #define __PT_IP_REG pc 243 /* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. 
*/ 244 #define PT_REGS_SYSCALL_REGS(ctx) ctx 245 246 #elif defined(bpf_target_arc) 247 248 /* arc provides struct user_pt_regs instead of struct pt_regs to userspace */ 249 #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x)) 250 #define __PT_PARM1_REG scratch.r0 251 #define __PT_PARM2_REG scratch.r1 252 #define __PT_PARM3_REG scratch.r2 253 #define __PT_PARM4_REG scratch.r3 254 #define __PT_PARM5_REG scratch.r4 255 #define __PT_RET_REG scratch.blink 256 #define __PT_FP_REG __unsupported__ 257 #define __PT_RC_REG scratch.r0 258 #define __PT_SP_REG scratch.sp 259 #define __PT_IP_REG scratch.ret 260 /* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */ 261 #define PT_REGS_SYSCALL_REGS(ctx) ctx 262 263 #endif 264 265 #if defined(bpf_target_defined) 266 267 struct pt_regs; 268 269 /* allow some architecutres to override `struct pt_regs` */ 270 #ifndef __PT_REGS_CAST 271 #define __PT_REGS_CAST(x) (x) 272 #endif 273 274 #define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG) 275 #define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG) 276 #define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG) 277 #define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG) 278 #define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG) 279 #define PT_REGS_PARM6(x) (__PT_REGS_CAST(x)->__PT_PARM6_REG) 280 #define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG) 281 #define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG) 282 #define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG) 283 #define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG) 284 #define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG) 285 286 #define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG) 287 #define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG) 288 #define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG) 289 #define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG) 290 #define PT_REGS_PARM5_CORE(x) 
BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG) 291 #define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG) 292 #define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG) 293 #define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG) 294 #define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG) 295 #define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG) 296 297 #if defined(bpf_target_powerpc) 298 299 #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) 300 #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP 301 302 #elif defined(bpf_target_sparc) 303 304 #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) 305 #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP 306 307 #else 308 309 #define BPF_KPROBE_READ_RET_IP(ip, ctx) \ 310 ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); }) 311 #define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \ 312 ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); }) 313 314 #endif 315 316 #ifndef PT_REGS_PARM1_SYSCALL 317 #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1(x) 318 #endif 319 #define PT_REGS_PARM2_SYSCALL(x) PT_REGS_PARM2(x) 320 #define PT_REGS_PARM3_SYSCALL(x) PT_REGS_PARM3(x) 321 #ifndef PT_REGS_PARM4_SYSCALL 322 #define PT_REGS_PARM4_SYSCALL(x) PT_REGS_PARM4(x) 323 #endif 324 #define PT_REGS_PARM5_SYSCALL(x) PT_REGS_PARM5(x) 325 326 #ifndef PT_REGS_PARM1_CORE_SYSCALL 327 #define PT_REGS_PARM1_CORE_SYSCALL(x) PT_REGS_PARM1_CORE(x) 328 #endif 329 #define PT_REGS_PARM2_CORE_SYSCALL(x) PT_REGS_PARM2_CORE(x) 330 #define PT_REGS_PARM3_CORE_SYSCALL(x) PT_REGS_PARM3_CORE(x) 331 #ifndef PT_REGS_PARM4_CORE_SYSCALL 332 #define PT_REGS_PARM4_CORE_SYSCALL(x) PT_REGS_PARM4_CORE(x) 333 #endif 334 #define PT_REGS_PARM5_CORE_SYSCALL(x) PT_REGS_PARM5_CORE(x) 335 336 #else /* defined(bpf_target_defined) */ 337 338 #define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) 
/* No-target stubs: every use fails the build with the __BPF_TARGET_MISSING
 * diagnostic; the 0l value only exists to keep the macros expression-shaped.
 */
#define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM6(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_SP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_IP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#define PT_REGS_PARM1_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_SP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_IP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#endif /* defined(bpf_target_defined) */

/*
 * When invoked from a syscall handler kprobe, returns a pointer to a
 * struct pt_regs containing syscall arguments and suitable for passing to
 * PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL().
 */
#ifndef PT_REGS_SYSCALL_REGS
/* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. */
#define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx))
#endif

/* Token-pasting and argument-counting helpers shared by the macros below. */
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
#endif
#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#endif
#ifndef ___bpf_nth
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#endif
/* ___bpf_narg(...) evaluates to the number of variadic args (0..12) */
#ifndef ___bpf_narg
#define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif

/* Expand N declared args into "ctx, (void *)ctx[0], ..., (void *)ctx[N-1]" */
#define ___bpf_ctx_cast0() ctx
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)

/*
 * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
 * similar kinds of BPF programs, that accept input arguments as a single
 * pointer to untyped u64 array, where each u64 can actually be a typed
 * pointer or integer of different size. Instead of requiring user to write
 * manual casts and work with array elements by index, BPF_PROG macro
 * allows user to declare a list of named and typed input arguments in the
 * same syntax as for normal C function. All the casting is hidden and
 * performed transparently, while user code can just assume working with
 * function arguments of specified type and name.
 *
 * Original raw context argument is preserved as well as 'ctx' argument.
 * This is useful when using BPF helpers that expect original context
 * as one of the parameters (e.g., for bpf_perf_event_output()).
 */
#define BPF_PROG(name, args...)						    \
name(unsigned long long *ctx);						    \
static __always_inline typeof(name(0))					    \
____##name(unsigned long long *ctx, ##args);				    \
typeof(name(0)) name(unsigned long long *ctx)				    \
{									    \
	_Pragma("GCC diagnostic push")					    \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
	return ____##name(___bpf_ctx_cast(args));			    \
	_Pragma("GCC diagnostic pop")					    \
}									    \
static __always_inline typeof(name(0))					    \
____##name(unsigned long long *ctx, ##args)

#ifndef ___bpf_nth2
#define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, \
		    _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
#endif
/* ___bpf_narg2 counts (type, name) PAIRS, hence each count appears twice */
#ifndef ___bpf_narg2
#define ___bpf_narg2(...) \
	___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, \
		    6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
#endif

/* number of u64 slots a value of type t occupies in the trampoline ctx */
#define ___bpf_treg_cnt(t) \
	__builtin_choose_expr(sizeof(t) == 1, 1, \
	__builtin_choose_expr(sizeof(t) == 2, 1, \
	__builtin_choose_expr(sizeof(t) == 4, 1, \
	__builtin_choose_expr(sizeof(t) == 8, 1, \
	__builtin_choose_expr(sizeof(t) == 16, 2, \
			      (void)0)))))

/* total u64 slots consumed by the whole (type, name) argument list */
#define ___bpf_reg_cnt0() (0)
#define ___bpf_reg_cnt1(t, x) (___bpf_reg_cnt0() + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt2(t, x, args...) (___bpf_reg_cnt1(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt3(t, x, args...) (___bpf_reg_cnt2(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt4(t, x, args...) (___bpf_reg_cnt3(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt5(t, x, args...) (___bpf_reg_cnt4(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt6(t, x, args...) (___bpf_reg_cnt5(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt7(t, x, args...) (___bpf_reg_cnt6(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt8(t, x, args...) (___bpf_reg_cnt7(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt9(t, x, args...) (___bpf_reg_cnt8(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt10(t, x, args...) (___bpf_reg_cnt9(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt11(t, x, args...) (___bpf_reg_cnt10(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt12(t, x, args...) (___bpf_reg_cnt11(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt(args...) ___bpf_apply(___bpf_reg_cnt, ___bpf_narg2(args))(args)

/* reassemble a value of type t from one or two u64 ctx slots via a union */
#define ___bpf_union_arg(t, x, n) \
	__builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 z[1]; t x; } ___t = { .z = {ctx[n]}}; ___t.x; }), \
	__builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
	__builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
	__builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 z[1]; t x; } ___t = {.z = {ctx[n]} }; ___t.x; }), \
	__builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 z[2]; t x; } ___t = {.z = {ctx[n], ctx[n + 1]} }; ___t.x; }), \
			      (void)0)))))

/* n is the total slot count; each arg's offset is n minus the slots
 * consumed by itself and everything after it */
#define ___bpf_ctx_arg0(n, args...)
#define ___bpf_ctx_arg1(n, t, x) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt1(t, x))
#define ___bpf_ctx_arg2(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt2(t, x, args)) ___bpf_ctx_arg1(n, args)
#define ___bpf_ctx_arg3(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt3(t, x, args)) ___bpf_ctx_arg2(n, args)
#define ___bpf_ctx_arg4(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt4(t, x, args)) ___bpf_ctx_arg3(n, args)
#define ___bpf_ctx_arg5(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt5(t, x, args)) ___bpf_ctx_arg4(n, args)
#define ___bpf_ctx_arg6(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt6(t, x, args)) ___bpf_ctx_arg5(n, args)
#define ___bpf_ctx_arg7(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt7(t, x, args)) ___bpf_ctx_arg6(n, args)
#define ___bpf_ctx_arg8(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt8(t, x, args)) ___bpf_ctx_arg7(n, args)
#define ___bpf_ctx_arg9(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt9(t, x, args)) ___bpf_ctx_arg8(n, args)
#define ___bpf_ctx_arg10(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt10(t, x, args)) ___bpf_ctx_arg9(n, args)
#define ___bpf_ctx_arg11(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt11(t, x, args)) ___bpf_ctx_arg10(n, args)
#define ___bpf_ctx_arg12(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt12(t, x, args)) ___bpf_ctx_arg11(n, args)
#define ___bpf_ctx_arg(args...) ___bpf_apply(___bpf_ctx_arg, ___bpf_narg2(args))(___bpf_reg_cnt(args), args)

/* expand (type, name) pairs into a ", t1 x1, t2 x2, ..." parameter list */
#define ___bpf_ctx_decl0()
#define ___bpf_ctx_decl1(t, x) , t x
#define ___bpf_ctx_decl2(t, x, args...) , t x ___bpf_ctx_decl1(args)
#define ___bpf_ctx_decl3(t, x, args...) , t x ___bpf_ctx_decl2(args)
#define ___bpf_ctx_decl4(t, x, args...) , t x ___bpf_ctx_decl3(args)
#define ___bpf_ctx_decl5(t, x, args...) , t x ___bpf_ctx_decl4(args)
#define ___bpf_ctx_decl6(t, x, args...) , t x ___bpf_ctx_decl5(args)
#define ___bpf_ctx_decl7(t, x, args...) , t x ___bpf_ctx_decl6(args)
#define ___bpf_ctx_decl8(t, x, args...) , t x ___bpf_ctx_decl7(args)
#define ___bpf_ctx_decl9(t, x, args...) , t x ___bpf_ctx_decl8(args)
#define ___bpf_ctx_decl10(t, x, args...) , t x ___bpf_ctx_decl9(args)
#define ___bpf_ctx_decl11(t, x, args...) , t x ___bpf_ctx_decl10(args)
#define ___bpf_ctx_decl12(t, x, args...) , t x ___bpf_ctx_decl11(args)
#define ___bpf_ctx_decl(args...) ___bpf_apply(___bpf_ctx_decl, ___bpf_narg2(args))(args)

/*
 * BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct
 * arguments. Since each struct argument might take one or two u64 values
 * in the trampoline stack, argument type size is needed to place proper number
 * of u64 values for each argument. Therefore, BPF_PROG2 has different
 * syntax from BPF_PROG. For example, for the following BPF_PROG syntax:
 *
 *     int BPF_PROG(test2, int a, int b) { ... }
 *
 * the corresponding BPF_PROG2 syntax is:
 *
 *     int BPF_PROG2(test2, int, a, int, b) { ... }
 *
 * where type and the corresponding argument name are separated by comma.
 *
 * Use BPF_PROG2 macro if one of the arguments might be a struct/union larger
 * than 8 bytes:
 *
 *     int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a, int, b,
 *                   int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
 *     {
 *             // access a, b, c, d, e, and ret directly
 *             ...
 *     }
 */
#define BPF_PROG2(name, args...)					    \
name(unsigned long long *ctx);						    \
static __always_inline typeof(name(0))					    \
____##name(unsigned long long *ctx ___bpf_ctx_decl(args));		    \
typeof(name(0)) name(unsigned long long *ctx)				    \
{									    \
	return ____##name(ctx ___bpf_ctx_arg(args));			    \
}									    \
static __always_inline typeof(name(0))					    \
____##name(unsigned long long *ctx ___bpf_ctx_decl(args))

struct pt_regs;

/* expand up to 6 declared args into direct PT_REGS_PARMn(ctx) reads */
#define ___bpf_kprobe_args0() ctx
#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
#define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (void *)PT_REGS_PARM6(ctx)
#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)

/*
 * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
 * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
 * low-level way of getting kprobe input arguments from struct pt_regs, and
 * provides a familiar typed and named function arguments syntax and
 * semantics of accessing kprobe input parameters.
 *
 * Original struct pt_regs* context is preserved as 'ctx' argument. This might
 * be necessary when using BPF helpers like bpf_perf_event_output().
 */
#define BPF_KPROBE(name, args...)					    \
name(struct pt_regs *ctx);						    \
static __always_inline typeof(name(0))					    \
____##name(struct pt_regs *ctx, ##args);				    \
typeof(name(0)) name(struct pt_regs *ctx)				    \
{									    \
	_Pragma("GCC diagnostic push")					    \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
	return ____##name(___bpf_kprobe_args(args));			    \
	_Pragma("GCC diagnostic pop")					    \
}									    \
static __always_inline typeof(name(0))					    \
____##name(struct pt_regs *ctx, ##args)

/* only the return value (PT_REGS_RC) is available in a kretprobe */
#define ___bpf_kretprobe_args0() ctx
#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)

/*
 * BPF_KRETPROBE is similar to BPF_KPROBE, except, it only provides optional
 * return value (in addition to `struct pt_regs *ctx`), but no input
 * arguments, because they will be clobbered by the time probed function
 * returns.
 */
#define BPF_KRETPROBE(name, args...)					    \
name(struct pt_regs *ctx);						    \
static __always_inline typeof(name(0))					    \
____##name(struct pt_regs *ctx, ##args);				    \
typeof(name(0)) name(struct pt_regs *ctx)				    \
{									    \
	_Pragma("GCC diagnostic push")					    \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
	return ____##name(___bpf_kretprobe_args(args));			    \
	_Pragma("GCC diagnostic pop")					    \
}									    \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)

/* Without CONFIG_ARCH_HAS_SYSCALL_WRAPPER the syscall handler receives real
 * pt_regs as its context ('regs' == 'ctx' in BPF_KSYSCALL below), so the
 * arguments can be read from it directly.
 */
#define ___bpf_syscall_args0() ctx
#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)

/* With CONFIG_ARCH_HAS_SYSCALL_WRAPPER 'regs' is a pt_regs pointer fetched
 * from the wrapper's first argument, so syscall arguments have to be
 * BPF_CORE_READ out of it (this variant is used by BPF_KSYSCALL when
 * LINUX_HAS_SYSCALL_WRAPPER is set).
 */
#define ___bpf_syswrap_args0() ctx
#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)

/*
 * BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for
 * tracing syscall functions, like __x64_sys_close. It hides the underlying
 * platform-specific low-level way of getting syscall input arguments from
 * struct pt_regs, and provides a familiar typed and named function arguments
 * syntax and semantics of accessing syscall input parameters.
 *
 * Original struct pt_regs * context is preserved as 'ctx' argument. This might
 * be necessary when using BPF helpers like bpf_perf_event_output().
 *
 * At the moment BPF_KSYSCALL does not transparently handle all the calling
 * convention quirks for the following syscalls:
 *
 * - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
 * - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
 *   CONFIG_CLONE_BACKWARDS3.
 * - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
 * - compat syscalls.
 *
 * This may or may not change in the future. User needs to take extra measures
 * to handle such quirks explicitly, if necessary.
 *
 * This macro relies on BPF CO-RE support and virtual __kconfig externs.
 */
#define BPF_KSYSCALL(name, args...)					    \
name(struct pt_regs *ctx);						    \
extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig;			    \
static __always_inline typeof(name(0))					    \
____##name(struct pt_regs *ctx, ##args);				    \
typeof(name(0)) name(struct pt_regs *ctx)				    \
{									    \
	struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER		    \
			       ? (struct pt_regs *)PT_REGS_PARM1(ctx)	    \
			       : ctx;					    \
	_Pragma("GCC diagnostic push")					    \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		    \
	if (LINUX_HAS_SYSCALL_WRAPPER)					    \
		return ____##name(___bpf_syswrap_args(args));		    \
	else								    \
		return ____##name(___bpf_syscall_args(args));		    \
	_Pragma("GCC diagnostic pop")					    \
}									    \
static __always_inline typeof(name(0))					    \
____##name(struct pt_regs *ctx, ##args)

/* legacy name kept for backward compatibility */
#define BPF_KPROBE_SYSCALL BPF_KSYSCALL

#endif