Lines Matching refs:fp
89 struct bpf_prog *fp; in bpf_prog_alloc_no_stats() local
92 fp = __vmalloc(size, gfp_flags); in bpf_prog_alloc_no_stats()
93 if (fp == NULL) in bpf_prog_alloc_no_stats()
98 vfree(fp); in bpf_prog_alloc_no_stats()
101 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags); in bpf_prog_alloc_no_stats()
102 if (!fp->active) { in bpf_prog_alloc_no_stats()
103 vfree(fp); in bpf_prog_alloc_no_stats()
108 fp->pages = size / PAGE_SIZE; in bpf_prog_alloc_no_stats()
109 fp->aux = aux; in bpf_prog_alloc_no_stats()
110 fp->aux->prog = fp; in bpf_prog_alloc_no_stats()
111 fp->jit_requested = ebpf_jit_enabled(); in bpf_prog_alloc_no_stats()
112 fp->blinding_requested = bpf_jit_blinding_enabled(fp); in bpf_prog_alloc_no_stats()
117 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); in bpf_prog_alloc_no_stats()
118 mutex_init(&fp->aux->used_maps_mutex); in bpf_prog_alloc_no_stats()
119 mutex_init(&fp->aux->dst_mutex); in bpf_prog_alloc_no_stats()
121 return fp; in bpf_prog_alloc_no_stats()
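
The hits above trace the allocation path: the request is backed by whole pages (fp->pages = size / PAGE_SIZE), the program is obtained with __vmalloc(), and a per-CPU "active" counter is allocated next; if that later allocation fails, the half-built program is vfree()d before returning. Below is a minimal userspace sketch of the same allocate-then-unwind shape. All names are hypothetical stand-ins: aligned_alloc() for __vmalloc(), a plain calloc()'d int for the per-CPU counter.

        #include <stdlib.h>
        #include <string.h>

        #define PAGE_SIZE 4096UL

        struct prog {                   /* hypothetical stand-in for struct bpf_prog */
                size_t pages;           /* number of whole pages backing the object  */
                int *active;            /* stand-in for the per-CPU "active" counter */
        };

        static struct prog *prog_alloc(size_t request)
        {
                size_t size = (request + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);  /* round up to pages */
                struct prog *p = aligned_alloc(PAGE_SIZE, size);             /* ~__vmalloc        */

                if (!p)
                        return NULL;
                memset(p, 0, size);

                p->active = calloc(1, sizeof(*p->active));                   /* ~alloc_percpu_gfp */
                if (!p->active) {
                        free(p);        /* unwind the first allocation, as vfree(fp) does above */
                        return NULL;
                }

                p->pages = size / PAGE_SIZE;    /* mirrors fp->pages = size / PAGE_SIZE */
                return p;
        }
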
235 struct bpf_prog *fp; in bpf_prog_realloc() local
243 fp = __vmalloc(size, gfp_flags); in bpf_prog_realloc()
244 if (fp) { in bpf_prog_realloc()
245 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); in bpf_prog_realloc()
246 fp->pages = pages; in bpf_prog_realloc()
247 fp->aux->prog = fp; in bpf_prog_realloc()
258 return fp; in bpf_prog_realloc()
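
bpf_prog_realloc() grows a program by allocating a fresh area, copying the old pages wholesale, and then repairing the self-referential state: the memcpy carries the aux pointer across, but aux->prog still refers to the old copy, so it is re-pointed at the new one (line 247). A sketch of that fix-up-after-copy pattern, with hypothetical types and malloc() in place of __vmalloc():

        #include <stdlib.h>
        #include <string.h>

        #define PAGE_SIZE 4096UL

        struct prog;
        struct prog_aux { struct prog *prog; };         /* back-pointer into the owner */
        struct prog {
                size_t pages;
                struct prog_aux *aux;                   /* separately allocated object */
        };

        static struct prog *prog_grow(struct prog *old, size_t new_pages)
        {
                struct prog *p = malloc(new_pages * PAGE_SIZE);

                if (!p)
                        return NULL;

                memcpy(p, old, old->pages * PAGE_SIZE); /* aux pointer carried over...      */
                p->pages = new_pages;
                p->aux->prog = p;                       /* ...but aux->prog still named the */
                                                        /* old copy, so re-point it here    */
                return p;
        }
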
261 void __bpf_prog_free(struct bpf_prog *fp) in __bpf_prog_free() argument
263 if (fp->aux) { in __bpf_prog_free()
264 mutex_destroy(&fp->aux->used_maps_mutex); in __bpf_prog_free()
265 mutex_destroy(&fp->aux->dst_mutex); in __bpf_prog_free()
266 kfree(fp->aux->poke_tab); in __bpf_prog_free()
267 kfree(fp->aux); in __bpf_prog_free()
269 free_percpu(fp->stats); in __bpf_prog_free()
270 free_percpu(fp->active); in __bpf_prog_free()
271 vfree(fp); in __bpf_prog_free()
274 int bpf_prog_calc_tag(struct bpf_prog *fp) in bpf_prog_calc_tag() argument
277 u32 raw_size = bpf_prog_tag_scratch_size(fp); in bpf_prog_calc_tag()
298 for (i = 0, was_ld_map = false; i < fp->len; i++) { in bpf_prog_calc_tag()
299 dst[i] = fp->insnsi[i]; in bpf_prog_calc_tag()
318 psize = bpf_prog_insn_size(fp); in bpf_prog_calc_tag()
341 memcpy(fp->tag, result, sizeof(fp->tag)); in bpf_prog_calc_tag()
523 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) in bpf_prog_kallsyms_del_subprogs() argument
527 for (i = 0; i < fp->aux->func_cnt; i++) in bpf_prog_kallsyms_del_subprogs()
528 bpf_prog_kallsyms_del(fp->aux->func[i]); in bpf_prog_kallsyms_del_subprogs()
531 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) in bpf_prog_kallsyms_del_all() argument
533 bpf_prog_kallsyms_del_subprogs(fp); in bpf_prog_kallsyms_del_all()
534 bpf_prog_kallsyms_del(fp); in bpf_prog_kallsyms_del_all()
655 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) in bpf_prog_kallsyms_candidate() argument
657 return fp->jited && !bpf_prog_was_classic(fp); in bpf_prog_kallsyms_candidate()
660 void bpf_prog_kallsyms_add(struct bpf_prog *fp) in bpf_prog_kallsyms_add() argument
662 if (!bpf_prog_kallsyms_candidate(fp) || in bpf_prog_kallsyms_add()
666 bpf_prog_ksym_set_addr(fp); in bpf_prog_kallsyms_add()
667 bpf_prog_ksym_set_name(fp); in bpf_prog_kallsyms_add()
668 fp->aux->ksym.prog = true; in bpf_prog_kallsyms_add()
670 bpf_ksym_add(&fp->aux->ksym); in bpf_prog_kallsyms_add()
673 void bpf_prog_kallsyms_del(struct bpf_prog *fp) in bpf_prog_kallsyms_del() argument
675 if (!bpf_prog_kallsyms_candidate(fp)) in bpf_prog_kallsyms_del()
678 bpf_ksym_del(&fp->aux->ksym); in bpf_prog_kallsyms_del()
1149 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp) in bpf_jit_binary_pack_hdr() argument
1151 unsigned long real_start = (unsigned long)fp->bpf_func; in bpf_jit_binary_pack_hdr()
1159 bpf_jit_binary_hdr(const struct bpf_prog *fp) in bpf_jit_binary_hdr() argument
1161 unsigned long real_start = (unsigned long)fp->bpf_func; in bpf_jit_binary_hdr()
1172 void __weak bpf_jit_free(struct bpf_prog *fp) in bpf_jit_free() argument
1174 if (fp->jited) { in bpf_jit_free()
1175 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); in bpf_jit_free()
1178 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); in bpf_jit_free()
1181 bpf_prog_unlock_free(fp); in bpf_jit_free()
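
Both header lookups start from the JITed entry point fp->bpf_func and walk back to the allocation that contains it; for the plain (non-packed) header this amounts to masking the address down to a page boundary, while the packed variant uses a different rounding. bpf_jit_free() relies on that to find and release the binary image before freeing the program itself. A sketch of the interior-pointer-to-header trick, assuming the header sits at the start of a page-aligned allocation:

        #include <stdint.h>
        #include <stdlib.h>

        #define PAGE_SIZE 4096UL
        #define PAGE_MASK (~(PAGE_SIZE - 1))

        struct binary_header { uint32_t size; uint8_t image[]; };

        /* Recover the page-aligned header from any pointer into its image. */
        static struct binary_header *hdr_from_image(const void *image_ptr)
        {
                return (struct binary_header *)((uintptr_t)image_ptr & PAGE_MASK);
        }

        int main(void)
        {
                struct binary_header *hdr = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
                if (!hdr)
                        return 1;

                void *entry = hdr->image + 128; /* a "function entry" inside the image */
                int ok = hdr_from_image(entry) == hdr;

                free(hdr);
                return ok ? 0 : 1;
        }
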
1358 struct bpf_prog *fp; in bpf_prog_clone_create() local
1360 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); in bpf_prog_clone_create()
1361 if (fp != NULL) { in bpf_prog_clone_create()
1366 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); in bpf_prog_clone_create()
1369 return fp; in bpf_prog_clone_create()
1372 static void bpf_prog_clone_free(struct bpf_prog *fp) in bpf_prog_clone_free() argument
1381 fp->aux = NULL; in bpf_prog_clone_free()
1382 fp->stats = NULL; in bpf_prog_clone_free()
1383 fp->active = NULL; in bpf_prog_clone_free()
1384 __bpf_prog_free(fp); in bpf_prog_clone_free()
1387 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) in bpf_jit_prog_release_other() argument
1392 fp->aux->prog = fp; in bpf_jit_prog_release_other()
2092 const struct bpf_prog *fp) in bpf_prog_map_compatible() argument
2094 enum bpf_prog_type prog_type = resolve_prog_type(fp); in bpf_prog_map_compatible()
2097 if (fp->kprobe_override) in bpf_prog_map_compatible()
2106 map->owner.jited = fp->jited; in bpf_prog_map_compatible()
2107 map->owner.xdp_has_frags = fp->aux->xdp_has_frags; in bpf_prog_map_compatible()
2111 map->owner.jited == fp->jited && in bpf_prog_map_compatible()
2112 map->owner.xdp_has_frags == fp->aux->xdp_has_frags; in bpf_prog_map_compatible()
2119 static int bpf_check_tail_call(const struct bpf_prog *fp) in bpf_check_tail_call() argument
2121 struct bpf_prog_aux *aux = fp->aux; in bpf_check_tail_call()
2131 if (!bpf_prog_map_compatible(map, fp)) { in bpf_check_tail_call()
2142 static void bpf_prog_select_func(struct bpf_prog *fp) in bpf_prog_select_func() argument
2145 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); in bpf_prog_select_func()
2147 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; in bpf_prog_select_func()
2149 fp->bpf_func = __bpf_prog_ret0_warn; in bpf_prog_select_func()
2164 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) in bpf_prog_select_runtime() argument
2171 if (fp->bpf_func) in bpf_prog_select_runtime()
2175 bpf_prog_has_kfunc_call(fp)) in bpf_prog_select_runtime()
2178 bpf_prog_select_func(fp); in bpf_prog_select_runtime()
2186 if (!bpf_prog_is_dev_bound(fp->aux)) { in bpf_prog_select_runtime()
2187 *err = bpf_prog_alloc_jited_linfo(fp); in bpf_prog_select_runtime()
2189 return fp; in bpf_prog_select_runtime()
2191 fp = bpf_int_jit_compile(fp); in bpf_prog_select_runtime()
2192 bpf_prog_jit_attempt_done(fp); in bpf_prog_select_runtime()
2193 if (!fp->jited && jit_needed) { in bpf_prog_select_runtime()
2195 return fp; in bpf_prog_select_runtime()
2198 *err = bpf_prog_offload_compile(fp); in bpf_prog_select_runtime()
2200 return fp; in bpf_prog_select_runtime()
2204 bpf_prog_lock_ro(fp); in bpf_prog_select_runtime()
2211 *err = bpf_check_tail_call(fp); in bpf_prog_select_runtime()
2213 return fp; in bpf_prog_select_runtime()
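
bpf_prog_select_runtime() is the final dispatch step visible in the hits above: a program that already has an entry point is left alone; otherwise an interpreter is selected, and for programs that are not device-bound the JIT is attempted, failing hard only when a JIT was actually required (for example because the program calls kfuncs, line 2175); device-bound programs go through the offload compiler instead. On success the image is sealed read-only and the tail-call maps are validated. A condensed sketch of that decision ladder; every helper here is a hypothetical stand-in, not the kernel function it approximates:

        #include <stdbool.h>
        #include <errno.h>

        struct prog { int (*bpf_func)(void); bool dev_bound; bool jited; bool jit_needed; };

        /* Hypothetical stand-ins for the real steps in the listing above. */
        static int  interp_stub(void)                  { return 0; }
        static void select_interpreter(struct prog *p) { p->bpf_func = interp_stub; }
        static void try_jit(struct prog *p)            { p->jited = true; }
        static int  offload_compile(struct prog *p)    { (void)p; return 0; }
        static int  check_tail_calls(struct prog *p)   { (void)p; return 0; }

        static int select_runtime(struct prog *p)
        {
                if (p->bpf_func)                /* entry point already chosen earlier */
                        return check_tail_calls(p);

                select_interpreter(p);          /* ~bpf_prog_select_func()            */

                if (!p->dev_bound) {
                        try_jit(p);             /* ~bpf_int_jit_compile()             */
                        if (!p->jited && p->jit_needed)
                                return -ENOTSUP;  /* JIT required but unavailable     */
                } else {
                        int err = offload_compile(p);  /* ~bpf_prog_offload_compile() */
                        if (err)
                                return err;
                }
                /* The real code seals the image read-only here, then checks tail calls. */
                return check_tail_calls(p);
        }
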
2575 void bpf_prog_free(struct bpf_prog *fp) in bpf_prog_free() argument
2577 struct bpf_prog_aux *aux = fp->aux; in bpf_prog_free()