Lines matching refs:rcpu in kernel/bpf/cpumap.c
127 static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) in get_cpu_map_entry() argument
129 atomic_inc(&rcpu->refcnt); in get_cpu_map_entry()
152 static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) in put_cpu_map_entry() argument
154 if (atomic_dec_and_test(&rcpu->refcnt)) { in put_cpu_map_entry()
155 if (rcpu->prog) in put_cpu_map_entry()
156 bpf_prog_put(rcpu->prog); in put_cpu_map_entry()
158 __cpu_map_ring_cleanup(rcpu->queue); in put_cpu_map_entry()
159 ptr_ring_cleanup(rcpu->queue, NULL); in put_cpu_map_entry()
160 kfree(rcpu->queue); in put_cpu_map_entry()
161 kfree(rcpu); in put_cpu_map_entry()
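
The get/put pair above pins a bpf_cpu_map_entry with an atomic reference count; the final put releases the attached program, drains and frees the ptr_ring, and frees the entry itself. A minimal userspace sketch of that pattern, using C11 atomics and hypothetical names (map_entry, entry_get, entry_put) rather than the kernel helpers:

/* Sketch of the get/put refcount pattern shown above. Names are
 * illustrative, not kernel APIs; the kernel uses atomic_inc() and
 * atomic_dec_and_test() on rcpu->refcnt. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct map_entry {
	atomic_int refcnt;
	void *queue;            /* stand-in for rcpu->queue */
};

static void entry_get(struct map_entry *e)
{
	atomic_fetch_add(&e->refcnt, 1);
}

static void entry_put(struct map_entry *e)
{
	/* Last reference gone: release owned resources, then the entry. */
	if (atomic_fetch_sub(&e->refcnt, 1) == 1) {
		free(e->queue);
		free(e);
	}
}

int main(void)
{
	struct map_entry *e = calloc(1, sizeof(*e));

	atomic_init(&e->refcnt, 1);     /* reference held by the "map slot" */
	e->queue = malloc(64);

	entry_get(e);                   /* extra reference, e.g. for a worker */
	entry_put(e);                   /* worker done */
	entry_put(e);                   /* slot reference dropped: entry freed */
	puts("entry released");
	return 0;
}
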
168 struct bpf_cpu_map_entry *rcpu; in cpu_map_kthread_stop() local
170 rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq); in cpu_map_kthread_stop()
178 kthread_stop(rcpu->kthread); in cpu_map_kthread_stop()
181 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, in cpu_map_bpf_prog_run_skb() argument
191 act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog); in cpu_map_bpf_prog_run_skb()
198 rcpu->prog); in cpu_map_bpf_prog_run_skb()
207 bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act); in cpu_map_bpf_prog_run_skb()
210 trace_xdp_exception(skb->dev, rcpu->prog, act); in cpu_map_bpf_prog_run_skb()
221 static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, in cpu_map_bpf_prog_run_xdp() argument
243 act = bpf_prog_run_xdp(rcpu->prog, &xdp); in cpu_map_bpf_prog_run_xdp()
257 rcpu->prog); in cpu_map_bpf_prog_run_xdp()
266 bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act); in cpu_map_bpf_prog_run_xdp()
282 static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames, in cpu_map_bpf_prog_run() argument
288 if (!rcpu->prog) in cpu_map_bpf_prog_run()
293 nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats); in cpu_map_bpf_prog_run()
299 cpu_map_bpf_prog_run_skb(rcpu, list, stats); in cpu_map_bpf_prog_run()
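
cpu_map_bpf_prog_run() dispatches a batch to the attached program: when rcpu->prog is NULL everything passes through untouched, otherwise the xdp_frame batch and the skb list are run through the program and only passing frames are kept. A toy sketch of that batch-filter shape, with made-up types and verdicts (frame, run_verdict, PASS/DROP) rather than the kernel's XDP types and return codes:

/* If no program is attached the batch passes through; otherwise each frame
 * gets a verdict and passing frames are compacted in place. */
#include <stddef.h>
#include <stdio.h>

enum verdict { DROP, PASS };

struct frame { int len; };

static enum verdict run_verdict(const struct frame *f)
{
	return f->len > 0 ? PASS : DROP;   /* toy policy */
}

/* Returns how many frames survived; mirrors "nframes" in the listing. */
static size_t run_prog_on_batch(struct frame **frames, size_t n, int have_prog)
{
	size_t i, kept = 0;

	if (!have_prog)
		return n;                   /* like !rcpu->prog: pass everything */

	for (i = 0; i < n; i++)
		if (run_verdict(frames[i]) == PASS)
			frames[kept++] = frames[i];
	return kept;
}

int main(void)
{
	struct frame a = { 64 }, b = { 0 }, c = { 128 };
	struct frame *batch[] = { &a, &b, &c };
	size_t n = run_prog_on_batch(batch, 3, 1);

	printf("%zu of 3 frames passed\n", n);
	return 0;
}
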
308 struct bpf_cpu_map_entry *rcpu = data; in cpu_map_kthread_run() local
310 complete(&rcpu->kthread_running); in cpu_map_kthread_run()
318 while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
328 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
331 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
346 n = __ptr_ring_consume_batched(rcpu->queue, frames, in cpu_map_kthread_run()
371 nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list); in cpu_map_kthread_run()
398 trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops, in cpu_map_kthread_run()
405 put_cpu_map_entry(rcpu); in cpu_map_kthread_run()
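
cpu_map_kthread_run() is the per-CPU worker: it signals that it is running, loops consuming batches from the entry's ptr_ring until a stop is requested and the ring is drained, and finally drops its reference with put_cpu_map_entry(). A userspace model of that loop shape, using a mutex/condvar queue in place of the ptr_ring and kthread parking (all names illustrative):

/* The worker exits only when a stop was requested AND the queue is drained,
 * mirroring: while (!kthread_should_stop() || !__ptr_ring_empty(...)) */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define QCAP   64
#define BATCH  8

static struct {
	int buf[QCAP];
	int head, tail, count;
	bool stop;
	pthread_mutex_t lock;
	pthread_cond_t wake;
} q = { .lock = PTHREAD_MUTEX_INITIALIZER, .wake = PTHREAD_COND_INITIALIZER };

static void *worker(void *arg)
{
	int batch[BATCH], n, i, total = 0;

	(void)arg;
	pthread_mutex_lock(&q.lock);
	while (!q.stop || q.count) {
		if (!q.count) {
			pthread_cond_wait(&q.wake, &q.lock);   /* nothing to do: sleep */
			continue;
		}
		for (n = 0; n < BATCH && q.count; n++, q.count--) {
			batch[n] = q.buf[q.head];
			q.head = (q.head + 1) % QCAP;
		}
		pthread_mutex_unlock(&q.lock);
		for (i = 0; i < n; i++)      /* "run the program" on the batch */
			total += batch[i];
		pthread_mutex_lock(&q.lock);
	}
	pthread_mutex_unlock(&q.lock);
	printf("worker consumed sum %d\n", total);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	pthread_create(&t, NULL, worker, NULL);
	for (i = 1; i <= 20; i++) {
		pthread_mutex_lock(&q.lock);
		q.buf[q.tail] = i;
		q.tail = (q.tail + 1) % QCAP;
		q.count++;
		pthread_cond_signal(&q.wake);
		pthread_mutex_unlock(&q.lock);
	}
	pthread_mutex_lock(&q.lock);
	q.stop = true;                       /* analogous to kthread_stop() */
	pthread_cond_broadcast(&q.wake);
	pthread_mutex_unlock(&q.lock);
	pthread_join(t, NULL);
	return 0;
}
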
409 static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, in __cpu_map_load_bpf_program() argument
424 rcpu->value.bpf_prog.id = prog->aux->id; in __cpu_map_load_bpf_program()
425 rcpu->prog = prog; in __cpu_map_load_bpf_program()
436 struct bpf_cpu_map_entry *rcpu; in __cpu_map_entry_alloc() local
442 rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa); in __cpu_map_entry_alloc()
443 if (!rcpu) in __cpu_map_entry_alloc()
447 rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq), in __cpu_map_entry_alloc()
449 if (!rcpu->bulkq) in __cpu_map_entry_alloc()
453 bq = per_cpu_ptr(rcpu->bulkq, i); in __cpu_map_entry_alloc()
454 bq->obj = rcpu; in __cpu_map_entry_alloc()
458 rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp, in __cpu_map_entry_alloc()
460 if (!rcpu->queue) in __cpu_map_entry_alloc()
463 err = ptr_ring_init(rcpu->queue, value->qsize, gfp); in __cpu_map_entry_alloc()
467 rcpu->cpu = cpu; in __cpu_map_entry_alloc()
468 rcpu->map_id = map->id; in __cpu_map_entry_alloc()
469 rcpu->value.qsize = value->qsize; in __cpu_map_entry_alloc()
471 if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd)) in __cpu_map_entry_alloc()
475 init_completion(&rcpu->kthread_running); in __cpu_map_entry_alloc()
476 rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, in __cpu_map_entry_alloc()
479 if (IS_ERR(rcpu->kthread)) in __cpu_map_entry_alloc()
482 get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */ in __cpu_map_entry_alloc()
483 get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */ in __cpu_map_entry_alloc()
486 kthread_bind(rcpu->kthread, cpu); in __cpu_map_entry_alloc()
487 wake_up_process(rcpu->kthread); in __cpu_map_entry_alloc()
493 wait_for_completion(&rcpu->kthread_running); in __cpu_map_entry_alloc()
495 return rcpu; in __cpu_map_entry_alloc()
498 if (rcpu->prog) in __cpu_map_entry_alloc()
499 bpf_prog_put(rcpu->prog); in __cpu_map_entry_alloc()
501 ptr_ring_cleanup(rcpu->queue, NULL); in __cpu_map_entry_alloc()
503 kfree(rcpu->queue); in __cpu_map_entry_alloc()
505 free_percpu(rcpu->bulkq); in __cpu_map_entry_alloc()
507 kfree(rcpu); in __cpu_map_entry_alloc()
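
__cpu_map_entry_alloc() builds the entry step by step (per-CPU bulk queues, the ptr_ring, the optional program, the kthread), unwinds whatever was already allocated if a step fails, and ends up holding two references: one for the map slot and one for the kthread. A small sketch of that allocate-and-unwind shape, with illustrative names only:

/* Each allocation step has a matching cleanup label, so a failure part-way
 * through releases exactly what was already set up. */
#include <stdatomic.h>
#include <stdlib.h>

struct entry {
	atomic_int refcnt;
	void *bulkq;
	void *queue;
};

static struct entry *entry_alloc(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;

	e->bulkq = calloc(1, 128);          /* per-CPU bulk queues in the kernel */
	if (!e->bulkq)
		goto free_entry;

	e->queue = calloc(1, 256);          /* the ptr_ring in the kernel */
	if (!e->queue)
		goto free_bulkq;

	atomic_init(&e->refcnt, 0);
	atomic_fetch_add(&e->refcnt, 1);    /* reference held by the map slot */
	atomic_fetch_add(&e->refcnt, 1);    /* reference held by the worker   */
	return e;

free_bulkq:
	free(e->bulkq);
free_entry:
	free(e);
	return NULL;
}

int main(void)
{
	struct entry *e = entry_alloc();

	if (e) {
		free(e->queue);
		free(e->bulkq);
		free(e);
	}
	return 0;
}
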
513 struct bpf_cpu_map_entry *rcpu; in __cpu_map_entry_free() local
520 rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu); in __cpu_map_entry_free()
522 free_percpu(rcpu->bulkq); in __cpu_map_entry_free()
524 put_cpu_map_entry(rcpu); in __cpu_map_entry_free()
547 u32 key_cpu, struct bpf_cpu_map_entry *rcpu) in __cpu_map_entry_replace() argument
551 old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu))); in __cpu_map_entry_replace()
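
__cpu_map_entry_replace() installs the new entry into the slot with xchg() and leaves the old occupant to be torn down after an RCU grace period (__cpu_map_entry_free() then frees the bulk queues and drops the slot's reference). A toy sketch of the exchange-and-defer shape, using C11 atomic_exchange and an immediate free where the kernel would use call_rcu (names illustrative):

/* Install the new pointer atomically and hand the previous occupant off
 * for deferred destruction. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int cpu; };

static _Atomic(struct entry *) slot;

static void defer_free(struct entry *old)
{
	/* The kernel uses call_rcu() here so in-flight lookups finish first;
	 * this toy just frees immediately. */
	free(old);
}

static void slot_replace(struct entry *new_entry)
{
	struct entry *old = atomic_exchange(&slot, new_entry);

	if (old)
		defer_free(old);
}

int main(void)
{
	struct entry *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	a->cpu = 1;
	b->cpu = 2;
	slot_replace(a);        /* install */
	slot_replace(b);        /* replace: a handed to defer_free() */
	slot_replace(NULL);     /* delete:  b handed to defer_free() */
	puts("slot emptied");
	return 0;
}
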
577 struct bpf_cpu_map_entry *rcpu; in cpu_map_update_elem() local
597 rcpu = NULL; /* Same as deleting */ in cpu_map_update_elem()
600 rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu); in cpu_map_update_elem()
601 if (!rcpu) in cpu_map_update_elem()
603 rcpu->cmap = cmap; in cpu_map_update_elem()
606 __cpu_map_entry_replace(cmap, key_cpu, rcpu); in cpu_map_update_elem()
631 struct bpf_cpu_map_entry *rcpu; in cpu_map_free() local
633 rcpu = rcu_dereference_raw(cmap->cpu_map[i]); in cpu_map_free()
634 if (!rcpu) in cpu_map_free()
651 struct bpf_cpu_map_entry *rcpu; in __cpu_map_lookup_elem() local
656 rcpu = rcu_dereference_check(cmap->cpu_map[key], in __cpu_map_lookup_elem()
658 return rcpu; in __cpu_map_lookup_elem()
663 struct bpf_cpu_map_entry *rcpu = in cpu_map_lookup_elem() local
666 return rcpu ? &rcpu->value : NULL; in cpu_map_lookup_elem()
708 struct bpf_cpu_map_entry *rcpu = bq->obj; in bq_flush_to_queue() local
710 const int to_cpu = rcpu->cpu; in bq_flush_to_queue()
717 q = rcpu->queue; in bq_flush_to_queue()
737 trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu); in bq_flush_to_queue()
743 static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) in bq_enqueue() argument
746 struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); in bq_enqueue()
766 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, in cpu_map_enqueue() argument
772 bq_enqueue(rcpu, xdpf); in cpu_map_enqueue()
776 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, in cpu_map_generic_redirect() argument
785 ret = ptr_ring_produce(rcpu->queue, skb); in cpu_map_generic_redirect()
789 wake_up_process(rcpu->kthread); in cpu_map_generic_redirect()
791 trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu); in cpu_map_generic_redirect()
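
The enqueue side buffers frames in a per-CPU bulk queue (bq_enqueue) and pushes them to the remote CPU's ptr_ring in batches (bq_flush_to_queue), while cpu_map_generic_redirect() produces skbs straight to the ring and wakes the kthread. A minimal sketch of the buffer-then-flush shape, with a plain array standing in for the destination ring (names illustrative):

/* Frames are buffered locally and only pushed to the shared destination
 * queue in batches, amortising the cost of touching the ring. */
#include <stdio.h>

#define BULK   8
#define DESTSZ 64

struct bulk_queue {
	void *items[BULK];
	int count;
};

static void *dest[DESTSZ];
static int dest_count;

static int flush_to_dest(struct bulk_queue *bq)
{
	int i, enq = 0;

	for (i = 0; i < bq->count && dest_count < DESTSZ; i++) {
		dest[dest_count++] = bq->items[i];
		enq++;
	}
	bq->count = 0;          /* frames that did not fit would be dropped */
	return enq;
}

static void bulk_enqueue(struct bulk_queue *bq, void *frame)
{
	if (bq->count == BULK)  /* buffer full: push the batch to the ring */
		flush_to_dest(bq);
	bq->items[bq->count++] = frame;
}

int main(void)
{
	struct bulk_queue bq = { .count = 0 };
	int frames[20], i;

	for (i = 0; i < 20; i++)
		bulk_enqueue(&bq, &frames[i]);
	flush_to_dest(&bq);     /* final flush, as at the end of a poll cycle */
	printf("%d frames reached the destination queue\n", dest_count);
	return 0;
}
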