Lines matching references to c (a struct bpf_mem_cache pointer) in the BPF memory allocator

124 static void *__alloc(struct bpf_mem_cache *c, int node)  in __alloc()  argument
133 if (c->percpu_size) { in __alloc()
134 void **obj = kmalloc_node(c->percpu_size, flags, node); in __alloc()
135 void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags); in __alloc()
146 return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node); in __alloc()
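
A rough userspace sketch of the branch above, using the hypothetical names struct cache_model and cache_alloc(); malloc()/calloc() stand in for kmalloc_node() and __alloc_percpu_gfp(), so this is only a model of the decision, not the kernel code:

#include <stdlib.h>

struct cache_model {
    int unit_size;    /* size of a regular object */
    int percpu_size;  /* non-zero when objects wrap a per-CPU area */
};

static void *cache_alloc(struct cache_model *c)
{
    if (c->percpu_size) {
        /* wrapper object plus a stand-in for the per-CPU area */
        void **obj = malloc(c->percpu_size);
        void *pptr = calloc(1, c->unit_size);

        if (!obj || !pptr) {
            free(obj);
            free(pptr);
            return NULL;
        }
        /* the kernel stashes the per-CPU pointer inside the wrapper,
         * past its list node; modeled here as slot 1 */
        obj[1] = pptr;
        return obj;
    }
    /* regular object: one zeroed unit_size chunk (~ __GFP_ZERO) */
    return calloc(1, c->unit_size);
}
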
149 static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c) in get_memcg() argument
152 if (c->objcg) in get_memcg()
153 return get_mem_cgroup_from_objcg(c->objcg); in get_memcg()
164 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node) in alloc_bulk() argument
171 memcg = get_memcg(c); in alloc_bulk()
174 obj = __alloc(c, node); in alloc_bulk()
190 WARN_ON_ONCE(local_inc_return(&c->active) != 1); in alloc_bulk()
191 __llist_add(obj, &c->free_llist); in alloc_bulk()
192 c->free_cnt++; in alloc_bulk()
193 local_dec(&c->active); in alloc_bulk()
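
alloc_bulk() above refills the per-CPU cache: it allocates cnt objects, pushes each onto c->free_llist and bumps free_cnt, with the local "active" counter asserting that no other context on this CPU is inside the critical section. A single-threaded sketch with hypothetical names (cache_model, refill_bulk), a plain int in place of local_t, and the memcg accounting (get_memcg()) left out:

#include <assert.h>
#include <stdlib.h>

struct node { struct node *next; };

struct cache_model {
    struct node *free_list;   /* stands in for c->free_llist */
    int free_cnt;
    int active;               /* stands in for local_t c->active */
    int unit_size;            /* assumed >= sizeof(struct node) */
};

static void refill_bulk(struct cache_model *c, int cnt)
{
    for (int i = 0; i < cnt; i++) {
        /* stand-in for __alloc(c, node) */
        struct node *obj = calloc(1, c->unit_size);

        if (!obj)
            break;
        c->active++;
        /* kernel: WARN_ON_ONCE(local_inc_return(&c->active) != 1) */
        assert(c->active == 1);
        obj->next = c->free_list;      /* __llist_add() */
        c->free_list = obj;
        c->free_cnt++;
        c->active--;                   /* local_dec() */
    }
}
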
201 static void free_one(struct bpf_mem_cache *c, void *obj) in free_one() argument
203 if (c->percpu_size) { in free_one()
214 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu); in __free_rcu() local
215 struct llist_node *llnode = llist_del_all(&c->waiting_for_gp); in __free_rcu()
219 free_one(c, pos); in __free_rcu()
220 atomic_set(&c->call_rcu_in_progress, 0); in __free_rcu()
225 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu); in __free_rcu_tasks_trace() local
227 call_rcu(&c->rcu, __free_rcu); in __free_rcu_tasks_trace()
230 static void enque_to_free(struct bpf_mem_cache *c, void *obj) in enque_to_free() argument
237 __llist_add(llnode, &c->free_by_rcu); in enque_to_free()
240 static void do_call_rcu(struct bpf_mem_cache *c) in do_call_rcu() argument
244 if (atomic_xchg(&c->call_rcu_in_progress, 1)) in do_call_rcu()
247 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp)); in do_call_rcu()
248 llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu)) in do_call_rcu()
254 __llist_add(llnode, &c->waiting_for_gp); in do_call_rcu()
259 call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace); in do_call_rcu()
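
The deferred-free pipeline above runs in two stages: enque_to_free() parks objects on c->free_by_rcu, and do_call_rcu() moves that batch to c->waiting_for_gp and arms call_rcu_tasks_trace(), whose callback chains into a regular call_rcu() that finally runs free_one(); call_rcu_in_progress keeps at most one batch in flight. A simplified, single-threaded sketch with hypothetical names, where both grace periods collapse into a direct call to the callback:

#include <stdlib.h>

struct node { struct node *next; };

struct cache_model {
    struct node *free_by_rcu;      /* filled by enqueue_to_free() */
    struct node *waiting_for_gp;   /* batch waiting for the (fake) grace period */
    int call_rcu_in_progress;      /* at most one callback in flight */
};

static void enqueue_to_free(struct cache_model *c, struct node *obj)
{
    obj->next = c->free_by_rcu;    /* ~ enque_to_free() */
    c->free_by_rcu = obj;
}

static void fake_gp_callback(struct cache_model *c)    /* ~ __free_rcu() */
{
    struct node *pos = c->waiting_for_gp, *next;

    c->waiting_for_gp = NULL;
    for (; pos; pos = next) {
        next = pos->next;
        free(pos);                 /* free_one() */
    }
    c->call_rcu_in_progress = 0;   /* allow the next batch */
}

static void start_deferred_free(struct cache_model *c) /* ~ do_call_rcu() */
{
    if (c->call_rcu_in_progress)   /* ~ atomic_xchg(..., 1) already set */
        return;
    c->call_rcu_in_progress = 1;
    /* move the whole free_by_rcu batch over to waiting_for_gp */
    c->waiting_for_gp = c->free_by_rcu;
    c->free_by_rcu = NULL;
    /* kernel: call_rcu_tasks_trace() -> __free_rcu_tasks_trace() -> call_rcu() */
    fake_gp_callback(c);
}
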
262 static void free_bulk(struct bpf_mem_cache *c) in free_bulk() argument
271 WARN_ON_ONCE(local_inc_return(&c->active) != 1); in free_bulk()
272 llnode = __llist_del_first(&c->free_llist); in free_bulk()
274 cnt = --c->free_cnt; in free_bulk()
277 local_dec(&c->active); in free_bulk()
281 enque_to_free(c, llnode); in free_bulk()
282 } while (cnt > (c->high_watermark + c->low_watermark) / 2); in free_bulk()
285 llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra)) in free_bulk()
286 enque_to_free(c, llnode); in free_bulk()
287 do_call_rcu(c); in free_bulk()
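
free_bulk() above shrinks an over-full cache: it pops objects off free_llist (under the same active guard) until free_cnt falls to the midpoint of the two watermarks, hands each one to enque_to_free(), also drains free_llist_extra, and finally calls do_call_rcu() once. A standalone sketch of the drain-to-midpoint loop with hypothetical names; the free_llist_extra pass, the active guard and the RCU handoff are reduced to direct frees:

#include <stdlib.h>

struct node { struct node *next; };

struct cache_model {
    struct node *free_list;            /* ~ c->free_llist */
    int free_cnt;
    int low_watermark, high_watermark;
};

static void flush_bulk(struct cache_model *c)      /* ~ free_bulk() */
{
    struct node *deferred = NULL;                  /* ~ c->free_by_rcu */
    int cnt;

    do {
        struct node *obj = c->free_list;           /* __llist_del_first() */

        if (obj) {
            c->free_list = obj->next;
            cnt = --c->free_cnt;
            obj->next = deferred;                  /* enque_to_free() */
            deferred = obj;
        } else {
            cnt = 0;
        }
    } while (cnt > (c->high_watermark + c->low_watermark) / 2);

    /* the kernel now hands "deferred" to do_call_rcu(); the sketch just frees */
    while (deferred) {
        struct node *next = deferred->next;

        free(deferred);
        deferred = next;
    }
}
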
292 struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work); in bpf_mem_refill() local
296 cnt = c->free_cnt; in bpf_mem_refill()
297 if (cnt < c->low_watermark) in bpf_mem_refill()
301 alloc_bulk(c, c->batch, NUMA_NO_NODE); in bpf_mem_refill()
302 else if (cnt > c->high_watermark) in bpf_mem_refill()
303 free_bulk(c); in bpf_mem_refill()
306 static void notrace irq_work_raise(struct bpf_mem_cache *c) in irq_work_raise() argument
308 irq_work_queue(&c->refill_work); in irq_work_raise()
326 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu) in prefill_mem_cache() argument
328 init_irq_work(&c->refill_work, bpf_mem_refill); in prefill_mem_cache()
329 if (c->unit_size <= 256) { in prefill_mem_cache()
330 c->low_watermark = 32; in prefill_mem_cache()
331 c->high_watermark = 96; in prefill_mem_cache()
338 c->low_watermark = max(32 * 256 / c->unit_size, 1); in prefill_mem_cache()
339 c->high_watermark = max(96 * 256 / c->unit_size, 3); in prefill_mem_cache()
341 c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1); in prefill_mem_cache()
347 alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu)); in prefill_mem_cache()
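
The watermark arithmetic in prefill_mem_cache() keeps roughly a constant number of cached bytes per CPU: objects of up to 256 bytes get fixed low/high marks of 32/96, larger sizes scale those down (with floors of 1 and 3), and batch is three quarters of the gap, at least 1. A small standalone program that just evaluates those formulas for a few example unit sizes (the sizes themselves are arbitrary examples):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    int sizes[] = { 64, 256, 512, 1024, 4096 };   /* example unit sizes */

    for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        int unit_size = sizes[i], low, high, batch;

        if (unit_size <= 256) {
            low = 32;
            high = 96;
        } else {
            low = MAX(32 * 256 / unit_size, 1);
            high = MAX(96 * 256 / unit_size, 3);
        }
        batch = MAX((high - low) / 4 * 3, 1);
        printf("unit_size %4d: low %2d high %2d batch %2d\n",
               unit_size, low, high, batch);
    }
    return 0;
}

For 4096-byte objects this works out to low 2, high 6, batch 3, so large objects are cached only a handful at a time.
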
361 struct bpf_mem_cache *c, __percpu *pc; in bpf_mem_alloc_init() local
381 c = per_cpu_ptr(pc, cpu); in bpf_mem_alloc_init()
382 c->unit_size = unit_size; in bpf_mem_alloc_init()
383 c->objcg = objcg; in bpf_mem_alloc_init()
384 c->percpu_size = percpu_size; in bpf_mem_alloc_init()
385 prefill_mem_cache(c, cpu); in bpf_mem_alloc_init()
404 c = &cc->cache[i]; in bpf_mem_alloc_init()
405 c->unit_size = sizes[i]; in bpf_mem_alloc_init()
406 c->objcg = objcg; in bpf_mem_alloc_init()
407 prefill_mem_cache(c, cpu); in bpf_mem_alloc_init()
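
bpf_mem_alloc_init() above has two shapes: a single per-CPU cache with a caller-supplied unit_size (the pc path), or a per-CPU array of size-class caches, each prefilled in turn (the cc->cache[i] path). A sketch of the second shape with hypothetical names and a flat array standing in for the per-CPU allocation; the size-class values are illustrative, not taken from the source:

#include <stdlib.h>

#define NR_CPUS_MODEL   4
#define NUM_CLASSES     4

struct cache_model {
    int unit_size;
    int free_cnt;
    /* free lists, watermarks, objcg, ... omitted */
};

struct caches_model {
    struct cache_model cache[NUM_CLASSES];
};

static void prefill(struct cache_model *c, int cpu)
{
    (void)cpu;            /* the kernel prefills from cpu_to_node(cpu) */
    c->free_cnt = 0;      /* alloc_bulk() would push a few objects here */
}

static struct caches_model *init_size_classes(void)
{
    static const int sizes[NUM_CLASSES] = { 64, 128, 256, 512 }; /* illustrative */
    struct caches_model *pcc = calloc(NR_CPUS_MODEL, sizeof(*pcc));

    if (!pcc)
        return NULL;
    for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++) {
        for (int i = 0; i < NUM_CLASSES; i++) {
            struct cache_model *c = &pcc[cpu].cache[i];

            c->unit_size = sizes[i];
            prefill(c, cpu);
        }
    }
    return pcc;
}
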
414 static void drain_mem_cache(struct bpf_mem_cache *c) in drain_mem_cache() argument
425 llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu)) in drain_mem_cache()
426 free_one(c, llnode); in drain_mem_cache()
427 llist_for_each_safe(llnode, t, llist_del_all(&c->waiting_for_gp)) in drain_mem_cache()
428 free_one(c, llnode); in drain_mem_cache()
429 llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist)) in drain_mem_cache()
430 free_one(c, llnode); in drain_mem_cache()
431 llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist_extra)) in drain_mem_cache()
432 free_one(c, llnode); in drain_mem_cache()
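
drain_mem_cache() above simply frees whatever is still parked on any of the four lists when the allocator is torn down; nothing is deferred at this point. A sketch with hypothetical names, using a plain singly linked list for each llist:

#include <stdlib.h>

struct node { struct node *next; };

struct cache_model {
    struct node *free_by_rcu;
    struct node *waiting_for_gp;
    struct node *free_list;        /* ~ free_llist */
    struct node *free_list_extra;  /* ~ free_llist_extra */
};

static void drain_list(struct node **head)
{
    struct node *pos = *head, *next;

    *head = NULL;                  /* __llist_del_all() / llist_del_all() */
    for (; pos; pos = next) {
        next = pos->next;
        free(pos);                 /* free_one() */
    }
}

static void drain_cache(struct cache_model *c)  /* ~ drain_mem_cache() */
{
    drain_list(&c->free_by_rcu);
    drain_list(&c->waiting_for_gp);
    drain_list(&c->free_list);
    drain_list(&c->free_list_extra);
}
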
492 struct bpf_mem_cache *c; in bpf_mem_alloc_destroy() local
498 c = per_cpu_ptr(ma->cache, cpu); in bpf_mem_alloc_destroy()
508 irq_work_sync(&c->refill_work); in bpf_mem_alloc_destroy()
509 drain_mem_cache(c); in bpf_mem_alloc_destroy()
510 rcu_in_progress += atomic_read(&c->call_rcu_in_progress); in bpf_mem_alloc_destroy()
513 if (c->objcg) in bpf_mem_alloc_destroy()
514 obj_cgroup_put(c->objcg); in bpf_mem_alloc_destroy()
522 c = &cc->cache[i]; in bpf_mem_alloc_destroy()
523 irq_work_sync(&c->refill_work); in bpf_mem_alloc_destroy()
524 drain_mem_cache(c); in bpf_mem_alloc_destroy()
525 rcu_in_progress += atomic_read(&c->call_rcu_in_progress); in bpf_mem_alloc_destroy()
528 if (c->objcg) in bpf_mem_alloc_destroy()
529 obj_cgroup_put(c->objcg); in bpf_mem_alloc_destroy()
537 static void notrace *unit_alloc(struct bpf_mem_cache *c) in unit_alloc() argument
554 if (local_inc_return(&c->active) == 1) { in unit_alloc()
555 llnode = __llist_del_first(&c->free_llist); in unit_alloc()
557 cnt = --c->free_cnt; in unit_alloc()
559 local_dec(&c->active); in unit_alloc()
564 if (cnt < c->low_watermark) in unit_alloc()
565 irq_work_raise(c); in unit_alloc()
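
unit_alloc() is the allocation fast path: the per-CPU active counter acts as a reentrancy guard, so only the context that moved it from 0 to 1 may pop from free_llist, while a nested caller on the same CPU simply gets NULL; dropping below the low watermark raises irq_work so bpf_mem_refill() can top the cache up. A single-threaded sketch with hypothetical names and a flag in place of irq_work_queue():

#include <stddef.h>

struct node { struct node *next; };

struct cache_model {
    struct node *free_list;
    int free_cnt;
    int active;                 /* stands in for local_t c->active */
    int low_watermark;
    int refill_requested;       /* stands in for irq_work_queue() */
};

static void *cache_unit_alloc(struct cache_model *c)
{
    struct node *obj = NULL;
    int cnt = 0;

    if (++c->active == 1) {     /* local_inc_return(&c->active) == 1 */
        obj = c->free_list;     /* __llist_del_first() */
        if (obj) {
            c->free_list = obj->next;
            cnt = --c->free_cnt;
        }
    }
    --c->active;                /* local_dec(&c->active) */

    if (cnt < c->low_watermark)
        c->refill_requested = 1;  /* ~ irq_work_raise(c) */
    return obj;
}
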
573 static void notrace unit_free(struct bpf_mem_cache *c, void *ptr) in unit_free() argument
582 if (local_inc_return(&c->active) == 1) { in unit_free()
583 __llist_add(llnode, &c->free_llist); in unit_free()
584 cnt = ++c->free_cnt; in unit_free()
592 llist_add(llnode, &c->free_llist_extra); in unit_free()
594 local_dec(&c->active); in unit_free()
597 if (cnt > c->high_watermark) in unit_free()
599 irq_work_raise(c); in unit_free()
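
unit_free() mirrors that: the owning context pushes the object back onto free_llist and bumps free_cnt, while a reentrant caller parks it on free_llist_extra so the object is never lost; crossing the high watermark raises irq_work so free_bulk() can later trim the list. The same style of single-threaded sketch, with hypothetical names:

struct node { struct node *next; };

struct cache_model {
    struct node *free_list;        /* ~ free_llist */
    struct node *free_list_extra;  /* ~ free_llist_extra, the overflow list */
    int free_cnt;
    int active;
    int high_watermark;
    int flush_requested;           /* stands in for irq_work_queue() */
};

static void cache_unit_free(struct cache_model *c, struct node *obj)
{
    int cnt = 0;

    if (++c->active == 1) {        /* owning context */
        obj->next = c->free_list;  /* __llist_add(llnode, &c->free_llist) */
        c->free_list = obj;
        cnt = ++c->free_cnt;
    } else {                       /* reentrant context */
        obj->next = c->free_list_extra;
        c->free_list_extra = obj;  /* llist_add(..., &c->free_llist_extra) */
    }
    --c->active;

    if (cnt > c->high_watermark)
        c->flush_requested = 1;    /* ~ irq_work_raise(c) */
}
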