Lines matching references to the identifier l in kernel/bpf/bpf_lru_list.c (the Linux BPF LRU list implementation). Each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument" or "local" marks whether l is a function parameter or a local variable at that site.
47 static void bpf_lru_list_count_inc(struct bpf_lru_list *l, in bpf_lru_list_count_inc() argument
51 l->counts[type]++; in bpf_lru_list_count_inc()
54 static void bpf_lru_list_count_dec(struct bpf_lru_list *l, in bpf_lru_list_count_dec() argument
58 l->counts[type]--; in bpf_lru_list_count_dec()
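The two counting helpers above keep the per-type element counts in sync as nodes move between lists. A minimal reconstructed sketch (paraphrased from these excerpts; the NR_BPF_LRU_LIST_COUNT bound, which keeps the free list out of the counts, is quoted from memory of bpf_lru_list.h and should be treated as an assumption):

        /* Only the ACTIVE and INACTIVE lists are counted; the FREE list
         * and the per-CPU local list types fall at or above
         * NR_BPF_LRU_LIST_COUNT, so the guard silently skips them.
         */
        static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
                                           enum bpf_lru_list_type type)
        {
                if (type < NR_BPF_LRU_LIST_COUNT)
                        l->counts[type]++;
        }

        static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
                                           enum bpf_lru_list_type type)
        {
                if (type < NR_BPF_LRU_LIST_COUNT)
                        l->counts[type]--;
        }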
61 static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l, in __bpf_lru_node_move_to_free() argument
72 if (&node->list == l->next_inactive_rotation) in __bpf_lru_node_move_to_free()
73 l->next_inactive_rotation = l->next_inactive_rotation->prev; in __bpf_lru_node_move_to_free()
75 bpf_lru_list_count_dec(l, node->type); in __bpf_lru_node_move_to_free()
82 static void __bpf_lru_node_move_in(struct bpf_lru_list *l, in __bpf_lru_node_move_in() argument
90 bpf_lru_list_count_inc(l, tgt_type); in __bpf_lru_node_move_in()
93 list_move(&node->list, &l->lists[tgt_type]); in __bpf_lru_node_move_in()
100 static void __bpf_lru_node_move(struct bpf_lru_list *l, in __bpf_lru_node_move() argument
109 bpf_lru_list_count_dec(l, node->type); in __bpf_lru_node_move()
110 bpf_lru_list_count_inc(l, tgt_type); in __bpf_lru_node_move()
118 if (&node->list == l->next_inactive_rotation) in __bpf_lru_node_move()
119 l->next_inactive_rotation = l->next_inactive_rotation->prev; in __bpf_lru_node_move()
121 list_move(&node->list, &l->lists[tgt_type]); in __bpf_lru_node_move()
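The move helpers are the only places nodes change lists, and the excerpts at source lines 72-73 and 118-119 both show the same subtlety: if the node being moved happens to be the saved inactive-rotation cursor, the cursor is backed up one entry first so it never dangles. A reconstructed sketch of __bpf_lru_node_move() (the ref-bit clearing and the count updates are from memory, not verbatim kernel source):

        /* Move a node between the ACTIVE/INACTIVE/FREE lists of one
         * bpf_lru_list, keeping the per-type counts consistent.
         */
        static void __bpf_lru_node_move(struct bpf_lru_list *l,
                                        struct bpf_lru_node *node,
                                        enum bpf_lru_list_type tgt_type)
        {
                if (node->type != tgt_type) {
                        bpf_lru_list_count_dec(l, node->type);
                        bpf_lru_list_count_inc(l, tgt_type);
                        node->type = tgt_type;
                }
                node->ref = 0;

                /* If the node being moved is the saved inactive-rotation
                 * cursor, back the cursor up one entry so it stays valid.
                 */
                if (&node->list == l->next_inactive_rotation)
                        l->next_inactive_rotation = l->next_inactive_rotation->prev;

                list_move(&node->list, &l->lists[tgt_type]);
        }

__bpf_lru_node_move_to_free() (lines 61-75) is the same idea but moves the node onto a caller-supplied free list, and __bpf_lru_node_move_in() (lines 82-93) is the inverse direction, pulling a node from a per-CPU local list back into the global lists.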
124 static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l) in bpf_lru_list_inactive_low() argument
126 return l->counts[BPF_LRU_LIST_T_INACTIVE] < in bpf_lru_list_inactive_low()
127 l->counts[BPF_LRU_LIST_T_ACTIVE]; in bpf_lru_list_inactive_low()
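This predicate is visible almost in full in the excerpt; spelled out:

        static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
        {
                return l->counts[BPF_LRU_LIST_T_INACTIVE] <
                       l->counts[BPF_LRU_LIST_T_ACTIVE];
        }

The design intent, as the rotate path below uses it, is to keep the inactive list at least as large as the active one so that the shrink path always has demotion candidates to scan.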
140 struct bpf_lru_list *l) in __bpf_lru_list_rotate_active() argument
142 struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE]; in __bpf_lru_list_rotate_active()
149 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); in __bpf_lru_list_rotate_active()
151 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE); in __bpf_lru_list_rotate_active()
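The active-list rotation scans from the tail: referenced nodes earn another round at the head of ACTIVE (with the ref bit cleared by the move), unreferenced ones are demoted to INACTIVE. A sketch reconstructed around the two excerpt lines; bpf_lru_node_is_ref() and the nr_scans/first_node stop conditions are from memory and may differ in detail:

        static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
        {
                return node->ref;
        }

        static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
                                                 struct bpf_lru_list *l)
        {
                struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
                struct bpf_lru_node *node, *tmp_node, *first_node;
                unsigned int i = 0;

                first_node = list_first_entry(active, struct bpf_lru_node, list);
                list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
                        if (bpf_lru_node_is_ref(node))
                                __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
                        else
                                __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

                        /* Bound the work per rotation and stop after one
                         * full pass over the list.
                         */
                        if (++i == lru->nr_scans || node == first_node)
                                break;
                }
        }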
167 struct bpf_lru_list *l) in __bpf_lru_list_rotate_inactive() argument
169 struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in __bpf_lru_list_rotate_inactive()
177 last = l->next_inactive_rotation->next; in __bpf_lru_list_rotate_inactive()
181 cur = l->next_inactive_rotation; in __bpf_lru_list_rotate_inactive()
191 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); in __bpf_lru_list_rotate_inactive()
198 l->next_inactive_rotation = next; in __bpf_lru_list_rotate_inactive()
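The inactive rotation is resumable: it scans up to nr_scans nodes starting at the saved cursor (line 181), promotes referenced nodes to ACTIVE, leaves unreferenced ones in place for a later shrink, and stores the cursor for the next call (line 198). A reconstructed sketch; the exact wrap-around and list-head-skipping logic here is from memory and is the part most likely to differ from the real source:

        static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
                                                   struct bpf_lru_list *l)
        {
                struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
                struct list_head *cur, *last, *next = inactive;
                struct bpf_lru_node *node;
                unsigned int i = 0;

                if (list_empty(inactive))
                        return;

                last = l->next_inactive_rotation->next;
                if (last == inactive)
                        last = last->next;

                cur = l->next_inactive_rotation;
                while (i < lru->nr_scans) {
                        if (cur == inactive) {
                                /* Skip the list head itself. */
                                cur = cur->prev;
                                continue;
                        }

                        node = list_entry(cur, struct bpf_lru_node, list);
                        next = cur->prev;
                        if (bpf_lru_node_is_ref(node))
                                __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
                        if (cur == last)
                                break;
                        cur = next;
                        i++;
                }

                l->next_inactive_rotation = next;
        }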
207 struct bpf_lru_list *l, in __bpf_lru_list_shrink_inactive() argument
212 struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in __bpf_lru_list_shrink_inactive()
219 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); in __bpf_lru_list_shrink_inactive()
221 __bpf_lru_node_move_to_free(l, node, free_list, in __bpf_lru_list_shrink_inactive()
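The shrink pass walks the inactive list from its tail, trying to free up to tgt_nshrink nodes while still honoring the ref bit: referenced nodes are promoted (line 219) rather than freed, and a node only moves to the free list (line 221) once it has been unlinked from the owning hash table. A sketch; the del_from_htab/del_arg callback names match my memory of struct bpf_lru but are assumptions:

        static unsigned int
        __bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
                                       struct bpf_lru_list *l,
                                       unsigned int tgt_nshrink,
                                       struct list_head *free_list,
                                       enum bpf_lru_list_type tgt_free_type)
        {
                struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
                struct bpf_lru_node *node, *tmp_node;
                unsigned int nshrinked = 0, i = 0;

                list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
                        if (bpf_lru_node_is_ref(node)) {
                                /* Recently used: promote instead of freeing. */
                                __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
                        } else if (lru->del_from_htab(lru->del_arg, node)) {
                                __bpf_lru_node_move_to_free(l, node, free_list,
                                                            tgt_free_type);
                                if (++nshrinked == tgt_nshrink)
                                        break;
                        }

                        if (++i == lru->nr_scans)
                                break;
                }

                return nshrinked;
        }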
237 static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l) in __bpf_lru_list_rotate() argument
239 if (bpf_lru_list_inactive_low(l)) in __bpf_lru_list_rotate()
240 __bpf_lru_list_rotate_active(lru, l); in __bpf_lru_list_rotate()
242 __bpf_lru_list_rotate_inactive(lru, l); in __bpf_lru_list_rotate()
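The driver for both rotations is small enough to read straight off the excerpt:

        static void __bpf_lru_list_rotate(struct bpf_lru *lru,
                                          struct bpf_lru_list *l)
        {
                /* Demote from ACTIVE only when INACTIVE is running low,
                 * then always advance the inactive rotation cursor.
                 */
                if (bpf_lru_list_inactive_low(l))
                        __bpf_lru_list_rotate_active(lru, l);

                __bpf_lru_list_rotate_inactive(lru, l);
        }

Gating the active rotation on bpf_lru_list_inactive_low() means a workload that keeps its working set referenced does not churn the active list on every allocation.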
256 struct bpf_lru_list *l, in __bpf_lru_list_shrink() argument
266 nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink, in __bpf_lru_list_shrink()
272 if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE])) in __bpf_lru_list_shrink()
273 force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in __bpf_lru_list_shrink()
275 force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE]; in __bpf_lru_list_shrink()
280 __bpf_lru_node_move_to_free(l, node, free_list, in __bpf_lru_list_shrink()
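__bpf_lru_list_shrink() first tries the polite path; if that frees nothing, it force-shrinks a single node while ignoring the ref bit, preferring the inactive list (lines 272-275) and falling back to the active list. A reconstructed sketch of that fallback:

        static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
                                                  struct bpf_lru_list *l,
                                                  unsigned int tgt_nshrink,
                                                  struct list_head *free_list,
                                                  enum bpf_lru_list_type tgt_free_type)
        {
                struct bpf_lru_node *node, *tmp_node;
                struct list_head *force_shrink_list;
                unsigned int nshrinked;

                nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
                                                           free_list, tgt_free_type);
                if (nshrinked)
                        return nshrinked;

                /* Nothing freeable found: evict one node regardless of
                 * its ref bit, inactive list first.
                 */
                if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
                        force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
                else
                        force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];

                list_for_each_entry_safe_reverse(node, tmp_node,
                                                 force_shrink_list, list) {
                        if (lru->del_from_htab(lru->del_arg, node)) {
                                __bpf_lru_node_move_to_free(l, node, free_list,
                                                            tgt_free_type);
                                return 1;
                        }
                }

                return 0;
        }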
290 static void __local_list_flush(struct bpf_lru_list *l, in __local_list_flush() argument
298 __bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE); in __local_list_flush()
300 __bpf_lru_node_move_in(l, node, in __local_list_flush()
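__local_list_flush() drains a CPU's local pending list back into the global LRU, sorting nodes by their ref bit. A sketch; local_pending_list() is the file's accessor for the per-CPU pending list and its exact definition is assumed:

        static void __local_list_flush(struct bpf_lru_list *l,
                                       struct bpf_lru_locallist *loc_l)
        {
                struct bpf_lru_node *node, *tmp_node;

                list_for_each_entry_safe_reverse(node, tmp_node,
                                                 local_pending_list(loc_l), list) {
                        /* Referenced nodes re-enter as ACTIVE, the rest
                         * as INACTIVE.
                         */
                        if (bpf_lru_node_is_ref(node))
                                __bpf_lru_node_move_in(l, node,
                                                       BPF_LRU_LIST_T_ACTIVE);
                        else
                                __bpf_lru_node_move_in(l, node,
                                                       BPF_LRU_LIST_T_INACTIVE);
                }
        }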
305 static void bpf_lru_list_push_free(struct bpf_lru_list *l, in bpf_lru_list_push_free() argument
313 raw_spin_lock_irqsave(&l->lock, flags); in bpf_lru_list_push_free()
314 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE); in bpf_lru_list_push_free()
315 raw_spin_unlock_irqrestore(&l->lock, flags); in bpf_lru_list_push_free()
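Returning a node to the shared free list is a straight move under the list lock, readable directly from the excerpt:

        static void bpf_lru_list_push_free(struct bpf_lru_list *l,
                                           struct bpf_lru_node *node)
        {
                unsigned long flags;

                /* Callers can run in IRQ context (map updates from BPF
                 * programs), hence the irqsave variant.
                 */
                raw_spin_lock_irqsave(&l->lock, flags);
                __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
                raw_spin_unlock_irqrestore(&l->lock, flags);
        }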
321 struct bpf_lru_list *l = &lru->common_lru.lru_list; in bpf_lru_list_pop_free_to_local() local
325 raw_spin_lock(&l->lock); in bpf_lru_list_pop_free_to_local()
327 __local_list_flush(l, loc_l); in bpf_lru_list_pop_free_to_local()
329 __bpf_lru_list_rotate(lru, l); in bpf_lru_list_pop_free_to_local()
331 list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE], in bpf_lru_list_pop_free_to_local()
333 __bpf_lru_node_move_to_free(l, node, local_free_list(loc_l), in bpf_lru_list_pop_free_to_local()
340 __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree, in bpf_lru_list_pop_free_to_local()
344 raw_spin_unlock(&l->lock); in bpf_lru_list_pop_free_to_local()
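bpf_lru_list_pop_free_to_local() is the refill path that ties the pieces together: flush the CPU's pending list, rotate, batch-grab free nodes, and shrink for the shortfall. A sketch; LOCAL_FREE_TARGET is 128 in the kernel source as I recall it, and the plain raw_spin_lock() (no irqsave) assumes the caller already disabled IRQs via the local list lock:

        static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
                                                   struct bpf_lru_locallist *loc_l)
        {
                struct bpf_lru_list *l = &lru->common_lru.lru_list;
                struct bpf_lru_node *node, *tmp_node;
                unsigned int nfree = 0;

                raw_spin_lock(&l->lock);

                __local_list_flush(l, loc_l);

                __bpf_lru_list_rotate(lru, l);

                /* Grab up to LOCAL_FREE_TARGET nodes from the shared
                 * free list into this CPU's local free list.
                 */
                list_for_each_entry_safe(node, tmp_node,
                                         &l->lists[BPF_LRU_LIST_T_FREE], list) {
                        __bpf_lru_node_move_to_free(l, node,
                                                    local_free_list(loc_l),
                                                    BPF_LOCAL_LIST_T_FREE);
                        if (++nfree == LOCAL_FREE_TARGET)
                                break;
                }

                /* Free list ran dry: evict to cover the shortfall. */
                if (nfree < LOCAL_FREE_TARGET)
                        __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
                                              local_free_list(loc_l),
                                              BPF_LOCAL_LIST_T_FREE);

                raw_spin_unlock(&l->lock);
        }

Batching the refill amortizes contention on the shared lock: each CPU takes it once per LOCAL_FREE_TARGET allocations rather than once per allocation.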
404 struct bpf_lru_list *l; in bpf_percpu_lru_pop_free() local
408 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_pop_free()
410 raw_spin_lock_irqsave(&l->lock, flags); in bpf_percpu_lru_pop_free()
412 __bpf_lru_list_rotate(lru, l); in bpf_percpu_lru_pop_free()
414 free_list = &l->lists[BPF_LRU_LIST_T_FREE]; in bpf_percpu_lru_pop_free()
416 __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list, in bpf_percpu_lru_pop_free()
423 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE); in bpf_percpu_lru_pop_free()
426 raw_spin_unlock_irqrestore(&l->lock, flags); in bpf_percpu_lru_pop_free()
538 struct bpf_lru_list *l; in bpf_percpu_lru_push_free() local
541 l = per_cpu_ptr(lru->percpu_lru, node->cpu); in bpf_percpu_lru_push_free()
543 raw_spin_lock_irqsave(&l->lock, flags); in bpf_percpu_lru_push_free()
545 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE); in bpf_percpu_lru_push_free()
547 raw_spin_unlock_irqrestore(&l->lock, flags); in bpf_percpu_lru_push_free()
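The per-CPU LRU variant gives every CPU its own independent bpf_lru_list, so pop and push each touch only one CPU's lock. A reconstructed sketch of both sides; the hash_offset write into the containing map element and PERCPU_FREE_TARGET (4, from memory) are assumptions:

        static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
                                                            u32 hash)
        {
                struct bpf_lru_node *node = NULL;
                struct list_head *free_list;
                struct bpf_lru_list *l;
                unsigned long flags;
                int cpu = raw_smp_processor_id();

                l = per_cpu_ptr(lru->percpu_lru, cpu);

                raw_spin_lock_irqsave(&l->lock, flags);

                __bpf_lru_list_rotate(lru, l);

                free_list = &l->lists[BPF_LRU_LIST_T_FREE];
                if (list_empty(free_list))
                        __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET,
                                              free_list, BPF_LRU_LIST_T_FREE);

                if (!list_empty(free_list)) {
                        node = list_first_entry(free_list, struct bpf_lru_node,
                                                list);
                        /* Stash the element's hash next to the node. */
                        *(u32 *)((void *)node + lru->hash_offset) = hash;
                        node->ref = 0;
                        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
                }

                raw_spin_unlock_irqrestore(&l->lock, flags);

                return node;
        }

        static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
                                             struct bpf_lru_node *node)
        {
                struct bpf_lru_list *l;
                unsigned long flags;

                /* Free back to the list of the CPU that owns the node. */
                l = per_cpu_ptr(lru->percpu_lru, node->cpu);

                raw_spin_lock_irqsave(&l->lock, flags);
                __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
                raw_spin_unlock_irqrestore(&l->lock, flags);
        }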
562 struct bpf_lru_list *l = &lru->common_lru.lru_list; in bpf_common_lru_populate() local
571 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); in bpf_common_lru_populate()
582 struct bpf_lru_list *l; in bpf_percpu_lru_populate() local
591 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_populate()
597 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); in bpf_percpu_lru_populate()
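The populate functions carve a preallocated element array into nodes and park them all on the FREE list at map-creation time. A sketch of the common variant; the node_offset/elem_size buffer layout is an assumption about how the flat allocation is sliced, and the per-CPU variant (lines 582-597) additionally records node->cpu and splits nr_elems across the possible CPUs:

        static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
                                            u32 node_offset, u32 elem_size,
                                            u32 nr_elems)
        {
                struct bpf_lru_list *l = &lru->common_lru.lru_list;
                u32 i;

                for (i = 0; i < nr_elems; i++) {
                        struct bpf_lru_node *node;

                        /* Each map element embeds its LRU node at a fixed
                         * offset within the element.
                         */
                        node = (struct bpf_lru_node *)(buf + node_offset);
                        node->type = BPF_LRU_LIST_T_FREE;
                        node->ref = 0;
                        list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
                        buf += elem_size;
                }
        }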
630 static void bpf_lru_list_init(struct bpf_lru_list *l) in bpf_lru_list_init() argument
635 INIT_LIST_HEAD(&l->lists[i]); in bpf_lru_list_init()
638 l->counts[i] = 0; in bpf_lru_list_init()
640 l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in bpf_lru_list_init()
642 raw_spin_lock_init(&l->lock); in bpf_lru_list_init()
656 struct bpf_lru_list *l; in bpf_lru_init() local
658 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_lru_init()
659 bpf_lru_list_init(l); in bpf_lru_init()
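Initialization is directly inferable from the excerpt lines 635-642: empty lists, zeroed counts, and the rotation cursor parked at the inactive list head; for the per-CPU flavour, bpf_lru_init() (lines 656-659) runs this once per possible CPU.

        static void bpf_lru_list_init(struct bpf_lru_list *l)
        {
                int i;

                for (i = 0; i < NR_BPF_LRU_LIST_T; i++)
                        INIT_LIST_HEAD(&l->lists[i]);

                for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++)
                        l->counts[i] = 0;

                /* The first inactive rotation starts at the list head. */
                l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];

                raw_spin_lock_init(&l->lock);
        }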