Cross-reference listing: lines matching references to the identifier "l" in kernel/bpf/bpf_lru_list.c. Each entry gives the source line number, the matching line, the enclosing function, and whether "l" is a function argument or a local variable there.
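To make the listing easier to read, short sketches are interleaved below that rebuild the functions from the fragments shown here. Together they form one minimal userspace model: everything is inferred from the lines in this listing plus the public structure of kernel/bpf/bpf_lru_list.[ch], with WARN_ON guards, the local-list types, and locking simplified away. The model starts with the list handle "l" itself; the references below touch four of its fields (lists[], counts[], next_inactive_rotation, and lock):

/* Minimal userspace stand-ins; the kernel uses <linux/list.h>. */
#include <stddef.h>                      /* offsetof */

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_move(struct list_head *e, struct list_head *head)
{
	e->prev->next = e->next;         /* unlink from the old spot */
	e->next->prev = e->prev;
	e->next = head->next;            /* relink right after head */
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

enum bpf_lru_list_type {                 /* the three global sub-lists */
	BPF_LRU_LIST_T_ACTIVE,
	BPF_LRU_LIST_T_INACTIVE,
	BPF_LRU_LIST_T_FREE,
	NR_BPF_LRU_LIST_T,
};

struct bpf_lru_node {                    /* simplified; the kernel packs cpu/type/ref */
	struct list_head list;
	int type;                        /* which sub-list the node is on */
	int ref;                         /* set on access, consumed by rotation */
};

struct bpf_lru_list {
	struct list_head lists[NR_BPF_LRU_LIST_T];
	unsigned int counts[NR_BPF_LRU_LIST_T];
	struct list_head *next_inactive_rotation;  /* inactive-scan cursor */
	/* the kernel guards all of this with a raw_spinlock_t lock */
};

/* container_of for the model's node type */
#define node_of(pos) \
	((struct bpf_lru_node *)((char *)(pos) - offsetof(struct bpf_lru_node, list)))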
52 static void bpf_lru_list_count_inc(struct bpf_lru_list *l, in bpf_lru_list_count_inc() argument
56 l->counts[type]++; in bpf_lru_list_count_inc()
59 static void bpf_lru_list_count_dec(struct bpf_lru_list *l, in bpf_lru_list_count_dec() argument
63 l->counts[type]--; in bpf_lru_list_count_dec()
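Lines 52-63 are the counting helpers. In the kernel only the ACTIVE and INACTIVE lists are counted (the guard is written as type < NR_BPF_LRU_LIST_COUNT there); FREE nodes are deliberately untracked, which is why the shrink and refill paths further down can move nodes onto free lists without the counters underflowing. Rebuilt over the model above:

static void bpf_lru_list_count_inc(struct bpf_lru_list *l, int type)
{
	if (type == BPF_LRU_LIST_T_ACTIVE || type == BPF_LRU_LIST_T_INACTIVE)
		l->counts[type]++;
}

static void bpf_lru_list_count_dec(struct bpf_lru_list *l, int type)
{
	if (type == BPF_LRU_LIST_T_ACTIVE || type == BPF_LRU_LIST_T_INACTIVE)
		l->counts[type]--;
}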
66 static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l, in __bpf_lru_node_move_to_free() argument
77 if (&node->list == l->next_inactive_rotation) in __bpf_lru_node_move_to_free()
78 l->next_inactive_rotation = l->next_inactive_rotation->prev; in __bpf_lru_node_move_to_free()
80 bpf_lru_list_count_dec(l, node->type); in __bpf_lru_node_move_to_free()
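Lines 66-80 pull a node off the global list and onto a caller-supplied free list (for the common LRU this is typically a CPU-local free list). The cursor fix-up at lines 77-78 is the detail to notice: if the inactive-rotation cursor sits on the departing node, it is backed off first so it never dangles. A reassembly over the model, minus the kernel's WARN_ON guard:

static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
					struct bpf_lru_node *node,
					struct list_head *free_list,
					int tgt_free_type)
{
	/* if the rotation cursor sits on the departing node, back it off */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	bpf_lru_list_count_dec(l, node->type);
	node->type = tgt_free_type;
	list_move(&node->list, free_list);
}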
87 static void __bpf_lru_node_move_in(struct bpf_lru_list *l, in __bpf_lru_node_move_in() argument
95 bpf_lru_list_count_inc(l, tgt_type); in __bpf_lru_node_move_in()
98 list_move(&node->list, &l->lists[tgt_type]); in __bpf_lru_node_move_in()
105 static void __bpf_lru_node_move(struct bpf_lru_list *l, in __bpf_lru_node_move() argument
114 bpf_lru_list_count_dec(l, node->type); in __bpf_lru_node_move()
115 bpf_lru_list_count_inc(l, tgt_type); in __bpf_lru_node_move()
123 if (&node->list == l->next_inactive_rotation) in __bpf_lru_node_move()
124 l->next_inactive_rotation = l->next_inactive_rotation->prev; in __bpf_lru_node_move()
126 list_move(&node->list, &l->lists[tgt_type]); in __bpf_lru_node_move()
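Lines 105-126 are the general move between sub-lists: both counters are adjusted, the node's reference bit is cleared (every move consumes it), the same cursor fix-up is applied, and the node is relinked at the head of the target list. Rebuilt over the model:

static void __bpf_lru_node_move(struct bpf_lru_list *l,
				struct bpf_lru_node *node, int tgt_type)
{
	bpf_lru_list_count_dec(l, node->type);
	bpf_lru_list_count_inc(l, tgt_type);
	node->type = tgt_type;
	node->ref = 0;                   /* moves consume the reference bit */

	/* same fix-up as __bpf_lru_node_move_to_free() above */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	list_move(&node->list, &l->lists[tgt_type]);
}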
129 static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l) in bpf_lru_list_inactive_low() argument
131 return l->counts[BPF_LRU_LIST_T_INACTIVE] < in bpf_lru_list_inactive_low()
132 l->counts[BPF_LRU_LIST_T_ACTIVE]; in bpf_lru_list_inactive_low()
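Lines 129-132 are the entire balance heuristic: the inactive list counts as low when it holds fewer nodes than the active list. The rotate path (lines 242-247 further down) uses this to decide whether active nodes must be demoted first:

static int bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{
	return l->counts[BPF_LRU_LIST_T_INACTIVE] <
	       l->counts[BPF_LRU_LIST_T_ACTIVE];
}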
145 struct bpf_lru_list *l) in __bpf_lru_list_rotate_active() argument
147 struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE]; in __bpf_lru_list_rotate_active()
154 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); in __bpf_lru_list_rotate_active()
156 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE); in __bpf_lru_list_rotate_active()
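Lines 145-156 rotate the active list: nodes whose reference bit is set are rotated back to the head of active (the move itself clears the bit), unreferenced nodes are demoted to inactive. A simplified reassembly; the kernel walks the list in reverse with list_for_each_entry_safe_reverse() and caps the walk at lru->nr_scans entries, both of which this sketch drops:

static void __bpf_lru_list_rotate_active(struct bpf_lru_list *l)
{
	struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
	struct list_head *pos = active->next;

	while (pos != active) {
		struct bpf_lru_node *node = node_of(pos);
		struct list_head *next = pos->next;  /* save before the move */

		if (node->ref)
			/* still in use: rotate to the head, ref cleared */
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			/* idle: demote so it becomes an eviction candidate */
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
		pos = next;
	}
}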
172 struct bpf_lru_list *l) in __bpf_lru_list_rotate_inactive() argument
174 struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in __bpf_lru_list_rotate_inactive()
182 last = l->next_inactive_rotation->next; in __bpf_lru_list_rotate_inactive()
186 cur = l->next_inactive_rotation; in __bpf_lru_list_rotate_inactive()
196 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); in __bpf_lru_list_rotate_inactive()
203 l->next_inactive_rotation = next; in __bpf_lru_list_rotate_inactive()
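Lines 172-203 are the subtle one: the inactive scan does not restart from the head each time. It resumes tail-ward from next_inactive_rotation, promotes any referenced node back to active, and parks the cursor where it stopped (line 203), so repeated short scans still cover the whole list. A simplified reassembly (in the kernel the sentinel skip does not count against the scan budget, and the budget lives in lru->nr_scans; here it is a plain parameter):

static void __bpf_lru_list_rotate_inactive(struct bpf_lru_list *l,
					   unsigned int nr_scans)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct list_head *cur = l->next_inactive_rotation;
	unsigned int i;

	if (list_empty(inactive))
		return;

	for (i = 0; i < nr_scans; i++) {
		struct bpf_lru_node *node;
		struct list_head *next;

		if (cur == inactive) {           /* step over the sentinel */
			cur = cur->prev;
			continue;
		}
		node = node_of(cur);
		next = cur->prev;                /* the scan runs tail-ward */
		if (node->ref)
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		cur = next;
	}

	/* remember where we stopped; the next rotation resumes here */
	l->next_inactive_rotation = cur;
}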
212 struct bpf_lru_list *l, in __bpf_lru_list_shrink_inactive() argument
217 struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in __bpf_lru_list_shrink_inactive()
224 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); in __bpf_lru_list_shrink_inactive()
226 __bpf_lru_node_move_to_free(l, node, free_list, in __bpf_lru_list_shrink_inactive()
242 static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l) in __bpf_lru_list_rotate() argument
244 if (bpf_lru_list_inactive_low(l)) in __bpf_lru_list_rotate()
245 __bpf_lru_list_rotate_active(lru, l); in __bpf_lru_list_rotate()
247 __bpf_lru_list_rotate_inactive(lru, l); in __bpf_lru_list_rotate()
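Lines 242-247 compose the two rotations into the eviction policy: top up inactive from active only when inactive is low, then always scan inactive so referenced nodes can escape eviction. Rebuilt over the model (the kernel passes struct bpf_lru for its nr_scans setting; here the budget stays a parameter):

static void __bpf_lru_list_rotate(struct bpf_lru_list *l, unsigned int nr_scans)
{
	if (bpf_lru_list_inactive_low(l))
		__bpf_lru_list_rotate_active(l);

	__bpf_lru_list_rotate_inactive(l, nr_scans);
}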
261 struct bpf_lru_list *l, in __bpf_lru_list_shrink() argument
271 nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink, in __bpf_lru_list_shrink()
277 if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE])) in __bpf_lru_list_shrink()
278 force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in __bpf_lru_list_shrink()
280 force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE]; in __bpf_lru_list_shrink()
285 __bpf_lru_node_move_to_free(l, node, free_list, in __bpf_lru_list_shrink()
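Lines 212-226 and 261-285 implement reclaim. The inactive shrink gives referenced nodes a second chance (promotion back to active) and moves unreferenced ones to the free list; if that frees nothing, lines 277-285 force-shrink a node anyway, from the tail of the inactive list if it is non-empty, else from the active list, so an allocation can always make progress. A simplified reassembly (the kernel additionally caps the inactive walk at lru->nr_scans):

static unsigned int __bpf_lru_list_shrink_inactive(struct bpf_lru_list *l,
						   unsigned int tgt_nshrink,
						   struct list_head *free_list)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct list_head *pos = inactive->next;
	unsigned int nshrunk = 0;

	while (pos != inactive && nshrunk < tgt_nshrink) {
		struct bpf_lru_node *node = node_of(pos);
		struct list_head *next = pos->next;

		if (node->ref) {
			/* touched since the last scan: second chance */
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		} else {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    BPF_LRU_LIST_T_FREE);
			nshrunk++;
		}
		pos = next;
	}
	return nshrunk;
}

static int __bpf_lru_list_shrink(struct bpf_lru_list *l,
				 unsigned int tgt_nshrink,
				 struct list_head *free_list)
{
	struct list_head *force;

	if (__bpf_lru_list_shrink_inactive(l, tgt_nshrink, free_list))
		return 1;

	/* everything looked referenced: force-shrink one node anyway */
	if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
		force = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	else
		force = &l->lists[BPF_LRU_LIST_T_ACTIVE];

	if (list_empty(force))
		return 0;

	__bpf_lru_node_move_to_free(l, node_of(force->prev), free_list,
				    BPF_LRU_LIST_T_FREE);
	return 1;
}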
295 static void __local_list_flush(struct bpf_lru_list *l, in __local_list_flush() argument
303 __bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE); in __local_list_flush()
305 __bpf_lru_node_move_in(l, node, in __local_list_flush()
310 static void bpf_lru_list_push_free(struct bpf_lru_list *l, in bpf_lru_list_push_free() argument
318 raw_spin_lock_irqsave(&l->lock, flags); in bpf_lru_list_push_free()
319 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE); in bpf_lru_list_push_free()
320 raw_spin_unlock_irqrestore(&l->lock, flags); in bpf_lru_list_push_free()
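Lines 310-320 show the free path: under l->lock, taken with raw_spin_lock_irqsave() because elements can be freed from any context, the node is simply moved onto the FREE sub-list. In the lock-free model:

static void bpf_lru_list_push_free(struct bpf_lru_list *l,
				   struct bpf_lru_node *node)
{
	/* kernel: raw_spin_lock_irqsave(&l->lock, flags); */
	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
	/* kernel: raw_spin_unlock_irqrestore(&l->lock, flags); */
}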
326 struct bpf_lru_list *l = &lru->common_lru.lru_list; in bpf_lru_list_pop_free_to_local() local
330 raw_spin_lock(&l->lock); in bpf_lru_list_pop_free_to_local()
332 __local_list_flush(l, loc_l); in bpf_lru_list_pop_free_to_local()
334 __bpf_lru_list_rotate(lru, l); in bpf_lru_list_pop_free_to_local()
336 list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE], in bpf_lru_list_pop_free_to_local()
338 __bpf_lru_node_move_to_free(l, node, local_free_list(loc_l), in bpf_lru_list_pop_free_to_local()
345 __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree, in bpf_lru_list_pop_free_to_local()
349 raw_spin_unlock(&l->lock); in bpf_lru_list_pop_free_to_local()
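Lines 326-349 refill a CPU-local free list from the common LRU. Note line 330: l->lock is taken without irqsave here, since the caller has already disabled interrupts when taking the local list's own lock. The sequence is: flush the CPU's pending local nodes back (line 332), rotate, batch-move up to LOCAL_FREE_TARGET nodes from the global FREE list over, and shrink for any remainder (line 345). A sketch under a hypothetical name, bpf_lru_refill_local(); the local-list flush is skipped because local list types are not modeled:

static unsigned int bpf_lru_refill_local(struct bpf_lru_list *l,
					 struct list_head *local_free,
					 unsigned int target,
					 unsigned int nr_scans)
{
	struct list_head *free = &l->lists[BPF_LRU_LIST_T_FREE];
	unsigned int nfree = 0;

	/* kernel: raw_spin_lock(&l->lock) -- irqs are already off here */
	__bpf_lru_list_rotate(l, nr_scans);

	while (!list_empty(free) && nfree < target) {
		__bpf_lru_node_move_to_free(l, node_of(free->next),
					    local_free, BPF_LRU_LIST_T_FREE);
		nfree++;
	}

	if (nfree < target)      /* FREE alone could not cover the target */
		__bpf_lru_list_shrink(l, target - nfree, local_free);
	/* kernel: raw_spin_unlock(&l->lock) */
	return nfree;
}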
409 struct bpf_lru_list *l; in bpf_percpu_lru_pop_free() local
413 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_pop_free()
415 raw_spin_lock_irqsave(&l->lock, flags); in bpf_percpu_lru_pop_free()
417 __bpf_lru_list_rotate(lru, l); in bpf_percpu_lru_pop_free()
419 free_list = &l->lists[BPF_LRU_LIST_T_FREE]; in bpf_percpu_lru_pop_free()
421 __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list, in bpf_percpu_lru_pop_free()
428 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE); in bpf_percpu_lru_pop_free()
431 raw_spin_unlock_irqrestore(&l->lock, flags); in bpf_percpu_lru_pop_free()
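Lines 409-431 are the per-CPU flavor: each CPU owns an entire bpf_lru_list, so a pop rotates that CPU's lists, shrinks up to PERCPU_FREE_TARGET nodes into FREE if it ran dry, and hands the first free node out by moving it straight to INACTIVE. A sketch under a hypothetical name (the kernel version also stamps the element's hash and sets the node's ref bit before the move):

static struct bpf_lru_node *
bpf_percpu_lru_pop_free_model(struct bpf_lru_list *l, unsigned int target,
			      unsigned int nr_scans)
{
	struct list_head *free = &l->lists[BPF_LRU_LIST_T_FREE];
	struct bpf_lru_node *node = NULL;

	/* kernel: raw_spin_lock_irqsave(&l->lock, flags); */
	__bpf_lru_list_rotate(l, nr_scans);

	if (list_empty(free))
		__bpf_lru_list_shrink(l, target, free);  /* PERCPU_FREE_TARGET */

	if (!list_empty(free)) {
		node = node_of(free->next);
		/* hand the node out as in-use: straight to INACTIVE */
		__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
	}
	/* kernel: raw_spin_unlock_irqrestore(&l->lock, flags); */
	return node;
}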
544 struct bpf_lru_list *l; in bpf_percpu_lru_push_free() local
547 l = per_cpu_ptr(lru->percpu_lru, node->cpu); in bpf_percpu_lru_push_free()
549 raw_spin_lock_irqsave(&l->lock, flags); in bpf_percpu_lru_push_free()
551 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE); in bpf_percpu_lru_push_free()
553 raw_spin_unlock_irqrestore(&l->lock, flags); in bpf_percpu_lru_push_free()
568 struct bpf_lru_list *l = &lru->common_lru.lru_list; in bpf_common_lru_populate() local
577 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); in bpf_common_lru_populate()
588 struct bpf_lru_list *l; in bpf_percpu_lru_populate() local
597 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_populate()
603 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); in bpf_percpu_lru_populate()
636 static void bpf_lru_list_init(struct bpf_lru_list *l) in bpf_lru_list_init() argument
641 INIT_LIST_HEAD(&l->lists[i]); in bpf_lru_list_init()
644 l->counts[i] = 0; in bpf_lru_list_init()
646 l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in bpf_lru_list_init()
648 raw_spin_lock_init(&l->lock); in bpf_lru_list_init()
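Lines 636-648 close the loop: every sub-list starts empty, the counts at zero, and the rotation cursor parked on the inactive list's sentinel, which is why the inactive scan must be able to step over the sentinel. Rebuilt over the model, minus the spinlock init:

static void bpf_lru_list_init(struct bpf_lru_list *l)
{
	int i;

	for (i = 0; i < NR_BPF_LRU_LIST_T; i++) {
		INIT_LIST_HEAD(&l->lists[i]);
		l->counts[i] = 0;
	}

	/* park the rotation cursor on the inactive list's sentinel */
	l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];

	/* kernel: raw_spin_lock_init(&l->lock); */
}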
662 struct bpf_lru_list *l; in bpf_lru_init() local
664 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_lru_init()
665 bpf_lru_list_init(l); in bpf_lru_init()