// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include "percpu_freelist.h"

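/*
 * A pcpu_freelist keeps one LIFO list per possible CPU, each guarded by
 * its own raw spinlock, plus a shared "extralist" that serves as a
 * fallback when NMI context cannot safely take any per-CPU lock.
 */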
int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	raw_spin_lock_init(&s->extralist.lock);
	s->extralist.first = NULL;
	return 0;
}

void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}

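/*
 * Link @node at the head of @head. Callers must either hold head->lock
 * or, as in pcpu_freelist_populate(), operate on a list that is not yet
 * visible to other CPUs.
 */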
static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
					   struct pcpu_freelist_node *node)
{
	node->next = head->first;
	head->first = node;
}

static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
					 struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	pcpu_freelist_push_node(head, node);
	raw_spin_unlock(&head->lock);
}

static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
						struct pcpu_freelist_node *node)
{
	if (!raw_spin_trylock(&s->extralist.lock))
		return false;

	pcpu_freelist_push_node(&s->extralist, node);
	raw_spin_unlock(&s->extralist.lock);
	return true;
}

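/*
 * In NMI context the lock of the current CPU's list may already be held
 * by the code we interrupted, so spinning on it could deadlock. Only
 * trylock is safe here: walk the per-CPU lists and, once a full round of
 * trylocks has failed, fall back to the extralist.
 */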
static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
					     struct pcpu_freelist_node *node)
{
	int cpu, orig_cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		struct pcpu_freelist_head *head;

		head = per_cpu_ptr(s->freelist, cpu);
		if (raw_spin_trylock(&head->lock)) {
			pcpu_freelist_push_node(head, node);
			raw_spin_unlock(&head->lock);
			return;
		}
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;

		/* cannot lock any per cpu lock, try extralist */
		if (cpu == orig_cpu &&
		    pcpu_freelist_try_push_extra(s, node))
			return;
	}
}

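/*
 * Push to the local CPU's list. Callers are expected to have interrupts
 * disabled already; pcpu_freelist_push() below is the irq-saving wrapper.
 * NMI callers are diverted to the trylock-only variant above.
 */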
void __pcpu_freelist_push(struct pcpu_freelist *s,
			  struct pcpu_freelist_node *node)
{
	if (in_nmi())
		___pcpu_freelist_push_nmi(s, node);
	else
		___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
}

void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	unsigned long flags;

	local_irq_save(flags);
	__pcpu_freelist_push(s, node);
	local_irq_restore(flags);
}

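/*
 * Spread nr_elems elements of elem_size bytes from buf evenly across all
 * possible CPUs; the first nr_elems % num_possible_cpus() lists get one
 * extra element. E.g. 10 elements on 4 possible CPUs end up split
 * 3/3/2/2.
 */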
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	unsigned int cpu, cpu_idx, i, j, n, m;

	n = nr_elems / num_possible_cpus();
	m = nr_elems % num_possible_cpus();

	cpu_idx = 0;
	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(s->freelist, cpu);
		j = n + (cpu_idx < m ? 1 : 0);
		for (i = 0; i < j; i++) {
			/* No locking required as this is not visible yet. */
			pcpu_freelist_push_node(head, buf);
			buf += elem_size;
		}
		cpu_idx++;
	}
}

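/*
 * Pop from the first non-empty list, starting with the current CPU's and
 * scanning the other CPUs' lists before giving up and trying the shared
 * extralist.
 */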
static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			head->first = node->next;
			raw_spin_unlock(&head->lock);
			return node;
		}
		raw_spin_unlock(&head->lock);
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			break;
	}

	/* per cpu lists are all empty, try extralist */
	raw_spin_lock(&s->extralist.lock);
	node = s->extralist.first;
	if (node)
		s->extralist.first = node->next;
	raw_spin_unlock(&s->extralist.lock);
	return node;
}

static struct pcpu_freelist_node *
___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		if (raw_spin_trylock(&head->lock)) {
			node = head->first;
			if (node) {
				head->first = node->next;
				raw_spin_unlock(&head->lock);
				return node;
			}
			raw_spin_unlock(&head->lock);
		}
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			break;
	}

	/* cannot pop from per cpu lists, try extralist */
	if (!raw_spin_trylock(&s->extralist.lock))
		return NULL;
	node = s->extralist.first;
	if (node)
		s->extralist.first = node->next;
	raw_spin_unlock(&s->extralist.lock);
	return node;
}

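/* As with push: NMI context must avoid spinning on potentially held locks. */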
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
	if (in_nmi())
		return ___pcpu_freelist_pop_nmi(s);
	return ___pcpu_freelist_pop(s);
}

struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_node *ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = __pcpu_freelist_pop(s);
	local_irq_restore(flags);
	return ret;
}
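
/*
 * Illustrative usage sketch, not part of this file: "struct my_elem" and
 * its layout are hypothetical. A typical user embeds the freelist node
 * in its own element type and recovers the element after a pop:
 *
 *	struct my_elem {
 *		struct pcpu_freelist_node fnode;
 *		char payload[64];
 *	};
 *
 *	struct pcpu_freelist_node *n = pcpu_freelist_pop(&s);
 *	struct my_elem *e = n ? container_of(n, struct my_elem, fnode) : NULL;
 */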