/*
 * Percpu IDA library
 *
 * Copyright (C) 2013 Datera, Inc. Kent Overstreet
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/percpu_ida.h>
struct percpu_ida_cpu {
        /*
         * Even though this is percpu, we need a lock for tag stealing by remote
         * CPUs:
         */
        spinlock_t lock;

        /* nr_free/freelist form a stack of free IDs */
        unsigned nr_free;
        unsigned freelist[];
};

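/*
 * Move @nr tags from the top of the @src freelist stack onto the top of the
 * @dst freelist stack, updating both element counts.
 */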
static inline void move_tags(unsigned *dst, unsigned *dst_nr,
                             unsigned *src, unsigned *src_nr,
                             unsigned nr)
{
        *src_nr -= nr;
        memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
        *dst_nr += nr;
}

/*
 * Try to steal tags from a remote cpu's percpu freelist.
 *
 * We first check how many percpu freelists have tags.
 *
 * Then we iterate through the cpus until we find some tags - we don't attempt
 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
 * minimum.
 */
static inline void steal_tags(struct percpu_ida *pool,
                              struct percpu_ida_cpu *tags)
{
        unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
        struct percpu_ida_cpu *remote;

        for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
             cpus_have_tags; cpus_have_tags--) {
                cpu = cpumask_next(cpu, &pool->cpus_have_tags);

                if (cpu >= nr_cpu_ids) {
                        cpu = cpumask_first(&pool->cpus_have_tags);
                        if (cpu >= nr_cpu_ids)
                                BUG();
                }

                pool->cpu_last_stolen = cpu;
                remote = per_cpu_ptr(pool->tag_cpu, cpu);

                cpumask_clear_cpu(cpu, &pool->cpus_have_tags);

                if (remote == tags)
                        continue;

                spin_lock(&remote->lock);

                if (remote->nr_free) {
                        memcpy(tags->freelist,
                               remote->freelist,
                               sizeof(unsigned) * remote->nr_free);

                        tags->nr_free = remote->nr_free;
                        remote->nr_free = 0;
                }

                spin_unlock(&remote->lock);

                if (tags->nr_free)
                        break;
        }
}

/*
 * Pop up to pool->percpu_batch_size IDs off the global freelist, and push
 * them onto our percpu freelist:
 */
static inline void alloc_global_tags(struct percpu_ida *pool,
                                     struct percpu_ida_cpu *tags)
{
        move_tags(tags->freelist, &tags->nr_free,
                  pool->freelist, &pool->nr_free,
                  min(pool->nr_free, pool->percpu_batch_size));
}

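/*
 * Pop a tag off this cpu's local freelist; returns -ENOSPC if the local
 * freelist is empty.
 */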
static inline int alloc_local_tag(struct percpu_ida_cpu *tags)
{
        int tag = -ENOSPC;

        spin_lock(&tags->lock);
        if (tags->nr_free)
                tag = tags->freelist[--tags->nr_free];
        spin_unlock(&tags->lock);

        return tag;
}

/**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
 * @state: task state for prepare_to_wait
 *
 * Returns a tag - an integer in the range [0..nr_tags) (passed to
 * percpu_ida_init()) - or -ENOSPC on allocation failure.
 *
 * Safe to be called from interrupt context (assuming it isn't passed
 * TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE, of course).
 *
 * @state indicates whether or not to wait until a free tag is available: with
 * TASK_RUNNING the allocation never blocks and returns -ENOSPC if no tag is
 * available, while with TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE we may
 * sleep however long it takes until another thread frees a tag (same
 * semantics as a mempool).
 *
 * Will not fail if passed TASK_UNINTERRUPTIBLE; with TASK_INTERRUPTIBLE it
 * may return -ERESTARTSYS if a signal is received while waiting.
 */
int percpu_ida_alloc(struct percpu_ida *pool, int state)
{
        DEFINE_WAIT(wait);
        struct percpu_ida_cpu *tags;
        unsigned long flags;
        int tag;

        local_irq_save(flags);
        tags = this_cpu_ptr(pool->tag_cpu);

        /* Fastpath */
        tag = alloc_local_tag(tags);
        if (likely(tag >= 0)) {
                local_irq_restore(flags);
                return tag;
        }

        while (1) {
                spin_lock(&pool->lock);

                /*
                 * prepare_to_wait() must come before steal_tags(), in case
                 * percpu_ida_free() on another cpu flips a bit in
                 * cpus_have_tags
                 *
                 * global lock held and irqs disabled, don't need percpu lock
                 */
                if (state != TASK_RUNNING)
                        prepare_to_wait(&pool->wait, &wait, state);

                if (!tags->nr_free)
                        alloc_global_tags(pool, tags);
                if (!tags->nr_free)
                        steal_tags(pool, tags);

                if (tags->nr_free) {
                        tag = tags->freelist[--tags->nr_free];
                        if (tags->nr_free)
                                cpumask_set_cpu(smp_processor_id(),
                                                &pool->cpus_have_tags);
                }

                spin_unlock(&pool->lock);
                local_irq_restore(flags);

                if (tag >= 0 || state == TASK_RUNNING)
                        break;

                if (signal_pending_state(state, current)) {
                        tag = -ERESTARTSYS;
                        break;
                }

                schedule();

                local_irq_save(flags);
                tags = this_cpu_ptr(pool->tag_cpu);
        }
        if (state != TASK_RUNNING)
                finish_wait(&pool->wait, &wait);

        return tag;
}
EXPORT_SYMBOL_GPL(percpu_ida_alloc);
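
/*
 * Illustrative sketch (not part of the library): a typical caller pairs
 * percpu_ida_alloc() with percpu_ida_free() to index a preallocated array of
 * per-request structures. The my_dev/my_request names below are hypothetical
 * and only show the intended calling pattern; with TASK_INTERRUPTIBLE the
 * allocation may sleep and can return -ERESTARTSYS, hence the error check.
 *
 *        struct my_request *my_get_request(struct my_dev *dev)
 *        {
 *                int tag;
 *
 *                tag = percpu_ida_alloc(&dev->tag_pool, TASK_INTERRUPTIBLE);
 *                if (tag < 0)
 *                        return NULL;
 *                return &dev->requests[tag];
 *        }
 */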

/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
        struct percpu_ida_cpu *tags;
        unsigned long flags;
        unsigned nr_free;

        BUG_ON(tag >= pool->nr_tags);

        local_irq_save(flags);
        tags = this_cpu_ptr(pool->tag_cpu);

        spin_lock(&tags->lock);
        tags->freelist[tags->nr_free++] = tag;

        nr_free = tags->nr_free;
        spin_unlock(&tags->lock);

        if (nr_free == 1) {
                cpumask_set_cpu(smp_processor_id(),
                                &pool->cpus_have_tags);
                wake_up(&pool->wait);
        }

        if (nr_free == pool->percpu_max_size) {
                spin_lock(&pool->lock);

                /*
                 * Global lock held and irqs disabled, don't need percpu
                 * lock
                 */
                if (tags->nr_free == pool->percpu_max_size) {
                        move_tags(pool->freelist, &pool->nr_free,
                                  tags->freelist, &tags->nr_free,
                                  pool->percpu_batch_size);

                        wake_up(&pool->wait);
                }
                spin_unlock(&pool->lock);
        }

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(percpu_ida_free);
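
/*
 * Illustrative sketch (not part of the library): freeing is cheap and safe
 * from interrupt context, so a completion handler can return the tag
 * directly. The my_dev/my_request names are hypothetical.
 *
 *        static void my_complete_request(struct my_dev *dev,
 *                                        struct my_request *req)
 *        {
 *                unsigned tag = req - dev->requests;
 *
 *                percpu_ida_free(&dev->tag_pool, tag);
 *        }
 */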

/**
 * percpu_ida_destroy - release a tag pool's resources
 * @pool: pool to free
 *
 * Frees the resources allocated by percpu_ida_init().
 */
void percpu_ida_destroy(struct percpu_ida *pool)
{
        free_percpu(pool->tag_cpu);
        free_pages((unsigned long) pool->freelist,
                   get_order(pool->nr_tags * sizeof(unsigned)));
}
EXPORT_SYMBOL_GPL(percpu_ida_destroy);

/**
 * __percpu_ida_init - initialize a percpu tag pool
 * @pool: pool to initialize
 * @nr_tags: number of tags that will be available for allocation
 * @max_size: maximum number of free tags kept on each cpu's local freelist
 * @batch_size: number of tags moved at a time between a cpu's local freelist
 *	and the global freelist
 *
 * Initializes @pool so that it can be used to allocate tags - integers in the
 * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
 * preallocated array of tag structures. Most callers use the percpu_ida_init()
 * wrapper, which supplies default values for @max_size and @batch_size.
 *
 * Allocation is percpu, but sharding is limited by nr_tags - for best
 * performance, the workload should not span more cpus than nr_tags / 128.
 */
int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
                      unsigned long max_size, unsigned long batch_size)
{
        unsigned i, cpu, order;

        memset(pool, 0, sizeof(*pool));

        init_waitqueue_head(&pool->wait);
        spin_lock_init(&pool->lock);
        pool->nr_tags = nr_tags;
        pool->percpu_max_size = max_size;
        pool->percpu_batch_size = batch_size;

        /* Guard against overflow */
        if (nr_tags > (unsigned) INT_MAX + 1) {
                pr_err("percpu_ida_init(): nr_tags too large\n");
                return -EINVAL;
        }

        order = get_order(nr_tags * sizeof(unsigned));
        pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
        if (!pool->freelist)
                return -ENOMEM;

        for (i = 0; i < nr_tags; i++)
                pool->freelist[i] = i;

        pool->nr_free = nr_tags;

        pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
                                       pool->percpu_max_size * sizeof(unsigned),
                                       sizeof(unsigned));
        if (!pool->tag_cpu)
                goto err;

        for_each_possible_cpu(cpu)
                spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);

        return 0;
err:
        percpu_ida_destroy(pool);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__percpu_ida_init);
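
/*
 * Illustrative sketch (not part of the library): pool setup and teardown for
 * a driver with a fixed number of preallocated requests. The my_dev name and
 * MY_NR_REQUESTS constant are hypothetical; percpu_ida_init() is the header
 * wrapper around __percpu_ida_init() that supplies default per-cpu sizes.
 *
 *        static int my_dev_init_tags(struct my_dev *dev)
 *        {
 *                return percpu_ida_init(&dev->tag_pool, MY_NR_REQUESTS);
 *        }
 *
 *        static void my_dev_exit_tags(struct my_dev *dev)
 *        {
 *                percpu_ida_destroy(&dev->tag_pool);
 *        }
 */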

/**
 * percpu_ida_for_each_free - iterate over a pool's free ids
 * @pool: pool to iterate
 * @fn: iteration callback function
 * @data: parameter for @fn
 *
 * Note: this does not guarantee an exact iteration of the free ids. Some free
 * ids might be missed, some might be visited more than once, and some might
 * be allocated again by the time the callback sees them.
 */
int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
                             void *data)
{
        unsigned long flags;
        struct percpu_ida_cpu *remote;
        unsigned cpu, i;
        int err = 0;

        local_irq_save(flags);
        for_each_possible_cpu(cpu) {
                remote = per_cpu_ptr(pool->tag_cpu, cpu);
                spin_lock(&remote->lock);
                for (i = 0; i < remote->nr_free; i++) {
                        err = fn(remote->freelist[i], data);
                        if (err)
                                break;
                }
                spin_unlock(&remote->lock);
                if (err)
                        goto out;
        }

        spin_lock(&pool->lock);
        for (i = 0; i < pool->nr_free; i++) {
                err = fn(pool->freelist[i], data);
                if (err)
                        break;
        }
        spin_unlock(&pool->lock);
out:
        local_irq_restore(flags);
        return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
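
/*
 * Illustrative sketch (not part of the library): a callback that counts the
 * free ids seen during iteration. The names are hypothetical; the callback
 * must match the percpu_ida_cb signature and return non-zero to stop early.
 *
 *        static int my_count_free(unsigned id, void *data)
 *        {
 *                unsigned *count = data;
 *
 *                (*count)++;
 *                return 0;
 *        }
 *
 *        unsigned count = 0;
 *        percpu_ida_for_each_free(&dev->tag_pool, my_count_free, &count);
 */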

/**
 * percpu_ida_free_tags - return the number of free tags on a cpu or in the global freelist
 * @pool: pool to query
 * @cpu: cpu to query, or nr_cpu_ids to query the global freelist
 *
 * Note: this only returns a snapshot of the number of free tags.
 */
unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
{
        struct percpu_ida_cpu *remote;
        if (cpu == nr_cpu_ids)
                return pool->nr_free;
        remote = per_cpu_ptr(pool->tag_cpu, cpu);
        return remote->nr_free;
}
EXPORT_SYMBOL_GPL(percpu_ida_free_tags);

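/*
 * Illustrative sketch (not part of the library): computing a snapshot of the
 * total number of free tags by summing every cpu's local freelist plus the
 * global freelist (selected with cpu == nr_cpu_ids). The dev name is
 * hypothetical.
 *
 *        unsigned cpu, total = 0;
 *
 *        for_each_possible_cpu(cpu)
 *                total += percpu_ida_free_tags(&dev->tag_pool, cpu);
 *        total += percpu_ida_free_tags(&dev->tag_pool, nr_cpu_ids);
 */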