/*
 * Percpu IDA library
 *
 * Copyright (C) 2013 Datera, Inc. Kent Overstreet
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/percpu_ida.h>

struct percpu_ida_cpu {
	/*
	 * Even though this is percpu, we need a lock for tag stealing by remote
	 * CPUs:
	 */
	spinlock_t			lock;

	/* nr_free/freelist form a stack of free IDs */
	unsigned			nr_free;
	unsigned			freelist[];
};

/*
 * Move nr tags from the top of the src freelist to the top of the dst
 * freelist.
 */
static inline void move_tags(unsigned *dst, unsigned *dst_nr,
			     unsigned *src, unsigned *src_nr,
			     unsigned nr)
{
	*src_nr -= nr;
	memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
	*dst_nr += nr;
}

/*
 * Try to steal tags from a remote cpu's percpu freelist.
 *
 * We first check how many percpu freelists have tags.
 *
 * Then we iterate through the cpus until we find some tags - we don't attempt
 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
 * minimum.
 */
static inline void steal_tags(struct percpu_ida *pool,
			      struct percpu_ida_cpu *tags)
{
	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
	struct percpu_ida_cpu *remote;

	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
	     cpus_have_tags; cpus_have_tags--) {
		cpu = cpumask_next(cpu, &pool->cpus_have_tags);

		if (cpu >= nr_cpu_ids) {
			cpu = cpumask_first(&pool->cpus_have_tags);
			if (cpu >= nr_cpu_ids)
				BUG();
		}

		pool->cpu_last_stolen = cpu;
		remote = per_cpu_ptr(pool->tag_cpu, cpu);

		cpumask_clear_cpu(cpu, &pool->cpus_have_tags);

		if (remote == tags)
			continue;

		spin_lock(&remote->lock);

		if (remote->nr_free) {
			memcpy(tags->freelist,
			       remote->freelist,
			       sizeof(unsigned) * remote->nr_free);

			tags->nr_free = remote->nr_free;
			remote->nr_free = 0;
		}

		spin_unlock(&remote->lock);

		if (tags->nr_free)
			break;
	}
}

/*
 * Pop up to pool->percpu_batch_size IDs off the global freelist, and push
 * them onto our percpu freelist:
 */
static inline void alloc_global_tags(struct percpu_ida *pool,
				     struct percpu_ida_cpu *tags)
{
	move_tags(tags->freelist, &tags->nr_free,
		  pool->freelist, &pool->nr_free,
		  min(pool->nr_free, pool->percpu_batch_size));
}

/*
 * Pop a tag off this cpu's freelist, or return -ENOSPC if it is empty.
 */
static inline int alloc_local_tag(struct percpu_ida_cpu *tags)
{
	int tag = -ENOSPC;

	spin_lock(&tags->lock);
	if (tags->nr_free)
		tag = tags->freelist[--tags->nr_free];
	spin_unlock(&tags->lock);

	return tag;
}

/**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
 * @state: task state for prepare_to_wait
 *
 * Returns a tag - an integer in the range [0..nr_tags) (passed to
 * percpu_ida_init()) - or -ENOSPC on allocation failure.
 *
 * Safe to be called from interrupt context (assuming it isn't passed
 * TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE, of course).
 *
 * @state indicates whether or not to wait until a free id is available (it is
 * not used for internal memory allocations); thus if passed
 * TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE we may sleep however long it
 * takes until another thread frees an id (same semantics as a mempool).
 *
 * Will not return -ENOSPC if passed TASK_UNINTERRUPTIBLE; with
 * TASK_INTERRUPTIBLE it may instead return -ERESTARTSYS if a signal is
 * pending.
 */
int percpu_ida_alloc(struct percpu_ida *pool, int state)
{
	DEFINE_WAIT(wait);
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	int tag;

	local_irq_save(flags);
	tags = this_cpu_ptr(pool->tag_cpu);

	/* Fastpath */
	tag = alloc_local_tag(tags);
	if (likely(tag >= 0)) {
		local_irq_restore(flags);
		return tag;
	}

	while (1) {
		spin_lock(&pool->lock);

		/*
		 * prepare_to_wait() must come before steal_tags(), in case
		 * percpu_ida_free() on another cpu flips a bit in
		 * cpus_have_tags
		 *
		 * global lock held and irqs disabled, don't need percpu lock
		 */
		if (state != TASK_RUNNING)
			prepare_to_wait(&pool->wait, &wait, state);

		if (!tags->nr_free)
			alloc_global_tags(pool, tags);
		if (!tags->nr_free)
			steal_tags(pool, tags);

		if (tags->nr_free) {
			tag = tags->freelist[--tags->nr_free];
			if (tags->nr_free)
				cpumask_set_cpu(smp_processor_id(),
						&pool->cpus_have_tags);
		}

		spin_unlock(&pool->lock);
		local_irq_restore(flags);

		if (tag >= 0 || state == TASK_RUNNING)
			break;

		if (signal_pending_state(state, current)) {
			tag = -ERESTARTSYS;
			break;
		}

		schedule();

		local_irq_save(flags);
		tags = this_cpu_ptr(pool->tag_cpu);
	}
	if (state != TASK_RUNNING)
		finish_wait(&pool->wait, &wait);

	return tag;
}
EXPORT_SYMBOL_GPL(percpu_ida_alloc);
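
/*
 * Illustrative sketch (not part of the original file): one way a caller might
 * pair percpu_ida_alloc() with percpu_ida_free(). The pool is assumed to be a
 * hypothetical, already-initialized driver pool, and the tag would normally
 * index a preallocated array of per-command state.
 */
static int __maybe_unused percpu_ida_example_alloc_free(struct percpu_ida *pool)
{
	int tag;

	/* May sleep until another context frees a tag. */
	tag = percpu_ida_alloc(pool, TASK_UNINTERRUPTIBLE);
	if (tag < 0)
		return tag;

	/* ... use 'tag' to index a preallocated per-tag structure ... */

	percpu_ida_free(pool, tag);
	return 0;
}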

/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	unsigned nr_free;

	BUG_ON(tag >= pool->nr_tags);

	local_irq_save(flags);
	tags = this_cpu_ptr(pool->tag_cpu);

	spin_lock(&tags->lock);
	tags->freelist[tags->nr_free++] = tag;

	nr_free = tags->nr_free;
	spin_unlock(&tags->lock);

	if (nr_free == 1) {
		cpumask_set_cpu(smp_processor_id(),
				&pool->cpus_have_tags);
		wake_up(&pool->wait);
	}

	if (nr_free == pool->percpu_max_size) {
		spin_lock(&pool->lock);

		/*
		 * Global lock held and irqs disabled, don't need percpu
		 * lock
		 */
		if (tags->nr_free == pool->percpu_max_size) {
			move_tags(pool->freelist, &pool->nr_free,
				  tags->freelist, &tags->nr_free,
				  pool->percpu_batch_size);

			wake_up(&pool->wait);
		}
		spin_unlock(&pool->lock);
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(percpu_ida_free);

/**
 * percpu_ida_destroy - release a tag pool's resources
 * @pool: pool to free
 *
 * Frees the resources allocated by percpu_ida_init().
 */
void percpu_ida_destroy(struct percpu_ida *pool)
{
	free_percpu(pool->tag_cpu);
	free_pages((unsigned long) pool->freelist,
		   get_order(pool->nr_tags * sizeof(unsigned)));
}
EXPORT_SYMBOL_GPL(percpu_ida_destroy);

/**
 * __percpu_ida_init - initialize a percpu tag pool
 * @pool: pool to initialize
 * @nr_tags: number of tags that will be available for allocation
 * @max_size: maximum number of tags a percpu freelist may hold
 * @batch_size: number of tags moved at a time between the global and a
 *	percpu freelist
 *
 * Initializes @pool so that it can be used to allocate tags - integers in the
 * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
 * preallocated array of tag structures.
 *
 * Most callers use the percpu_ida_init() wrapper, which supplies default
 * @max_size and @batch_size values.
 *
 * Allocation is percpu, but sharding is limited by nr_tags - for best
 * performance, the workload should not span more cpus than nr_tags / 128.
 */
int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
	unsigned long max_size, unsigned long batch_size)
{
	unsigned i, cpu, order;

	memset(pool, 0, sizeof(*pool));

	init_waitqueue_head(&pool->wait);
	spin_lock_init(&pool->lock);
	pool->nr_tags = nr_tags;
	pool->percpu_max_size = max_size;
	pool->percpu_batch_size = batch_size;

	/* Guard against overflow */
	if (nr_tags > (unsigned) INT_MAX + 1) {
		pr_err("percpu_ida_init(): nr_tags too large\n");
		return -EINVAL;
	}

	order = get_order(nr_tags * sizeof(unsigned));
	pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!pool->freelist)
		return -ENOMEM;

	for (i = 0; i < nr_tags; i++)
		pool->freelist[i] = i;

	pool->nr_free = nr_tags;

	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
				       pool->percpu_max_size * sizeof(unsigned),
				       sizeof(unsigned));
	if (!pool->tag_cpu)
		goto err;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);

	return 0;
err:
	percpu_ida_destroy(pool);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__percpu_ida_init);
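
/*
 * Illustrative sketch (not part of the original file): setting up a pool for
 * a hypothetical number of tags and tearing it down again, using the
 * percpu_ida_init() wrapper from <linux/percpu_ida.h> that supplies the
 * default batch and percpu freelist sizes.
 */
static int __maybe_unused percpu_ida_example_init(struct percpu_ida *pool,
						  unsigned long nr_tags)
{
	int err;

	err = percpu_ida_init(pool, nr_tags);
	if (err)
		return err;

	/* ... allocate and free tags against 'pool' here ... */

	percpu_ida_destroy(pool);
	return 0;
}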

/**
 * percpu_ida_for_each_free - iterate over a pool's free ids
 * @pool: pool to iterate
 * @fn: iteration callback function
 * @data: parameter for @fn
 *
 * Note that this does not guarantee a strict iteration of the free ids: some
 * free ids might be missed, some might be visited more than once, and some
 * might already have been re-allocated by the time they are visited.
 */
int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
	void *data)
{
	unsigned long flags;
	struct percpu_ida_cpu *remote;
	unsigned cpu, i;
	int err = 0;

	local_irq_save(flags);
	for_each_possible_cpu(cpu) {
		remote = per_cpu_ptr(pool->tag_cpu, cpu);
		spin_lock(&remote->lock);
		for (i = 0; i < remote->nr_free; i++) {
			err = fn(remote->freelist[i], data);
			if (err)
				break;
		}
		spin_unlock(&remote->lock);
		if (err)
			goto out;
	}

	spin_lock(&pool->lock);
	for (i = 0; i < pool->nr_free; i++) {
		err = fn(pool->freelist[i], data);
		if (err)
			break;
	}
	spin_unlock(&pool->lock);
out:
	local_irq_restore(flags);
	return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
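
/*
 * Illustrative sketch (not part of the original file): a callback for
 * percpu_ida_for_each_free() that merely counts the free ids it is shown.
 * The accumulator passed via @data is a hypothetical caller-provided counter.
 */
static int __maybe_unused percpu_ida_example_count_cb(unsigned id, void *data)
{
	unsigned *count = data;

	(*count)++;
	return 0;	/* returning non-zero would stop the iteration early */
}

/*
 * A possible caller: snapshot how many ids currently look free. The result is
 * approximate, for the reasons given above percpu_ida_for_each_free().
 */
static unsigned __maybe_unused percpu_ida_example_count_free(struct percpu_ida *pool)
{
	unsigned count = 0;

	percpu_ida_for_each_free(pool, percpu_ida_example_count_cb, &count);
	return count;
}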

/**
 * percpu_ida_free_tags - count free tags on a cpu or in the global pool
 * @pool: pool to query
 * @cpu: cpu to query, or nr_cpu_ids for the global freelist
 *
 * Note: this only returns a snapshot; the count can change as soon as it is
 * read.
 */
unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
{
	struct percpu_ida_cpu *remote;

	if (cpu == nr_cpu_ids)
		return pool->nr_free;
	remote = per_cpu_ptr(pool->tag_cpu, cpu);
	return remote->nr_free;
}
EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
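
/*
 * Illustrative sketch (not part of the original file): summing the per-cpu
 * snapshots and the global freelist to estimate the total number of free
 * tags. The total is only approximate, since tags can migrate between
 * freelists while we iterate.
 */
static unsigned __maybe_unused percpu_ida_example_total_free(struct percpu_ida *pool)
{
	unsigned total = percpu_ida_free_tags(pool, nr_cpu_ids);	/* global freelist */
	int cpu;

	for_each_possible_cpu(cpu)
		total += percpu_ida_free_tags(pool, cpu);

	return total;
}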