1 /*
2 * mm/percpu.c - percpu memory allocator
3 *
4 * Copyright (C) 2009 SUSE Linux Products GmbH
5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
6 *
7 * Copyright (C) 2017 Facebook Inc.
8 * Copyright (C) 2017 Dennis Zhou <dennisszhou@gmail.com>
9 *
10 * This file is released under the GPLv2 license.
11 *
12 * The percpu allocator handles both static and dynamic areas. Percpu
13 * areas are allocated in chunks which are divided into units. There is
14 * a 1-to-1 mapping between units and possible cpus. These units are grouped
15 * based on NUMA properties of the machine.
16 *
17 * c0 c1 c2
18 * ------------------- ------------------- ------------
19 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
20 * ------------------- ...... ------------------- .... ------------
21 *
22 * Allocation is done by offsets into a unit's address space. I.e., an
23 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
24 * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear
25 * and even sparse. Access is handled by configuring percpu base
26 * registers according to the cpu to unit mappings and offsetting the
27 * base address using pcpu_unit_size.
28 *
29 * There is special consideration for the first chunk which must handle
30 * the static percpu variables in the kernel image as allocation services
31 * are not online yet. In short, the first chunk is structured like so:
32 *
33 * <Static | [Reserved] | Dynamic>
34 *
35 * The static data is copied from the original section managed by the
36 * linker. The reserved section, if non-zero, primarily manages static
37 * percpu variables from kernel modules. Finally, the dynamic section
38 * takes care of normal allocations.
39 *
40 * The allocator organizes chunks into lists according to free size and
41 * tries to allocate from the fullest chunk first. Each chunk is managed
42 * by a bitmap with metadata blocks. The allocation map is updated on
43 * every allocation and free to reflect the current state while the boundary
44 * map is only updated on allocation. Each metadata block contains
45 * information to help mitigate the need to iterate over large portions
46 * of the bitmap. The reverse mapping from page to chunk is stored in
47 * the page's index. Lastly, units are lazily backed and grow in unison.
48 *
49 * There is a unique conversion that goes on here between bytes and bits.
50 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
51 * tracks the number of pages it is responsible for in nr_pages. Helper
52 * functions are used to convert between the bytes, bits, and blocks.
53 * All hints are managed in bits unless explicitly stated.
54 *
55 * To use this allocator, arch code should do the following:
56 *
57 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
58 * regular address to percpu pointer and back if they need to be
59 * different from the default
60 *
61 * - use pcpu_setup_first_chunk() during percpu area initialization to
62 * setup the first chunk containing the kernel static percpu area
63 */
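/*
 * Illustrative sketch of the byte/bit conversion described above, assuming
 * a 4 byte PCPU_MIN_ALLOC_SIZE and 4K pages. A 512 byte request occupies
 *
 *	bits = 512 >> PCPU_MIN_ALLOC_SHIFT;	(128 bits)
 *
 * in the allocation map, and each page of backing memory corresponds to
 * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE == 1024 map bits.
 */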
64
65 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
66
67 #include <linux/bitmap.h>
68 #include <linux/bootmem.h>
69 #include <linux/err.h>
70 #include <linux/lcm.h>
71 #include <linux/list.h>
72 #include <linux/log2.h>
73 #include <linux/mm.h>
74 #include <linux/module.h>
75 #include <linux/mutex.h>
76 #include <linux/percpu.h>
77 #include <linux/pfn.h>
78 #include <linux/slab.h>
79 #include <linux/spinlock.h>
80 #include <linux/vmalloc.h>
81 #include <linux/workqueue.h>
82 #include <linux/kmemleak.h>
83 #include <linux/sched.h>
84
85 #include <asm/cacheflush.h>
86 #include <asm/sections.h>
87 #include <asm/tlbflush.h>
88 #include <asm/io.h>
89
90 #define CREATE_TRACE_POINTS
91 #include <trace/events/percpu.h>
92
93 #include "percpu-internal.h"
94
95 /* the slots are sorted by free bytes left; sizes of 1-15 bytes share slot 1 */
96 #define PCPU_SLOT_BASE_SHIFT 5
97
98 #define PCPU_EMPTY_POP_PAGES_LOW 2
99 #define PCPU_EMPTY_POP_PAGES_HIGH 4
100
101 #ifdef CONFIG_SMP
102 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
103 #ifndef __addr_to_pcpu_ptr
104 #define __addr_to_pcpu_ptr(addr) \
105 (void __percpu *)((unsigned long)(addr) - \
106 (unsigned long)pcpu_base_addr + \
107 (unsigned long)__per_cpu_start)
108 #endif
109 #ifndef __pcpu_ptr_to_addr
110 #define __pcpu_ptr_to_addr(ptr) \
111 (void __force *)((unsigned long)(ptr) + \
112 (unsigned long)pcpu_base_addr - \
113 (unsigned long)__per_cpu_start)
114 #endif
115 #else /* CONFIG_SMP */
116 /* on UP, it's always identity mapped */
117 #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
118 #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
119 #endif /* CONFIG_SMP */
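/*
 * A minimal sketch of how the default translation above round-trips,
 * assuming an address @off bytes into the first chunk:
 *
 *	void *addr = pcpu_base_addr + off;
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *
 * __pcpu_ptr_to_addr(ptr) then recovers addr, and accessing a specific
 * cpu's copy adds that cpu's unit offset on top (see pcpu_unit_offsets
 * below).
 */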
120
121 static int pcpu_unit_pages __ro_after_init;
122 static int pcpu_unit_size __ro_after_init;
123 static int pcpu_nr_units __ro_after_init;
124 static int pcpu_atom_size __ro_after_init;
125 int pcpu_nr_slots __ro_after_init;
126 static size_t pcpu_chunk_struct_size __ro_after_init;
127
128 /* cpus with the lowest and highest unit addresses */
129 static unsigned int pcpu_low_unit_cpu __ro_after_init;
130 static unsigned int pcpu_high_unit_cpu __ro_after_init;
131
132 /* the address of the first chunk which starts with the kernel static area */
133 void *pcpu_base_addr __ro_after_init;
134 EXPORT_SYMBOL_GPL(pcpu_base_addr);
135
136 static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */
137 const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
138
139 /* group information, used for vm allocation */
140 static int pcpu_nr_groups __ro_after_init;
141 static const unsigned long *pcpu_group_offsets __ro_after_init;
142 static const size_t *pcpu_group_sizes __ro_after_init;
143
144 /*
145 * The first chunk which always exists. Note that unlike other
146 * chunks, this one can be allocated and mapped in several different
147 * ways and thus often doesn't live in the vmalloc area.
148 */
149 struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
150
151 /*
152 * Optional reserved chunk. This chunk reserves part of the first
153 * chunk and serves it for reserved allocations. When the reserved
154 * region doesn't exist, the following variable is NULL.
155 */
156 struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
157
158 DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
159 static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
160
161 struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
162
163 /* chunks which need their map areas extended, protected by pcpu_lock */
164 static LIST_HEAD(pcpu_map_extend_chunks);
165
166 /*
167 * The number of empty populated pages, protected by pcpu_lock. The
168 * reserved chunk doesn't contribute to the count.
169 */
170 int pcpu_nr_empty_pop_pages;
171
172 /*
173 * The number of populated pages in use by the allocator, protected by
174 * pcpu_lock. This number is kept per unit within a chunk (i.e. when a page gets
175 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
176 * and increments/decrements this count by 1).
177 */
178 static unsigned long pcpu_nr_populated;
179
180 /*
181 * Balance work is used to populate or destroy chunks asynchronously. We
182 * try to keep the number of populated free pages between
183 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
184 * empty chunk.
185 */
186 static void pcpu_balance_workfn(struct work_struct *work);
187 static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
188 static bool pcpu_async_enabled __read_mostly;
189 static bool pcpu_atomic_alloc_failed;
190
191 static void pcpu_schedule_balance_work(void)
192 {
193 if (pcpu_async_enabled)
194 schedule_work(&pcpu_balance_work);
195 }
196
197 /**
198 * pcpu_addr_in_chunk - check if the address is served from this chunk
199 * @chunk: chunk of interest
200 * @addr: percpu address
201 *
202 * RETURNS:
203 * True if the address is served from this chunk.
204 */
205 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
206 {
207 void *start_addr, *end_addr;
208
209 if (!chunk)
210 return false;
211
212 start_addr = chunk->base_addr + chunk->start_offset;
213 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
214 chunk->end_offset;
215
216 return addr >= start_addr && addr < end_addr;
217 }
218
219 static int __pcpu_size_to_slot(int size)
220 {
221 int highbit = fls(size); /* size is in bytes */
222 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
223 }
224
225 static int pcpu_size_to_slot(int size)
226 {
227 if (size == pcpu_unit_size)
228 return pcpu_nr_slots - 1;
229 return __pcpu_size_to_slot(size);
230 }
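/*
 * Worked example (illustrative): a chunk with 1024 free bytes has
 * fls(1024) == 11 and therefore lands in slot max(11 - 5 + 2, 1) == 8,
 * while requests under 16 bytes clamp to slot 1. A chunk whose free size
 * equals pcpu_unit_size always sits in the last slot, which is how fully
 * free chunks are found for reclaim.
 */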
231
232 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
233 {
234 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || chunk->contig_bits == 0)
235 return 0;
236
237 return pcpu_size_to_slot(chunk->free_bytes);
238 }
239
240 /* set the pointer to a chunk in a page struct */
241 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
242 {
243 page->index = (unsigned long)pcpu;
244 }
245
246 /* obtain pointer to a chunk from a page struct */
247 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
248 {
249 return (struct pcpu_chunk *)page->index;
250 }
251
252 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
253 {
254 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
255 }
256
257 static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
258 {
259 return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
260 }
261
262 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
263 unsigned int cpu, int page_idx)
264 {
265 return (unsigned long)chunk->base_addr +
266 pcpu_unit_page_offset(cpu, page_idx);
267 }
268
269 static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
270 {
271 *rs = find_next_zero_bit(bitmap, end, *rs);
272 *re = find_next_bit(bitmap, end, *rs + 1);
273 }
274
275 static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
276 {
277 *rs = find_next_bit(bitmap, end, *rs);
278 *re = find_next_zero_bit(bitmap, end, *rs + 1);
279 }
280
281 /*
282 * Bitmap region iterators. These iterate over the bitmap between
283 * [@start, @end) in @chunk. @rs and @re should be integer variables
284 * and will be set to the start and end index of the current region.
285 */
286 #define pcpu_for_each_unpop_region(bitmap, rs, re, start, end) \
287 for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
288 (rs) < (re); \
289 (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))
290
291 #define pcpu_for_each_pop_region(bitmap, rs, re, start, end) \
292 for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end)); \
293 (rs) < (re); \
294 (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))
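/*
 * Illustrative use of the iterators above (a sketch mirroring the callers
 * later in this file): counting the pages of a chunk that still lack
 * backing:
 *
 *	int rs, re, nr_unpop = 0;
 *
 *	pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
 *				   chunk->nr_pages)
 *		nr_unpop += re - rs;
 */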
295
296 /*
297 * The following are helper functions to help access bitmaps and convert
298 * between bitmap offsets and address offsets.
299 */
300 static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
301 {
302 return chunk->alloc_map +
303 (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
304 }
305
306 static unsigned long pcpu_off_to_block_index(int off)
307 {
308 return off / PCPU_BITMAP_BLOCK_BITS;
309 }
310
311 static unsigned long pcpu_off_to_block_off(int off)
312 {
313 return off & (PCPU_BITMAP_BLOCK_BITS - 1);
314 }
315
316 static unsigned long pcpu_block_off_to_off(int index, int off)
317 {
318 return index * PCPU_BITMAP_BLOCK_BITS + off;
319 }
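/*
 * Worked example (illustrative): assuming PCPU_BITMAP_BLOCK_BITS == 1024,
 * chunk offset 2500 decomposes into block index 2500 / 1024 == 2 and
 * block offset 2500 & 1023 == 452, and pcpu_block_off_to_off(2, 452)
 * reassembles the original offset of 2500.
 */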
320
321 /**
322 * pcpu_next_md_free_region - finds the next hint free area
323 * @chunk: chunk of interest
324 * @bit_off: chunk offset
325 * @bits: size of free area
326 *
327 * Helper function for pcpu_for_each_md_free_region. It checks
328 * block->contig_hint and performs aggregation across blocks to find the
329 * next hint. It modifies bit_off and bits in-place to be consumed in the
330 * loop.
331 */
332 static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
333 int *bits)
334 {
335 int i = pcpu_off_to_block_index(*bit_off);
336 int block_off = pcpu_off_to_block_off(*bit_off);
337 struct pcpu_block_md *block;
338
339 *bits = 0;
340 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
341 block++, i++) {
342 /* handles contig area across blocks */
343 if (*bits) {
344 *bits += block->left_free;
345 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
346 continue;
347 return;
348 }
349
350 /*
351 * This checks three things. First, is there a contig_hint
352 * to check? Second, have we checked this hint before (by
353 * comparing against block_off)? Third, is this the same as
354 * the right contig hint? In the last case, it spills over into
355 * the next block and should be handled by the contig area
356 * across blocks code.
357 */
358 *bits = block->contig_hint;
359 if (*bits && block->contig_hint_start >= block_off &&
360 *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
361 *bit_off = pcpu_block_off_to_off(i,
362 block->contig_hint_start);
363 return;
364 }
365 /* reset to satisfy the second predicate above */
366 block_off = 0;
367
368 *bits = block->right_free;
369 *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
370 }
371 }
372
373 /**
374 * pcpu_next_fit_region - finds fit areas for a given allocation request
375 * @chunk: chunk of interest
376 * @alloc_bits: size of allocation
377 * @align: alignment of area (max PAGE_SIZE)
378 * @bit_off: chunk offset
379 * @bits: size of free area
380 *
381 * Finds the next free region that is viable for use with a given size and
382 * alignment. It only returns a region if there is a valid area to be used
383 * for this allocation. If the allocation request fits within a block,
384 * block->first_free is returned so the space before the contig hint can
385 * be checked first.
386 */
387 static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
388 int align, int *bit_off, int *bits)
389 {
390 int i = pcpu_off_to_block_index(*bit_off);
391 int block_off = pcpu_off_to_block_off(*bit_off);
392 struct pcpu_block_md *block;
393
394 *bits = 0;
395 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
396 block++, i++) {
397 /* handles contig area across blocks */
398 if (*bits) {
399 *bits += block->left_free;
400 if (*bits >= alloc_bits)
401 return;
402 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
403 continue;
404 }
405
406 /* check block->contig_hint */
407 *bits = ALIGN(block->contig_hint_start, align) -
408 block->contig_hint_start;
409 /*
410 * This uses the block offset to determine if this has been
411 * checked in the prior iteration.
412 */
413 if (block->contig_hint &&
414 block->contig_hint_start >= block_off &&
415 block->contig_hint >= *bits + alloc_bits) {
416 *bits += alloc_bits + block->contig_hint_start -
417 block->first_free;
418 *bit_off = pcpu_block_off_to_off(i, block->first_free);
419 return;
420 }
421 /* reset to satisfy the second predicate above */
422 block_off = 0;
423
424 *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
425 align);
426 *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
427 *bit_off = pcpu_block_off_to_off(i, *bit_off);
428 if (*bits >= alloc_bits)
429 return;
430 }
431
432 /* no valid offsets were found - fail condition */
433 *bit_off = pcpu_chunk_map_bits(chunk);
434 }
435
436 /*
437 * Metadata free area iterators. These perform aggregation of free areas
438 * based on the metadata blocks and return the offset @bit_off and size in
439 * bits of the free area @bits. pcpu_for_each_fit_region only returns when
440 * a fit is found for the allocation request.
441 */
442 #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \
443 for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \
444 (bit_off) < pcpu_chunk_map_bits((chunk)); \
445 (bit_off) += (bits) + 1, \
446 pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
447
448 #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \
449 for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
450 &(bits)); \
451 (bit_off) < pcpu_chunk_map_bits((chunk)); \
452 (bit_off) += (bits), \
453 pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
454 &(bits)))
455
456 /**
457 * pcpu_mem_zalloc - allocate memory
458 * @size: bytes to allocate
459 * @gfp: allocation flags
460 *
461 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
462 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
463 * This is to facilitate passing through whitelisted flags. The
464 * returned memory is always zeroed.
465 *
466 * RETURNS:
467 * Pointer to the allocated area on success, NULL on failure.
468 */
469 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
470 {
471 if (WARN_ON_ONCE(!slab_is_available()))
472 return NULL;
473
474 if (size <= PAGE_SIZE)
475 return kzalloc(size, gfp);
476 else
477 return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
478 }
479
480 /**
481 * pcpu_mem_free - free memory
482 * @ptr: memory to free
483 *
484 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
485 */
486 static void pcpu_mem_free(void *ptr)
487 {
488 kvfree(ptr);
489 }
490
491 /**
492 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
493 * @chunk: chunk of interest
494 * @oslot: the previous slot it was on
495 *
496 * This function is called after an allocation or free changed @chunk.
497 * New slot according to the changed state is determined and @chunk is
498 * moved to the slot. Note that the reserved chunk is never put on
499 * chunk slots.
500 *
501 * CONTEXT:
502 * pcpu_lock.
503 */
504 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
505 {
506 int nslot = pcpu_chunk_slot(chunk);
507
508 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
509 if (oslot < nslot)
510 list_move(&chunk->list, &pcpu_slot[nslot]);
511 else
512 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
513 }
514 }
515
516 /**
517 * pcpu_cnt_pop_pages - counts populated backing pages in range
518 * @chunk: chunk of interest
519 * @bit_off: start offset
520 * @bits: size of area to check
521 *
522 * Calculates the number of populated pages in the region
523 * [page_start, page_end). This count is used to track how many empty
524 * populated pages are available and to decide if async work should be scheduled.
525 *
526 * RETURNS:
527 * The nr of populated pages.
528 */
529 static inline int pcpu_cnt_pop_pages(struct pcpu_chunk *chunk, int bit_off,
530 int bits)
531 {
532 int page_start = PFN_UP(bit_off * PCPU_MIN_ALLOC_SIZE);
533 int page_end = PFN_DOWN((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
534
535 if (page_start >= page_end)
536 return 0;
537
538 /*
539 * bitmap_weight counts the number of bits set in a bitmap up to
540 * the specified number of bits. This is counting the populated
541 * pages up to page_end and then subtracting the populated pages
542 * up to page_start to count the populated pages in
543 * [page_start, page_end).
544 */
545 return bitmap_weight(chunk->populated, page_end) -
546 bitmap_weight(chunk->populated, page_start);
547 }
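/*
 * Worked example for the above (illustrative): if pages 0-3 of a chunk
 * are populated and a free area exactly covers pages 1 and 2, then
 * bitmap_weight(populated, 3) - bitmap_weight(populated, 1) == 2 empty
 * populated pages are attributed to that area. Partially covered pages
 * at either end are excluded by the PFN_UP/PFN_DOWN rounding above.
 */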
548
549 /**
550 * pcpu_chunk_update - updates the chunk metadata given a free area
551 * @chunk: chunk of interest
552 * @bit_off: chunk offset
553 * @bits: size of free area
554 *
555 * This updates the chunk's contig hint and starting offset given a free area.
556 * If a free area of equal size is found, the start with the better alignment is kept.
557 */
558 static void pcpu_chunk_update(struct pcpu_chunk *chunk, int bit_off, int bits)
559 {
560 if (bits > chunk->contig_bits) {
561 chunk->contig_bits_start = bit_off;
562 chunk->contig_bits = bits;
563 } else if (bits == chunk->contig_bits && chunk->contig_bits_start &&
564 (!bit_off ||
565 __ffs(bit_off) > __ffs(chunk->contig_bits_start))) {
566 /* use the start with the best alignment */
567 chunk->contig_bits_start = bit_off;
568 }
569 }
570
571 /**
572 * pcpu_chunk_refresh_hint - updates metadata about a chunk
573 * @chunk: chunk of interest
574 *
575 * Iterates over the metadata blocks to find the largest contig area.
576 * It also counts the populated pages and uses the delta to update the
577 * global count.
578 *
579 * Updates:
580 * chunk->contig_bits
581 * chunk->contig_bits_start
582 * nr_empty_pop_pages (chunk and global)
583 */
584 static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk)
585 {
586 int bit_off, bits, nr_empty_pop_pages;
587
588 /* clear metadata */
589 chunk->contig_bits = 0;
590
591 bit_off = chunk->first_bit;
592 bits = nr_empty_pop_pages = 0;
593 pcpu_for_each_md_free_region(chunk, bit_off, bits) {
594 pcpu_chunk_update(chunk, bit_off, bits);
595
596 nr_empty_pop_pages += pcpu_cnt_pop_pages(chunk, bit_off, bits);
597 }
598
599 /*
600 * Keep track of nr_empty_pop_pages.
601 *
602 * The chunk maintains the previous number of free pages it held,
603 * so the delta is used to update the global counter. The reserved
604 * chunk is not part of the free page count as its pages are populated
605 * at init and are dedicated to serving reserved allocations.
606 */
607 if (chunk != pcpu_reserved_chunk)
608 pcpu_nr_empty_pop_pages +=
609 (nr_empty_pop_pages - chunk->nr_empty_pop_pages);
610
611 chunk->nr_empty_pop_pages = nr_empty_pop_pages;
612 }
613
614 /**
615 * pcpu_block_update - updates a block given a free area
616 * @block: block of interest
617 * @start: start offset in block
618 * @end: end offset in block
619 *
620 * Updates a block given a known free area. The region [start, end) is
621 * expected to be the entirety of the free area within a block. Chooses
622 * the best starting offset if the contig hints are equal.
623 */
624 static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
625 {
626 int contig = end - start;
627
628 block->first_free = min(block->first_free, start);
629 if (start == 0)
630 block->left_free = contig;
631
632 if (end == PCPU_BITMAP_BLOCK_BITS)
633 block->right_free = contig;
634
635 if (contig > block->contig_hint) {
636 block->contig_hint_start = start;
637 block->contig_hint = contig;
638 } else if (block->contig_hint_start && contig == block->contig_hint &&
639 (!start || __ffs(start) > __ffs(block->contig_hint_start))) {
640 /* use the start with the best alignment */
641 block->contig_hint_start = start;
642 }
643 }
644
645 /**
646 * pcpu_block_refresh_hint - scans a block to refresh its metadata
647 * @chunk: chunk of interest
648 * @index: index of the metadata block
649 *
650 * Scans over the block beginning at first_free and updates the block
651 * metadata accordingly.
652 */
653 static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
654 {
655 struct pcpu_block_md *block = chunk->md_blocks + index;
656 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
657 int rs, re; /* region start, region end */
658
659 /* clear hints */
660 block->contig_hint = 0;
661 block->left_free = block->right_free = 0;
662
663 /* iterate over free areas and update the contig hints */
664 pcpu_for_each_unpop_region(alloc_map, rs, re, block->first_free,
665 PCPU_BITMAP_BLOCK_BITS) {
666 pcpu_block_update(block, rs, re);
667 }
668 }
669
670 /**
671 * pcpu_block_update_hint_alloc - update hint on allocation path
672 * @chunk: chunk of interest
673 * @bit_off: chunk offset
674 * @bits: size of request
675 *
676 * Updates metadata for the allocation path. The metadata only has to be
677 * refreshed by a full scan iff the chunk's contig hint is broken. Block level
678 * scans are required if the block's contig hint is broken.
679 */
680 static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
681 int bits)
682 {
683 struct pcpu_block_md *s_block, *e_block, *block;
684 int s_index, e_index; /* block indexes of the freed allocation */
685 int s_off, e_off; /* block offsets of the freed allocation */
686
687 /*
688 * Calculate per block offsets.
689 * The calculation uses an inclusive range, but the resulting offsets
690 * are [start, end). e_index always points to the last block in the
691 * range.
692 */
693 s_index = pcpu_off_to_block_index(bit_off);
694 e_index = pcpu_off_to_block_index(bit_off + bits - 1);
695 s_off = pcpu_off_to_block_off(bit_off);
696 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
697
698 s_block = chunk->md_blocks + s_index;
699 e_block = chunk->md_blocks + e_index;
700
701 /*
702 * Update s_block.
703 * block->first_free must be updated if the allocation takes its place.
704 * If the allocation breaks the contig_hint, a scan is required to
705 * restore this hint.
706 */
707 if (s_off == s_block->first_free)
708 s_block->first_free = find_next_zero_bit(
709 pcpu_index_alloc_map(chunk, s_index),
710 PCPU_BITMAP_BLOCK_BITS,
711 s_off + bits);
712
713 if (s_off >= s_block->contig_hint_start &&
714 s_off < s_block->contig_hint_start + s_block->contig_hint) {
715 /* block contig hint is broken - scan to fix it */
716 pcpu_block_refresh_hint(chunk, s_index);
717 } else {
718 /* update left and right contig manually */
719 s_block->left_free = min(s_block->left_free, s_off);
720 if (s_index == e_index)
721 s_block->right_free = min_t(int, s_block->right_free,
722 PCPU_BITMAP_BLOCK_BITS - e_off);
723 else
724 s_block->right_free = 0;
725 }
726
727 /*
728 * Update e_block.
729 */
730 if (s_index != e_index) {
731 /*
732 * When the allocation is across blocks, the end is along
733 * the left part of the e_block.
734 */
735 e_block->first_free = find_next_zero_bit(
736 pcpu_index_alloc_map(chunk, e_index),
737 PCPU_BITMAP_BLOCK_BITS, e_off);
738
739 if (e_off == PCPU_BITMAP_BLOCK_BITS) {
740 /* e_block is fully allocated - let the loop below reset its hints */
741 e_block++;
742 } else {
743 if (e_off > e_block->contig_hint_start) {
744 /* contig hint is broken - scan to fix it */
745 pcpu_block_refresh_hint(chunk, e_index);
746 } else {
747 e_block->left_free = 0;
748 e_block->right_free =
749 min_t(int, e_block->right_free,
750 PCPU_BITMAP_BLOCK_BITS - e_off);
751 }
752 }
753
754 /* update in-between md_blocks */
755 for (block = s_block + 1; block < e_block; block++) {
756 block->contig_hint = 0;
757 block->left_free = 0;
758 block->right_free = 0;
759 }
760 }
761
762 /*
763 * The only time a full chunk scan is required is if the chunk
764 * contig hint is broken. Otherwise, it means a smaller space
765 * was used and therefore the chunk contig hint is still correct.
766 */
767 if (bit_off >= chunk->contig_bits_start &&
768 bit_off < chunk->contig_bits_start + chunk->contig_bits)
769 pcpu_chunk_refresh_hint(chunk);
770 }
771
772 /**
773 * pcpu_block_update_hint_free - updates the block hints on the free path
774 * @chunk: chunk of interest
775 * @bit_off: chunk offset
776 * @bits: size of request
777 *
778 * Updates metadata for the free path. This avoids a blind block
779 * refresh by making use of the block contig hints. If this fails, it scans
780 * forward and backward to determine the extent of the free area. This is
781 * capped at the boundary of blocks.
782 *
783 * A chunk update is triggered if a page becomes free, a block becomes free,
784 * or the free spans across blocks. This tradeoff is to minimize iterating
785 * over the block metadata to update chunk->contig_bits. chunk->contig_bits
786 * may be off by up to a page, but it will never be more than the available
787 * space. If the contig hint is contained in one block, it will be accurate.
788 */
789 static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
790 int bits)
791 {
792 struct pcpu_block_md *s_block, *e_block, *block;
793 int s_index, e_index; /* block indexes of the freed allocation */
794 int s_off, e_off; /* block offsets of the freed allocation */
795 int start, end; /* start and end of the whole free area */
796
797 /*
798 * Calculate per block offsets.
799 * The calculation uses an inclusive range, but the resulting offsets
800 * are [start, end). e_index always points to the last block in the
801 * range.
802 */
803 s_index = pcpu_off_to_block_index(bit_off);
804 e_index = pcpu_off_to_block_index(bit_off + bits - 1);
805 s_off = pcpu_off_to_block_off(bit_off);
806 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
807
808 s_block = chunk->md_blocks + s_index;
809 e_block = chunk->md_blocks + e_index;
810
811 /*
812 * Check if the freed area aligns with the block->contig_hint.
813 * If it does, then the scan to find the beginning/end of the
814 * larger free area can be avoided.
815 *
816 * start and end refer to beginning and end of the free area
817 * within their respective blocks. This is not necessarily
818 * the entire free area as it may span blocks past the beginning
819 * or end of the block.
820 */
821 start = s_off;
822 if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
823 start = s_block->contig_hint_start;
824 } else {
825 /*
826 * Scan backwards to find the extent of the free area.
827 * find_last_bit returns the starting bit, so if the start bit
828 * is returned, that means there was no last bit and the
829 * remainder of the chunk is free.
830 */
831 int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
832 start);
833 start = (start == l_bit) ? 0 : l_bit + 1;
834 }
835
836 end = e_off;
837 if (e_off == e_block->contig_hint_start)
838 end = e_block->contig_hint_start + e_block->contig_hint;
839 else
840 end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
841 PCPU_BITMAP_BLOCK_BITS, end);
842
843 /* update s_block */
844 e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
845 pcpu_block_update(s_block, start, e_off);
846
847 /* the freed area spans more than one block */
848 if (s_index != e_index) {
849 /* update e_block */
850 pcpu_block_update(e_block, 0, end);
851
852 /* reset md_blocks in the middle */
853 for (block = s_block + 1; block < e_block; block++) {
854 block->first_free = 0;
855 block->contig_hint_start = 0;
856 block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
857 block->left_free = PCPU_BITMAP_BLOCK_BITS;
858 block->right_free = PCPU_BITMAP_BLOCK_BITS;
859 }
860 }
861
862 /*
863 * Refresh chunk metadata when the free makes a page free, a block
864 * free, or spans across blocks. The contig hint may be off by up to
865 * a page, but if the hint is contained in a block, it will be accurate
866 * with the else condition below.
867 */
868 if ((ALIGN_DOWN(end, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS)) >
869 ALIGN(start, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS))) ||
870 s_index != e_index)
871 pcpu_chunk_refresh_hint(chunk);
872 else
873 pcpu_chunk_update(chunk, pcpu_block_off_to_off(s_index, start),
874 s_block->contig_hint);
875 }
876
877 /**
878 * pcpu_is_populated - determines if the region is populated
879 * @chunk: chunk of interest
880 * @bit_off: chunk offset
881 * @bits: size of area
882 * @next_off: return value for the next offset to start searching
883 *
884 * For atomic allocations, check if the backing pages are populated.
885 *
886 * RETURNS:
887 * True if the backing pages are populated.
888 * @next_off is set to skip over unpopulated regions in pcpu_find_block_fit.
889 */
890 static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
891 int *next_off)
892 {
893 int page_start, page_end, rs, re;
894
895 page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
896 page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
897
898 rs = page_start;
899 pcpu_next_unpop(chunk->populated, &rs, &re, page_end);
900 if (rs >= page_end)
901 return true;
902
903 *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
904 return false;
905 }
906
907 /**
908 * pcpu_find_block_fit - finds the block index to start searching
909 * @chunk: chunk of interest
910 * @alloc_bits: size of request in allocation units
911 * @align: alignment of area (max PAGE_SIZE bytes)
912 * @pop_only: use populated regions only
913 *
914 * Given a chunk and an allocation spec, find the offset to begin searching
915 * for a free region. This iterates over the bitmap metadata blocks to
916 * find an offset that will be guaranteed to fit the requirements. It is
917 * not quite first fit as if the allocation does not fit in the contig hint
918 * of a block or chunk, it is skipped. This errs on the side of caution
919 * to prevent excess iteration. Poor alignment can cause the allocator to
920 * skip over blocks and chunks that have valid free areas.
921 *
922 * RETURNS:
923 * The offset in the bitmap to begin searching.
924 * -1 if no offset is found.
925 */
926 static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
927 size_t align, bool pop_only)
928 {
929 int bit_off, bits, next_off;
930
931 /*
932 * Check to see if the allocation can fit in the chunk's contig hint.
933 * This is an optimization to prevent scanning by assuming if it
934 * cannot fit in the global hint, there is memory pressure and creating
935 * a new chunk would happen soon.
936 */
937 bit_off = ALIGN(chunk->contig_bits_start, align) -
938 chunk->contig_bits_start;
939 if (bit_off + alloc_bits > chunk->contig_bits)
940 return -1;
941
942 bit_off = chunk->first_bit;
943 bits = 0;
944 pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
945 if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
946 &next_off))
947 break;
948
949 bit_off = next_off;
950 bits = 0;
951 }
952
953 if (bit_off == pcpu_chunk_map_bits(chunk))
954 return -1;
955
956 return bit_off;
957 }
958
959 /**
960 * pcpu_alloc_area - allocates an area from a pcpu_chunk
961 * @chunk: chunk of interest
962 * @alloc_bits: size of request in allocation units
963 * @align: alignment of area (max PAGE_SIZE)
964 * @start: bit_off to start searching
965 *
966 * This function takes in a @start offset to begin searching to fit an
967 * allocation of @alloc_bits with alignment @align. It needs to scan
968 * the allocation map because if it fits within the block's contig hint,
969 * @start will be block->first_free. This is an attempt to fill the
970 * allocation prior to breaking the contig hint. The allocation and
971 * boundary maps are updated accordingly if it confirms a valid
972 * free area.
973 *
974 * RETURNS:
975 * Allocated addr offset in @chunk on success.
976 * -1 if no matching area is found.
977 */
978 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
979 size_t align, int start)
980 {
981 size_t align_mask = (align) ? (align - 1) : 0;
982 int bit_off, end, oslot;
983
984 lockdep_assert_held(&pcpu_lock);
985
986 oslot = pcpu_chunk_slot(chunk);
987
988 /*
989 * Search to find a fit.
990 */
991 end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
992 pcpu_chunk_map_bits(chunk));
993 bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start,
994 alloc_bits, align_mask);
995 if (bit_off >= end)
996 return -1;
997
998 /* update alloc map */
999 bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1000
1001 /* update boundary map */
1002 set_bit(bit_off, chunk->bound_map);
1003 bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1004 set_bit(bit_off + alloc_bits, chunk->bound_map);
1005
1006 chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1007
1008 /* update first free bit */
1009 if (bit_off == chunk->first_bit)
1010 chunk->first_bit = find_next_zero_bit(
1011 chunk->alloc_map,
1012 pcpu_chunk_map_bits(chunk),
1013 bit_off + alloc_bits);
1014
1015 pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1016
1017 pcpu_chunk_relocate(chunk, oslot);
1018
1019 return bit_off * PCPU_MIN_ALLOC_SIZE;
1020 }
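/*
 * Illustrative sketch of the map updates above: an 8 bit allocation at
 * bit_off 5 sets alloc_map bits 5-12, sets bound_map bits 5 and 13 and
 * clears bound_map bits 6-12, so the allocation's extent is recoverable
 * from the boundary map alone. The returned offset is 5 *
 * PCPU_MIN_ALLOC_SIZE bytes.
 */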
1021
1022 /**
1023 * pcpu_free_area - frees the allocation at the corresponding offset
1024 * @chunk: chunk of interest
1025 * @off: addr offset into chunk
1026 *
1027 * This function determines the size of an allocation to free using
1028 * the boundary bitmap and clears the allocation map.
1029 */
1030 static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
1031 {
1032 int bit_off, bits, end, oslot;
1033
1034 lockdep_assert_held(&pcpu_lock);
1035 pcpu_stats_area_dealloc(chunk);
1036
1037 oslot = pcpu_chunk_slot(chunk);
1038
1039 bit_off = off / PCPU_MIN_ALLOC_SIZE;
1040
1041 /* find end index */
1042 end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1043 bit_off + 1);
1044 bits = end - bit_off;
1045 bitmap_clear(chunk->alloc_map, bit_off, bits);
1046
1047 /* update metadata */
1048 chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE;
1049
1050 /* update first free bit */
1051 chunk->first_bit = min(chunk->first_bit, bit_off);
1052
1053 pcpu_block_update_hint_free(chunk, bit_off, bits);
1054
1055 pcpu_chunk_relocate(chunk, oslot);
1056 }
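/*
 * Continuing the example at pcpu_alloc_area() (illustrative): freeing
 * offset 5 * PCPU_MIN_ALLOC_SIZE converts back to bit_off 5, finds the
 * next bound_map bit at 13, and clears bits = 13 - 5 == 8 bits of the
 * allocation map - exactly the area that was allocated.
 */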
1057
1058 static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1059 {
1060 struct pcpu_block_md *md_block;
1061
1062 for (md_block = chunk->md_blocks;
1063 md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1064 md_block++) {
1065 md_block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1066 md_block->left_free = PCPU_BITMAP_BLOCK_BITS;
1067 md_block->right_free = PCPU_BITMAP_BLOCK_BITS;
1068 }
1069 }
1070
1071 /**
1072 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1073 * @tmp_addr: the start of the region served
1074 * @map_size: size of the region served
1075 *
1076 * This is responsible for creating the chunks that serve the first chunk. The
1077 * base_addr is @tmp_addr aligned down to a page boundary while the region
1078 * end is aligned up. The start and end offsets are tracked to determine the
1079 * region actually served, so the bitmap allocator never sees partial blocks.
1080 *
1081 * RETURNS:
1082 * Chunk serving the region at @tmp_addr of @map_size.
1083 */
1084 static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1085 int map_size)
1086 {
1087 struct pcpu_chunk *chunk;
1088 unsigned long aligned_addr, lcm_align;
1089 int start_offset, offset_bits, region_size, region_bits;
1090
1091 /* region calculations */
1092 aligned_addr = tmp_addr & PAGE_MASK;
1093
1094 start_offset = tmp_addr - aligned_addr;
1095
1096 /*
1097 * Align the end of the region with the LCM of PAGE_SIZE and
1098 * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of
1099 * the other.
1100 */
1101 lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1102 region_size = ALIGN(start_offset + map_size, lcm_align);
1103
1104 /* allocate chunk */
1105 chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
1106 BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long),
1107 0);
1108
1109 INIT_LIST_HEAD(&chunk->list);
1110
1111 chunk->base_addr = (void *)aligned_addr;
1112 chunk->start_offset = start_offset;
1113 chunk->end_offset = region_size - chunk->start_offset - map_size;
1114
1115 chunk->nr_pages = region_size >> PAGE_SHIFT;
1116 region_bits = pcpu_chunk_map_bits(chunk);
1117
1118 chunk->alloc_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits) *
1119 sizeof(chunk->alloc_map[0]), 0);
1120 chunk->bound_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits + 1) *
1121 sizeof(chunk->bound_map[0]), 0);
1122 chunk->md_blocks = memblock_virt_alloc(pcpu_chunk_nr_blocks(chunk) *
1123 sizeof(chunk->md_blocks[0]), 0);
1124 pcpu_init_md_blocks(chunk);
1125
1126 /* manage populated page bitmap */
1127 chunk->immutable = true;
1128 bitmap_fill(chunk->populated, chunk->nr_pages);
1129 chunk->nr_populated = chunk->nr_pages;
1130 chunk->nr_empty_pop_pages =
1131 pcpu_cnt_pop_pages(chunk, start_offset / PCPU_MIN_ALLOC_SIZE,
1132 map_size / PCPU_MIN_ALLOC_SIZE);
1133
1134 chunk->contig_bits = map_size / PCPU_MIN_ALLOC_SIZE;
1135 chunk->free_bytes = map_size;
1136
1137 if (chunk->start_offset) {
1138 /* hide the beginning of the bitmap */
1139 offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1140 bitmap_set(chunk->alloc_map, 0, offset_bits);
1141 set_bit(0, chunk->bound_map);
1142 set_bit(offset_bits, chunk->bound_map);
1143
1144 chunk->first_bit = offset_bits;
1145
1146 pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1147 }
1148
1149 if (chunk->end_offset) {
1150 /* hide the end of the bitmap */
1151 offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1152 bitmap_set(chunk->alloc_map,
1153 pcpu_chunk_map_bits(chunk) - offset_bits,
1154 offset_bits);
1155 set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1156 chunk->bound_map);
1157 set_bit(region_bits, chunk->bound_map);
1158
1159 pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1160 - offset_bits, offset_bits);
1161 }
1162
1163 return chunk;
1164 }
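/*
 * Worked example (illustrative, assuming 4K pages and a 4 byte
 * PCPU_MIN_ALLOC_SIZE): if @tmp_addr begins 0x400 bytes into a page,
 * start_offset is 0x400 and the first 0x400 / 4 == 256 bits of the
 * alloc_map are marked allocated above, so the bitmap allocator never
 * hands out the region before the area actually served. The tail up to
 * the lcm-aligned region end is hidden the same way.
 */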
1165
1166 static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
1167 {
1168 struct pcpu_chunk *chunk;
1169 int region_bits;
1170
1171 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1172 if (!chunk)
1173 return NULL;
1174
1175 INIT_LIST_HEAD(&chunk->list);
1176 chunk->nr_pages = pcpu_unit_pages;
1177 region_bits = pcpu_chunk_map_bits(chunk);
1178
1179 chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1180 sizeof(chunk->alloc_map[0]), gfp);
1181 if (!chunk->alloc_map)
1182 goto alloc_map_fail;
1183
1184 chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1185 sizeof(chunk->bound_map[0]), gfp);
1186 if (!chunk->bound_map)
1187 goto bound_map_fail;
1188
1189 chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1190 sizeof(chunk->md_blocks[0]), gfp);
1191 if (!chunk->md_blocks)
1192 goto md_blocks_fail;
1193
1194 pcpu_init_md_blocks(chunk);
1195
1196 /* init metadata */
1197 chunk->contig_bits = region_bits;
1198 chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1199
1200 return chunk;
1201
1202 md_blocks_fail:
1203 pcpu_mem_free(chunk->bound_map);
1204 bound_map_fail:
1205 pcpu_mem_free(chunk->alloc_map);
1206 alloc_map_fail:
1207 pcpu_mem_free(chunk);
1208
1209 return NULL;
1210 }
1211
1212 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1213 {
1214 if (!chunk)
1215 return;
1216 pcpu_mem_free(chunk->md_blocks);
1217 pcpu_mem_free(chunk->bound_map);
1218 pcpu_mem_free(chunk->alloc_map);
1219 pcpu_mem_free(chunk);
1220 }
1221
1222 /**
1223 * pcpu_chunk_populated - post-population bookkeeping
1224 * @chunk: pcpu_chunk which got populated
1225 * @page_start: the start page
1226 * @page_end: the end page
1227 * @for_alloc: if this is to populate for allocation
1228 *
1229 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1230 * the bookkeeping information accordingly. Must be called after each
1231 * successful population.
1232 *
1233 * If this is @for_alloc, do not increment pcpu_nr_empty_pop_pages because it
1234 * is to serve an allocation in that area.
1235 */
1236 static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1237 int page_end, bool for_alloc)
1238 {
1239 int nr = page_end - page_start;
1240
1241 lockdep_assert_held(&pcpu_lock);
1242
1243 bitmap_set(chunk->populated, page_start, nr);
1244 chunk->nr_populated += nr;
1245 pcpu_nr_populated += nr;
1246
1247 if (!for_alloc) {
1248 chunk->nr_empty_pop_pages += nr;
1249 pcpu_nr_empty_pop_pages += nr;
1250 }
1251 }
1252
1253 /**
1254 * pcpu_chunk_depopulated - post-depopulation bookkeeping
1255 * @chunk: pcpu_chunk which got depopulated
1256 * @page_start: the start page
1257 * @page_end: the end page
1258 *
1259 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1260 * Update the bookkeeping information accordingly. Must be called after
1261 * each successful depopulation.
1262 */
1263 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1264 int page_start, int page_end)
1265 {
1266 int nr = page_end - page_start;
1267
1268 lockdep_assert_held(&pcpu_lock);
1269
1270 bitmap_clear(chunk->populated, page_start, nr);
1271 chunk->nr_populated -= nr;
1272 chunk->nr_empty_pop_pages -= nr;
1273 pcpu_nr_empty_pop_pages -= nr;
1274 pcpu_nr_populated -= nr;
1275 }
1276
1277 /*
1278 * Chunk management implementation.
1279 *
1280 * To allow different implementations, chunk alloc/free and
1281 * [de]population are implemented in a separate file which is pulled
1282 * into this file and compiled together. The following functions
1283 * should be implemented.
1284 *
1285 * pcpu_populate_chunk - populate the specified range of a chunk
1286 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
1287 * pcpu_create_chunk - create a new chunk
1288 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
1289 * pcpu_addr_to_page - translate address to the corresponding struct page
1290 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
1291 */
1292 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1293 int page_start, int page_end, gfp_t gfp);
1294 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1295 int page_start, int page_end);
1296 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
1297 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1298 static struct page *pcpu_addr_to_page(void *addr);
1299 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1300
1301 #ifdef CONFIG_NEED_PER_CPU_KM
1302 #include "percpu-km.c"
1303 #else
1304 #include "percpu-vm.c"
1305 #endif
1306
1307 /**
1308 * pcpu_chunk_addr_search - determine chunk containing specified address
1309 * @addr: address for which the chunk needs to be determined.
1310 *
1311 * This is an internal function that handles all but static allocations.
1312 * Static percpu address values should never be passed into the allocator.
1313 *
1314 * RETURNS:
1315 * The address of the found chunk.
1316 */
1317 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1318 {
1319 /* is it in the dynamic region (first chunk)? */
1320 if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1321 return pcpu_first_chunk;
1322
1323 /* is it in the reserved region? */
1324 if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1325 return pcpu_reserved_chunk;
1326
1327 /*
1328 * The address is relative to unit0 which might be unused and
1329 * thus unmapped. Offset the address to the unit space of the
1330 * current processor before looking it up in the vmalloc
1331 * space. Note that any possible cpu id can be used here, so
1332 * there's no need to worry about preemption or cpu hotplug.
1333 */
1334 addr += pcpu_unit_offsets[raw_smp_processor_id()];
1335 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1336 }
1337
1338 /**
1339 * pcpu_alloc - the percpu allocator
1340 * @size: size of area to allocate in bytes
1341 * @align: alignment of area (max PAGE_SIZE)
1342 * @reserved: allocate from the reserved chunk if available
1343 * @gfp: allocation flags
1344 *
1345 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1346 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1347 * then no warning will be triggered on invalid or failed allocation
1348 * requests.
1349 *
1350 * RETURNS:
1351 * Percpu pointer to the allocated area on success, NULL on failure.
1352 */
1353 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1354 gfp_t gfp)
1355 {
1356 /* whitelisted flags that can be passed to the backing allocators */
1357 gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1358 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1359 bool do_warn = !(gfp & __GFP_NOWARN);
1360 static int warn_limit = 10;
1361 struct pcpu_chunk *chunk;
1362 const char *err;
1363 int slot, off, cpu, ret;
1364 unsigned long flags;
1365 void __percpu *ptr;
1366 size_t bits, bit_align;
1367
1368 /*
1369 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1370 * therefore alignment must be a minimum of that many bytes.
1371 * An allocation may have internal fragmentation from rounding up
1372 * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes.
1373 */
1374 if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1375 align = PCPU_MIN_ALLOC_SIZE;
1376
1377 size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1378 bits = size >> PCPU_MIN_ALLOC_SHIFT;
1379 bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
1380
1381 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1382 !is_power_of_2(align))) {
1383 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1384 size, align);
1385 return NULL;
1386 }
1387
1388 if (!is_atomic) {
1389 /*
1390 * pcpu_balance_workfn() allocates memory under this mutex,
1391 * and it may wait for memory reclaim. Allow current task
1392 * to become OOM victim, in case of memory pressure.
1393 */
1394 if (gfp & __GFP_NOFAIL)
1395 mutex_lock(&pcpu_alloc_mutex);
1396 else if (mutex_lock_killable(&pcpu_alloc_mutex))
1397 return NULL;
1398 }
1399
1400 spin_lock_irqsave(&pcpu_lock, flags);
1401
1402 /* serve reserved allocations from the reserved chunk if available */
1403 if (reserved && pcpu_reserved_chunk) {
1404 chunk = pcpu_reserved_chunk;
1405
1406 off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1407 if (off < 0) {
1408 err = "alloc from reserved chunk failed";
1409 goto fail_unlock;
1410 }
1411
1412 off = pcpu_alloc_area(chunk, bits, bit_align, off);
1413 if (off >= 0)
1414 goto area_found;
1415
1416 err = "alloc from reserved chunk failed";
1417 goto fail_unlock;
1418 }
1419
1420 restart:
1421 /* search through normal chunks */
1422 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1423 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1424 off = pcpu_find_block_fit(chunk, bits, bit_align,
1425 is_atomic);
1426 if (off < 0)
1427 continue;
1428
1429 off = pcpu_alloc_area(chunk, bits, bit_align, off);
1430 if (off >= 0)
1431 goto area_found;
1432
1433 }
1434 }
1435
1436 spin_unlock_irqrestore(&pcpu_lock, flags);
1437
1438 /*
1439 * No space left. Create a new chunk. We don't want multiple
1440 * tasks to create chunks simultaneously. Serialize and create iff
1441 * there's still no empty chunk after grabbing the mutex.
1442 */
1443 if (is_atomic) {
1444 err = "atomic alloc failed, no space left";
1445 goto fail;
1446 }
1447
1448 if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
1449 chunk = pcpu_create_chunk(pcpu_gfp);
1450 if (!chunk) {
1451 err = "failed to allocate new chunk";
1452 goto fail;
1453 }
1454
1455 spin_lock_irqsave(&pcpu_lock, flags);
1456 pcpu_chunk_relocate(chunk, -1);
1457 } else {
1458 spin_lock_irqsave(&pcpu_lock, flags);
1459 }
1460
1461 goto restart;
1462
1463 area_found:
1464 pcpu_stats_area_alloc(chunk, size);
1465 spin_unlock_irqrestore(&pcpu_lock, flags);
1466
1467 /* populate if not all pages are already there */
1468 if (!is_atomic) {
1469 int page_start, page_end, rs, re;
1470
1471 page_start = PFN_DOWN(off);
1472 page_end = PFN_UP(off + size);
1473
1474 pcpu_for_each_unpop_region(chunk->populated, rs, re,
1475 page_start, page_end) {
1476 WARN_ON(chunk->immutable);
1477
1478 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1479
1480 spin_lock_irqsave(&pcpu_lock, flags);
1481 if (ret) {
1482 pcpu_free_area(chunk, off);
1483 err = "failed to populate";
1484 goto fail_unlock;
1485 }
1486 pcpu_chunk_populated(chunk, rs, re, true);
1487 spin_unlock_irqrestore(&pcpu_lock, flags);
1488 }
1489
1490 mutex_unlock(&pcpu_alloc_mutex);
1491 }
1492
1493 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1494 pcpu_schedule_balance_work();
1495
1496 /* clear the areas and return address relative to base address */
1497 for_each_possible_cpu(cpu)
1498 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1499
1500 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1501 kmemleak_alloc_percpu(ptr, size, gfp);
1502
1503 trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1504 chunk->base_addr, off, ptr);
1505
1506 return ptr;
1507
1508 fail_unlock:
1509 spin_unlock_irqrestore(&pcpu_lock, flags);
1510 fail:
1511 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1512
1513 if (!is_atomic && do_warn && warn_limit) {
1514 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1515 size, align, is_atomic, err);
1516 dump_stack();
1517 if (!--warn_limit)
1518 pr_info("limit reached, disable warning\n");
1519 }
1520 if (is_atomic) {
1521 /* see the flag handling in pcpu_balance_workfn() */
1522 pcpu_atomic_alloc_failed = true;
1523 pcpu_schedule_balance_work();
1524 } else {
1525 mutex_unlock(&pcpu_alloc_mutex);
1526 }
1527 return NULL;
1528 }
1529
1530 /**
1531 * __alloc_percpu_gfp - allocate dynamic percpu area
1532 * @size: size of area to allocate in bytes
1533 * @align: alignment of area (max PAGE_SIZE)
1534 * @gfp: allocation flags
1535 *
1536 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1537 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1538 * be called from any context but is a lot more likely to fail. If @gfp
1539 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1540 * allocation requests.
1541 *
1542 * RETURNS:
1543 * Percpu pointer to the allocated area on success, NULL on failure.
1544 */
1545 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1546 {
1547 return pcpu_alloc(size, align, false, gfp);
1548 }
1549 EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
1550
1551 /**
1552 * __alloc_percpu - allocate dynamic percpu area
1553 * @size: size of area to allocate in bytes
1554 * @align: alignment of area (max PAGE_SIZE)
1555 *
1556 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1557 */
1558 void __percpu *__alloc_percpu(size_t size, size_t align)
1559 {
1560 return pcpu_alloc(size, align, false, GFP_KERNEL);
1561 }
1562 EXPORT_SYMBOL_GPL(__alloc_percpu);
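/*
 * Typical usage of the dynamic allocation API (an illustrative sketch,
 * not part of this file):
 *
 *	int __percpu *cnt;
 *	int cpu, total = 0;
 *
 *	cnt = __alloc_percpu(sizeof(*cnt), __alignof__(*cnt));
 *	if (!cnt)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(*cnt);
 *	for_each_possible_cpu(cpu)
 *		total += *per_cpu_ptr(cnt, cpu);
 *	free_percpu(cnt);
 */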
1563
1564 /**
1565 * __alloc_reserved_percpu - allocate reserved percpu area
1566 * @size: size of area to allocate in bytes
1567 * @align: alignment of area (max PAGE_SIZE)
1568 *
1569 * Allocate zero-filled percpu area of @size bytes aligned at @align
1570 * from reserved percpu area if arch has set it up; otherwise,
1571 * allocation is served from the same dynamic area. Might sleep.
1572 * Might trigger writeouts.
1573 *
1574 * CONTEXT:
1575 * Does GFP_KERNEL allocation.
1576 *
1577 * RETURNS:
1578 * Percpu pointer to the allocated area on success, NULL on failure.
1579 */
__alloc_reserved_percpu(size_t size,size_t align)1580 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1581 {
1582 return pcpu_alloc(size, align, true, GFP_KERNEL);
1583 }
1584
1585 /**
1586 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
1587 * @work: unused
1588 *
1589 * Reclaim all fully free chunks except for the first one. This is also
1590 * responsible for maintaining the pool of empty populated pages. However,
1591 * it is possible that this is called when physical memory is scarce, causing
1592 * the OOM killer to be triggered. We should avoid doing so until an actual
1593 * allocation causes the failure as it is possible that requests can be
1594 * serviced from already backed regions.
1595 */
1596 static void pcpu_balance_workfn(struct work_struct *work)
1597 {
1598 /* gfp flags passed to underlying allocators */
1599 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
1600 LIST_HEAD(to_free);
1601 struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1602 struct pcpu_chunk *chunk, *next;
1603 int slot, nr_to_pop, ret;
1604
1605 /*
1606 * There's no reason to keep around multiple unused chunks and VM
1607 * areas can be scarce. Destroy all free chunks except for one.
1608 */
1609 mutex_lock(&pcpu_alloc_mutex);
1610 spin_lock_irq(&pcpu_lock);
1611
1612 list_for_each_entry_safe(chunk, next, free_head, list) {
1613 WARN_ON(chunk->immutable);
1614
1615 /* spare the first one */
1616 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1617 continue;
1618
1619 list_move(&chunk->list, &to_free);
1620 }
1621
1622 spin_unlock_irq(&pcpu_lock);
1623
1624 list_for_each_entry_safe(chunk, next, &to_free, list) {
1625 int rs, re;
1626
1627 pcpu_for_each_pop_region(chunk->populated, rs, re, 0,
1628 chunk->nr_pages) {
1629 pcpu_depopulate_chunk(chunk, rs, re);
1630 spin_lock_irq(&pcpu_lock);
1631 pcpu_chunk_depopulated(chunk, rs, re);
1632 spin_unlock_irq(&pcpu_lock);
1633 }
1634 pcpu_destroy_chunk(chunk);
1635 cond_resched();
1636 }
1637
1638 /*
1639 	 * Ensure there is a certain number of free populated pages for
1640 	 * atomic allocs. Fill up from the most packed chunks so that atomic
1641 	 * allocs don't increase fragmentation. If an atomic allocation
1642 	 * failed previously, always populate the maximum amount. This
1643 	 * should keep atomic allocs larger than PAGE_SIZE from failing
1644 	 * indefinitely; however, large atomic allocs are not something we
1645 	 * support properly and they can be highly unreliable and
1646 	 * inefficient.
1647 */
1648 retry_pop:
1649 if (pcpu_atomic_alloc_failed) {
1650 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1651 /* best effort anyway, don't worry about synchronization */
1652 pcpu_atomic_alloc_failed = false;
1653 } else {
1654 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
1655 pcpu_nr_empty_pop_pages,
1656 0, PCPU_EMPTY_POP_PAGES_HIGH);
1657 }
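	/*
	 * Worked example (illustrative numbers): with
	 * PCPU_EMPTY_POP_PAGES_HIGH == 4 and one empty populated page
	 * remaining, the clamp above yields nr_to_pop == 3; after a failed
	 * atomic alloc, the full 4 pages are repopulated regardless.
	 */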
1658
1659 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1660 int nr_unpop = 0, rs, re;
1661
1662 if (!nr_to_pop)
1663 break;
1664
1665 spin_lock_irq(&pcpu_lock);
1666 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1667 nr_unpop = chunk->nr_pages - chunk->nr_populated;
1668 if (nr_unpop)
1669 break;
1670 }
1671 spin_unlock_irq(&pcpu_lock);
1672
1673 if (!nr_unpop)
1674 continue;
1675
1676 /* @chunk can't go away while pcpu_alloc_mutex is held */
1677 pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
1678 chunk->nr_pages) {
1679 int nr = min(re - rs, nr_to_pop);
1680
1681 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
1682 if (!ret) {
1683 nr_to_pop -= nr;
1684 spin_lock_irq(&pcpu_lock);
1685 pcpu_chunk_populated(chunk, rs, rs + nr, false);
1686 spin_unlock_irq(&pcpu_lock);
1687 } else {
1688 nr_to_pop = 0;
1689 }
1690
1691 if (!nr_to_pop)
1692 break;
1693 }
1694 }
1695
1696 if (nr_to_pop) {
1697 /* ran out of chunks to populate, create a new one and retry */
1698 chunk = pcpu_create_chunk(gfp);
1699 if (chunk) {
1700 spin_lock_irq(&pcpu_lock);
1701 pcpu_chunk_relocate(chunk, -1);
1702 spin_unlock_irq(&pcpu_lock);
1703 goto retry_pop;
1704 }
1705 }
1706
1707 mutex_unlock(&pcpu_alloc_mutex);
1708 }
1709
1710 /**
1711 * free_percpu - free percpu area
1712 * @ptr: pointer to area to free
1713 *
1714 * Free percpu area @ptr.
1715 *
1716 * CONTEXT:
1717 * Can be called from atomic context.
1718 */
1719 void free_percpu(void __percpu *ptr)
1720 {
1721 void *addr;
1722 struct pcpu_chunk *chunk;
1723 unsigned long flags;
1724 int off;
1725 bool need_balance = false;
1726
1727 if (!ptr)
1728 return;
1729
1730 kmemleak_free_percpu(ptr);
1731
1732 addr = __pcpu_ptr_to_addr(ptr);
1733
1734 spin_lock_irqsave(&pcpu_lock, flags);
1735
1736 chunk = pcpu_chunk_addr_search(addr);
1737 off = addr - chunk->base_addr;
1738
1739 pcpu_free_area(chunk, off);
1740
1741 	/* if there is more than one fully free chunk, wake up the grim reaper */
1742 if (chunk->free_bytes == pcpu_unit_size) {
1743 struct pcpu_chunk *pos;
1744
1745 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1746 if (pos != chunk) {
1747 need_balance = true;
1748 break;
1749 }
1750 }
1751
1752 trace_percpu_free_percpu(chunk->base_addr, off, ptr);
1753
1754 spin_unlock_irqrestore(&pcpu_lock, flags);
1755
1756 if (need_balance)
1757 pcpu_schedule_balance_work();
1758 }
1759 EXPORT_SYMBOL_GPL(free_percpu);
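/*
 * Example: a typical lifecycle for a dynamically allocated percpu counter,
 * summing the per-cpu instances before releasing them. "hits" mirrors the
 * hypothetical variable from the alloc_percpu() sketch above:
 *
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(hits, cpu);
 *	free_percpu(hits);	// a NULL pointer is silently ignored
 */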
1760
1761 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
1762 {
1763 #ifdef CONFIG_SMP
1764 const size_t static_size = __per_cpu_end - __per_cpu_start;
1765 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1766 unsigned int cpu;
1767
1768 for_each_possible_cpu(cpu) {
1769 void *start = per_cpu_ptr(base, cpu);
1770 void *va = (void *)addr;
1771
1772 if (va >= start && va < start + static_size) {
1773 if (can_addr) {
1774 *can_addr = (unsigned long) (va - start);
1775 *can_addr += (unsigned long)
1776 per_cpu_ptr(base, get_boot_cpu_id());
1777 }
1778 return true;
1779 }
1780 }
1781 #endif
1782 /* on UP, can't distinguish from other static vars, always false */
1783 return false;
1784 }
1785
1786 /**
1787 * is_kernel_percpu_address - test whether address is from static percpu area
1788 * @addr: address to test
1789 *
1790 * Test whether @addr belongs to in-kernel static percpu area. Module
1791 * static percpu areas are not considered. For those, use
1792 * is_module_percpu_address().
1793 *
1794 * RETURNS:
1795 * %true if @addr is from in-kernel static percpu area, %false otherwise.
1796 */
1797 bool is_kernel_percpu_address(unsigned long addr)
1798 {
1799 return __is_kernel_percpu_address(addr, NULL);
1800 }
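/*
 * Example: callers use this as a predicate when classifying an arbitrary
 * kernel address, e.g. in a hypothetical debugging helper:
 *
 *	if (is_kernel_percpu_address(addr))
 *		pr_debug("%lx lies in the static percpu area\n", addr);
 */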
1801
1802 /**
1803 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1804 * @addr: the address to be converted to physical address
1805 *
1806 * Given @addr which is a dereferenceable address obtained via one of the
1807 * percpu access macros, this function translates it into its physical
1808 * address. The caller is responsible for ensuring @addr stays valid
1809 * until this function finishes.
1810 *
1811 * The percpu allocator has a special setup for the first chunk, which
1812 * currently supports either embedding in the linear address space or a
1813 * vmalloc mapping; from the second chunk on, the backing allocator
1814 * (currently either vm or km) provides the translation.
1815 *
1816 * The addr could be translated without checking whether it falls into the
1817 * first chunk, but the current code better reflects how the percpu
1818 * allocator actually works, and the verification can discover bugs both in
1819 * the percpu allocator itself and in per_cpu_ptr_to_phys() callers. So we
1820 * keep the current code.
1821 *
1822 * RETURNS:
1823 * The physical address for @addr.
1824 */
1825 phys_addr_t per_cpu_ptr_to_phys(void *addr)
1826 {
1827 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1828 bool in_first_chunk = false;
1829 unsigned long first_low, first_high;
1830 unsigned int cpu;
1831
1832 /*
1833 * The following test on unit_low/high isn't strictly
1834 * necessary but will speed up lookups of addresses which
1835 * aren't in the first chunk.
1836 *
1837 * The address check is against full chunk sizes. pcpu_base_addr
1838 * points to the beginning of the first chunk including the
1839 * static region. Assumes good intent as the first chunk may
1840 * not be full (ie. < pcpu_unit_pages in size).
1841 */
1842 first_low = (unsigned long)pcpu_base_addr +
1843 pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
1844 first_high = (unsigned long)pcpu_base_addr +
1845 pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
1846 if ((unsigned long)addr >= first_low &&
1847 (unsigned long)addr < first_high) {
1848 for_each_possible_cpu(cpu) {
1849 void *start = per_cpu_ptr(base, cpu);
1850
1851 if (addr >= start && addr < start + pcpu_unit_size) {
1852 in_first_chunk = true;
1853 break;
1854 }
1855 }
1856 }
1857
1858 if (in_first_chunk) {
1859 if (!is_vmalloc_addr(addr))
1860 return __pa(addr);
1861 else
1862 return page_to_phys(vmalloc_to_page(addr)) +
1863 offset_in_page(addr);
1864 } else
1865 return page_to_phys(pcpu_addr_to_page(addr)) +
1866 offset_in_page(addr);
1867 }
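/*
 * Example: translating one cpu's instance of a percpu object into a
 * physical address, e.g. for handing it to firmware or a device that
 * bypasses the kernel mapping. "p" is a hypothetical percpu pointer:
 *
 *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));
 */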
1868
1869 /**
1870 * pcpu_alloc_alloc_info - allocate percpu allocation info
1871 * @nr_groups: the number of groups
1872 * @nr_units: the number of units
1873 *
1874 * Allocate ai which is large enough for @nr_groups groups containing
1875 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1876 * cpu_map array which is long enough for @nr_units and filled with
1877 * NR_CPUS. It's the caller's responsibility to initialize the cpu_map
1878 * pointers of the other groups.
1879 *
1880 * RETURNS:
1881 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1882 * failure.
1883 */
1884 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1885 int nr_units)
1886 {
1887 struct pcpu_alloc_info *ai;
1888 size_t base_size, ai_size;
1889 void *ptr;
1890 int unit;
1891
1892 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1893 __alignof__(ai->groups[0].cpu_map[0]));
1894 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1895
1896 ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
1897 if (!ptr)
1898 return NULL;
1899 ai = ptr;
1900 ptr += base_size;
1901
1902 ai->groups[0].cpu_map = ptr;
1903
1904 for (unit = 0; unit < nr_units; unit++)
1905 ai->groups[0].cpu_map[unit] = NR_CPUS;
1906
1907 ai->nr_groups = nr_groups;
1908 ai->__ai_size = PFN_ALIGN(ai_size);
1909
1910 return ai;
1911 }
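/*
 * Example: as noted above, only groups[0].cpu_map is wired up by this
 * function; a caller building two groups of four units each carves the
 * second group's map out of the shared array itself (sketch, mirroring
 * what pcpu_build_alloc_info() does below):
 *
 *	struct pcpu_alloc_info *ai = pcpu_alloc_alloc_info(2, 8);
 *
 *	if (ai)
 *		ai->groups[1].cpu_map = ai->groups[0].cpu_map + 4;
 */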
1912
1913 /**
1914 * pcpu_free_alloc_info - free percpu allocation info
1915 * @ai: pcpu_alloc_info to free
1916 *
1917 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1918 */
1919 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1920 {
1921 memblock_free_early(__pa(ai), ai->__ai_size);
1922 }
1923
1924 /**
1925 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1926 * @lvl: loglevel
1927 * @ai: allocation info to dump
1928 *
1929 * Print out information about @ai using loglevel @lvl.
1930 */
1931 static void pcpu_dump_alloc_info(const char *lvl,
1932 const struct pcpu_alloc_info *ai)
1933 {
1934 int group_width = 1, cpu_width = 1, width;
1935 char empty_str[] = "--------";
1936 int alloc = 0, alloc_end = 0;
1937 int group, v;
1938 int upa, apl; /* units per alloc, allocs per line */
1939
1940 v = ai->nr_groups;
1941 while (v /= 10)
1942 group_width++;
1943
1944 v = num_possible_cpus();
1945 while (v /= 10)
1946 cpu_width++;
1947 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1948
1949 upa = ai->alloc_size / ai->unit_size;
1950 width = upa * (cpu_width + 1) + group_width + 3;
1951 apl = rounddown_pow_of_two(max(60 / width, 1));
1952
1953 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1954 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1955 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1956
1957 for (group = 0; group < ai->nr_groups; group++) {
1958 const struct pcpu_group_info *gi = &ai->groups[group];
1959 int unit = 0, unit_end = 0;
1960
1961 BUG_ON(gi->nr_units % upa);
1962 for (alloc_end += gi->nr_units / upa;
1963 alloc < alloc_end; alloc++) {
1964 if (!(alloc % apl)) {
1965 pr_cont("\n");
1966 printk("%spcpu-alloc: ", lvl);
1967 }
1968 pr_cont("[%0*d] ", group_width, group);
1969
1970 for (unit_end += upa; unit < unit_end; unit++)
1971 if (gi->cpu_map[unit] != NR_CPUS)
1972 pr_cont("%0*d ",
1973 cpu_width, gi->cpu_map[unit]);
1974 else
1975 pr_cont("%s ", empty_str);
1976 }
1977 }
1978 pr_cont("\n");
1979 }
1980
1981 /**
1982 * pcpu_setup_first_chunk - initialize the first percpu chunk
1983 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1984 * @base_addr: mapped address
1985 *
1986 * Initialize the first percpu chunk which contains the kernel static
1987 * percpu area. This function is to be called from the arch percpu area
1988 * setup path.
1989 *
1990 * @ai contains all information necessary to initialize the first
1991 * chunk and prime the dynamic percpu allocator.
1992 *
1993 * @ai->static_size is the size of static percpu area.
1994 *
1995 * @ai->reserved_size, if non-zero, specifies the number of bytes to
1996 * reserve after the static area in the first chunk. This reserves
1997 * the first chunk such that it's available only through reserved
1998 * percpu allocation. This is primarily used to serve module percpu
1999 * static areas on architectures where the addressing model has
2000 * limited offset range for symbol relocations to guarantee module
2001 * percpu symbols fall inside the relocatable range.
2002 *
2003 * @ai->dyn_size determines the number of bytes available for dynamic
2004 * allocation in the first chunk. The area between @ai->static_size +
2005 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2006 *
2007 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2008 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2009 * @ai->dyn_size.
2010 *
2011 * @ai->atom_size is the allocation atom size and used as alignment
2012 * for vm areas.
2013 *
2014 * @ai->alloc_size is the allocation size and is always a multiple of
2015 * @ai->atom_size. This is larger than @ai->atom_size if
2016 * @ai->unit_size is larger than @ai->atom_size.
2017 *
2018 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2019 * percpu areas. Units which should be colocated are put into the
2020 * same group. Dynamic VM areas will be allocated according to these
2021 * groupings. If @ai->nr_groups is zero, a single group containing
2022 * all units is assumed.
2023 *
2024 * The caller should have mapped the first chunk at @base_addr and
2025 * copied static data to each unit.
2026 *
2027 * The first chunk will always contain a static and a dynamic region.
2028 * However, the static region is not managed by any chunk. If the first
2029 * chunk also contains a reserved region, it is served by two chunks -
2030 * one for the reserved region and one for the dynamic region. They
2031 * share the same vm, but use offset regions in the area allocation map.
2032 * The chunk serving the dynamic region is circulated in the chunk slots
2033 * and available for dynamic allocation like any other chunk.
2034 *
2035 * RETURNS:
2036 * 0 on success, -errno on failure.
2037 */
2038 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2039 void *base_addr)
2040 {
2041 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2042 size_t static_size, dyn_size;
2043 struct pcpu_chunk *chunk;
2044 unsigned long *group_offsets;
2045 size_t *group_sizes;
2046 unsigned long *unit_off;
2047 unsigned int cpu;
2048 int *unit_map;
2049 int group, unit, i;
2050 int map_size;
2051 unsigned long tmp_addr;
2052
2053 #define PCPU_SETUP_BUG_ON(cond) do { \
2054 if (unlikely(cond)) { \
2055 pr_emerg("failed to initialize, %s\n", #cond); \
2056 pr_emerg("cpu_possible_mask=%*pb\n", \
2057 cpumask_pr_args(cpu_possible_mask)); \
2058 pcpu_dump_alloc_info(KERN_EMERG, ai); \
2059 BUG(); \
2060 } \
2061 } while (0)
2062
2063 /* sanity checks */
2064 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2065 #ifdef CONFIG_SMP
2066 PCPU_SETUP_BUG_ON(!ai->static_size);
2067 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2068 #endif
2069 PCPU_SETUP_BUG_ON(!base_addr);
2070 PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2071 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2072 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2073 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2074 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2075 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2076 PCPU_SETUP_BUG_ON(!ai->dyn_size);
2077 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2078 PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2079 IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2080 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2081
2082 /* process group information and build config tables accordingly */
2083 group_offsets = memblock_virt_alloc(ai->nr_groups *
2084 sizeof(group_offsets[0]), 0);
2085 group_sizes = memblock_virt_alloc(ai->nr_groups *
2086 sizeof(group_sizes[0]), 0);
2087 unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
2088 unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
2089
2090 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2091 unit_map[cpu] = UINT_MAX;
2092
2093 pcpu_low_unit_cpu = NR_CPUS;
2094 pcpu_high_unit_cpu = NR_CPUS;
2095
2096 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2097 const struct pcpu_group_info *gi = &ai->groups[group];
2098
2099 group_offsets[group] = gi->base_offset;
2100 group_sizes[group] = gi->nr_units * ai->unit_size;
2101
2102 for (i = 0; i < gi->nr_units; i++) {
2103 cpu = gi->cpu_map[i];
2104 if (cpu == NR_CPUS)
2105 continue;
2106
2107 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2108 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2109 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2110
2111 unit_map[cpu] = unit + i;
2112 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2113
2114 /* determine low/high unit_cpu */
2115 if (pcpu_low_unit_cpu == NR_CPUS ||
2116 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2117 pcpu_low_unit_cpu = cpu;
2118 if (pcpu_high_unit_cpu == NR_CPUS ||
2119 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2120 pcpu_high_unit_cpu = cpu;
2121 }
2122 }
2123 pcpu_nr_units = unit;
2124
2125 for_each_possible_cpu(cpu)
2126 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2127
2128 /* we're done parsing the input, undefine BUG macro and dump config */
2129 #undef PCPU_SETUP_BUG_ON
2130 pcpu_dump_alloc_info(KERN_DEBUG, ai);
2131
2132 pcpu_nr_groups = ai->nr_groups;
2133 pcpu_group_offsets = group_offsets;
2134 pcpu_group_sizes = group_sizes;
2135 pcpu_unit_map = unit_map;
2136 pcpu_unit_offsets = unit_off;
2137
2138 /* determine basic parameters */
2139 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2140 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2141 pcpu_atom_size = ai->atom_size;
2142 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
2143 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
2144
2145 pcpu_stats_save_ai(ai);
2146
2147 /*
2148 * Allocate chunk slots. The additional last slot is for
2149 * empty chunks.
2150 */
2151 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
2152 pcpu_slot = memblock_virt_alloc(
2153 pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
2154 for (i = 0; i < pcpu_nr_slots; i++)
2155 INIT_LIST_HEAD(&pcpu_slot[i]);
2156
2157 /*
2158 * The end of the static region needs to be aligned with the
2159 * minimum allocation size as this offsets the reserved and
2160 * dynamic region. The first chunk ends page aligned by
2161 * expanding the dynamic region, therefore the dynamic region
2162 * can be shrunk to compensate while still staying above the
2163 * configured sizes.
2164 */
2165 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2166 dyn_size = ai->dyn_size - (static_size - ai->static_size);
2167
2168 /*
2169 * Initialize first chunk.
2170 * If the reserved_size is non-zero, this initializes the reserved
2171 * chunk. If the reserved_size is zero, the reserved chunk is NULL
2172 * and the dynamic region is initialized here. The first chunk,
2173 * pcpu_first_chunk, will always point to the chunk that serves
2174 * the dynamic region.
2175 */
2176 tmp_addr = (unsigned long)base_addr + static_size;
2177 map_size = ai->reserved_size ?: dyn_size;
2178 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2179
2180 /* init dynamic chunk if necessary */
2181 if (ai->reserved_size) {
2182 pcpu_reserved_chunk = chunk;
2183
2184 tmp_addr = (unsigned long)base_addr + static_size +
2185 ai->reserved_size;
2186 map_size = dyn_size;
2187 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2188 }
2189
2190 /* link the first chunk in */
2191 pcpu_first_chunk = chunk;
2192 pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2193 pcpu_chunk_relocate(pcpu_first_chunk, -1);
2194
2195 /* include all regions of the first chunk */
2196 pcpu_nr_populated += PFN_DOWN(size_sum);
2197
2198 pcpu_stats_chunk_alloc();
2199 trace_percpu_create_chunk(base_addr);
2200
2201 /* we're done */
2202 pcpu_base_addr = base_addr;
2203 return 0;
2204 }
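/*
 * Example first chunk layout with illustrative sizes (static 64K,
 * reserved 8K, dynamic 28K, unit_size 128K); the tail between size_sum
 * and unit_size stays unused as described above:
 *
 *	base_addr
 *	|<- static 64K ->|<- reserved 8K ->|<- dynamic 28K ->| unused 28K |
 *	|<-------------------- unit_size 128K ---------------------------->|
 */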
2205
2206 #ifdef CONFIG_SMP
2207
2208 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2209 [PCPU_FC_AUTO] = "auto",
2210 [PCPU_FC_EMBED] = "embed",
2211 [PCPU_FC_PAGE] = "page",
2212 };
2213
2214 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2215
2216 static int __init percpu_alloc_setup(char *str)
2217 {
2218 if (!str)
2219 return -EINVAL;
2220
2221 if (0)
2222 /* nada */;
2223 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2224 else if (!strcmp(str, "embed"))
2225 pcpu_chosen_fc = PCPU_FC_EMBED;
2226 #endif
2227 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2228 else if (!strcmp(str, "page"))
2229 pcpu_chosen_fc = PCPU_FC_PAGE;
2230 #endif
2231 else
2232 pr_warn("unknown allocator %s specified\n", str);
2233
2234 return 0;
2235 }
2236 early_param("percpu_alloc", percpu_alloc_setup);
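/*
 * Example: the first chunk allocator can be overridden on the kernel
 * command line, e.g. to fall back to page mapping on a machine where
 * embedding would consume too much vmalloc space:
 *
 *	percpu_alloc=page
 */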
2237
2238 /*
2239 * pcpu_embed_first_chunk() is used by the generic percpu setup.
2240 * Build it if it's needed by the arch config or if the generic setup is
2241 * going to be used.
2242 */
2243 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2244 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2245 #define BUILD_EMBED_FIRST_CHUNK
2246 #endif
2247
2248 /* build pcpu_page_first_chunk() iff needed by the arch config */
2249 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2250 #define BUILD_PAGE_FIRST_CHUNK
2251 #endif
2252
2253 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
2254 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2255 /**
2256 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2257 * @reserved_size: the size of reserved percpu area in bytes
2258 * @dyn_size: minimum free size for dynamic allocation in bytes
2259 * @atom_size: allocation atom size
2260 * @cpu_distance_fn: callback to determine distance between cpus, optional
2261 *
2262 * This function determines grouping of units, their mappings to cpus
2263 * and other parameters considering needed percpu size, allocation
2264 * atom size and distances between CPUs.
2265 *
2266 * Groups are always multiples of atom size and CPUs which are of
2267 * LOCAL_DISTANCE both ways are grouped together and share space for
2268 * units in the same group. The returned configuration is guaranteed
2269 * to have CPUs on different nodes in different groups and >=75% usage
2270 * of allocated virtual address space.
2271 *
2272 * RETURNS:
2273 * On success, pointer to the new allocation_info is returned. On
2274 * failure, ERR_PTR value is returned.
2275 */
2276 static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
2277 size_t reserved_size, size_t dyn_size,
2278 size_t atom_size,
2279 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2280 {
2281 static int group_map[NR_CPUS] __initdata;
2282 static int group_cnt[NR_CPUS] __initdata;
2283 const size_t static_size = __per_cpu_end - __per_cpu_start;
2284 int nr_groups = 1, nr_units = 0;
2285 size_t size_sum, min_unit_size, alloc_size;
2286 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
2287 int last_allocs, group, unit;
2288 unsigned int cpu, tcpu;
2289 struct pcpu_alloc_info *ai;
2290 unsigned int *cpu_map;
2291
2292 /* this function may be called multiple times */
2293 memset(group_map, 0, sizeof(group_map));
2294 memset(group_cnt, 0, sizeof(group_cnt));
2295
2296 /* calculate size_sum and ensure dyn_size is enough for early alloc */
2297 size_sum = PFN_ALIGN(static_size + reserved_size +
2298 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2299 dyn_size = size_sum - static_size - reserved_size;
2300
2301 /*
2302 * Determine min_unit_size, alloc_size and max_upa such that
2303 	 * alloc_size is a multiple of atom_size and is the smallest size
2304 	 * which can accommodate 4k aligned segments which are equal to
2305 * or larger than min_unit_size.
2306 */
2307 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2308
2309 /* determine the maximum # of units that can fit in an allocation */
2310 alloc_size = roundup(min_unit_size, atom_size);
2311 upa = alloc_size / min_unit_size;
2312 while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2313 upa--;
2314 max_upa = upa;
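	/*
	 * Worked example (illustrative numbers, assuming 4K pages): with
	 * atom_size == 2M and min_unit_size == 192K, alloc_size rounds up
	 * to 2M and the initial upa guess is 10.  Neither 10 nor 9 divides
	 * 2M evenly, so upa drops to 8, giving page-aligned 256K units and
	 * max_upa == 8.
	 */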
2315
2316 /* group cpus according to their proximity */
2317 for_each_possible_cpu(cpu) {
2318 group = 0;
2319 next_group:
2320 for_each_possible_cpu(tcpu) {
2321 if (cpu == tcpu)
2322 break;
2323 if (group_map[tcpu] == group && cpu_distance_fn &&
2324 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
2325 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
2326 group++;
2327 nr_groups = max(nr_groups, group + 1);
2328 goto next_group;
2329 }
2330 }
2331 group_map[cpu] = group;
2332 group_cnt[group]++;
2333 }
2334
2335 /*
2336 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2337 * Expand the unit_size until we use >= 75% of the units allocated.
2338 * Related to atom_size, which could be much larger than the unit_size.
2339 */
2340 last_allocs = INT_MAX;
2341 for (upa = max_upa; upa; upa--) {
2342 int allocs = 0, wasted = 0;
2343
2344 if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2345 continue;
2346
2347 for (group = 0; group < nr_groups; group++) {
2348 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2349 allocs += this_allocs;
2350 wasted += this_allocs * upa - group_cnt[group];
2351 }
2352
2353 /*
2354 * Don't accept if wastage is over 1/3. The
2355 * greater-than comparison ensures upa==1 always
2356 * passes the following check.
2357 */
2358 if (wasted > num_possible_cpus() / 3)
2359 continue;
2360
2361 /* and then don't consume more memory */
2362 if (allocs > last_allocs)
2363 break;
2364 last_allocs = allocs;
2365 best_upa = upa;
2366 }
2367 upa = best_upa;
2368
2369 /* allocate and fill alloc_info */
2370 for (group = 0; group < nr_groups; group++)
2371 nr_units += roundup(group_cnt[group], upa);
2372
2373 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2374 if (!ai)
2375 return ERR_PTR(-ENOMEM);
2376 cpu_map = ai->groups[0].cpu_map;
2377
2378 for (group = 0; group < nr_groups; group++) {
2379 ai->groups[group].cpu_map = cpu_map;
2380 cpu_map += roundup(group_cnt[group], upa);
2381 }
2382
2383 ai->static_size = static_size;
2384 ai->reserved_size = reserved_size;
2385 ai->dyn_size = dyn_size;
2386 ai->unit_size = alloc_size / upa;
2387 ai->atom_size = atom_size;
2388 ai->alloc_size = alloc_size;
2389
2390 for (group = 0, unit = 0; group_cnt[group]; group++) {
2391 struct pcpu_group_info *gi = &ai->groups[group];
2392
2393 /*
2394 * Initialize base_offset as if all groups are located
2395 * back-to-back. The caller should update this to
2396 * reflect actual allocation.
2397 */
2398 gi->base_offset = unit * ai->unit_size;
2399
2400 for_each_possible_cpu(cpu)
2401 if (group_map[cpu] == group)
2402 gi->cpu_map[gi->nr_units++] = cpu;
2403 gi->nr_units = roundup(gi->nr_units, upa);
2404 unit += gi->nr_units;
2405 }
2406 BUG_ON(unit != nr_units);
2407
2408 return ai;
2409 }
2410 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
2411
2412 #if defined(BUILD_EMBED_FIRST_CHUNK)
2413 /**
2414 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
2415 * @reserved_size: the size of reserved percpu area in bytes
2416 * @dyn_size: minimum free size for dynamic allocation in bytes
2417 * @atom_size: allocation atom size
2418 * @cpu_distance_fn: callback to determine distance between cpus, optional
2419 * @alloc_fn: function to allocate percpu page
2420 * @free_fn: function to free percpu page
2421 *
2422 * This is a helper to ease setting up embedded first percpu chunk and
2423 * can be called where pcpu_setup_first_chunk() is expected.
2424 *
2425 * If this function is used to setup the first chunk, it is allocated
2426 * by calling @alloc_fn and used as-is without being mapped into
2427 * vmalloc area. Allocations are always whole multiples of @atom_size
2428 * aligned to @atom_size.
2429 *
2430 * This enables the first chunk to piggy back on the linear physical
2431 * mapping which often uses larger page size. Please note that this
2432 * can result in very sparse cpu->unit mapping on NUMA machines thus
2433 * requiring large vmalloc address space. Don't use this allocator if
2434 * vmalloc space is not orders of magnitude larger than distances
2435 * between node memory addresses (ie. 32bit NUMA machines).
2436 *
2437 * @dyn_size specifies the minimum dynamic area size.
2438 *
2439 * If the needed size is smaller than the minimum or specified unit
2440 * size, the leftover is returned using @free_fn.
2441 *
2442 * RETURNS:
2443 * 0 on success, -errno on failure.
2444 */
2445 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2446 size_t atom_size,
2447 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
2448 pcpu_fc_alloc_fn_t alloc_fn,
2449 pcpu_fc_free_fn_t free_fn)
2450 {
2451 void *base = (void *)ULONG_MAX;
2452 void **areas = NULL;
2453 struct pcpu_alloc_info *ai;
2454 size_t size_sum, areas_size;
2455 unsigned long max_distance;
2456 int group, i, highest_group, rc;
2457
2458 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
2459 cpu_distance_fn);
2460 if (IS_ERR(ai))
2461 return PTR_ERR(ai);
2462
2463 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2464 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
2465
2466 areas = memblock_virt_alloc_nopanic(areas_size, 0);
2467 if (!areas) {
2468 rc = -ENOMEM;
2469 goto out_free;
2470 }
2471
2472 /* allocate, copy and determine base address & max_distance */
2473 highest_group = 0;
2474 for (group = 0; group < ai->nr_groups; group++) {
2475 struct pcpu_group_info *gi = &ai->groups[group];
2476 unsigned int cpu = NR_CPUS;
2477 void *ptr;
2478
2479 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2480 cpu = gi->cpu_map[i];
2481 BUG_ON(cpu == NR_CPUS);
2482
2483 /* allocate space for the whole group */
2484 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2485 if (!ptr) {
2486 rc = -ENOMEM;
2487 goto out_free_areas;
2488 }
2489 /* kmemleak tracks the percpu allocations separately */
2490 kmemleak_free(ptr);
2491 areas[group] = ptr;
2492
2493 base = min(ptr, base);
2494 if (ptr > areas[highest_group])
2495 highest_group = group;
2496 }
2497 max_distance = areas[highest_group] - base;
2498 max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
2499
2500 /* warn if maximum distance is further than 75% of vmalloc space */
2501 if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2502 pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
2503 max_distance, VMALLOC_TOTAL);
2504 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2505 /* and fail if we have fallback */
2506 rc = -EINVAL;
2507 goto out_free_areas;
2508 #endif
2509 }
2510
2511 /*
2512 * Copy data and free unused parts. This should happen after all
2513 * allocations are complete; otherwise, we may end up with
2514 * overlapping groups.
2515 */
2516 for (group = 0; group < ai->nr_groups; group++) {
2517 struct pcpu_group_info *gi = &ai->groups[group];
2518 void *ptr = areas[group];
2519
2520 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2521 if (gi->cpu_map[i] == NR_CPUS) {
2522 /* unused unit, free whole */
2523 free_fn(ptr, ai->unit_size);
2524 continue;
2525 }
2526 /* copy and return the unused part */
2527 memcpy(ptr, __per_cpu_load, ai->static_size);
2528 free_fn(ptr + size_sum, ai->unit_size - size_sum);
2529 }
2530 }
2531
2532 /* base address is now known, determine group base offsets */
2533 for (group = 0; group < ai->nr_groups; group++) {
2534 ai->groups[group].base_offset = areas[group] - base;
2535 }
2536
2537 pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
2538 PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2539 ai->dyn_size, ai->unit_size);
2540
2541 rc = pcpu_setup_first_chunk(ai, base);
2542 goto out_free;
2543
2544 out_free_areas:
2545 for (group = 0; group < ai->nr_groups; group++)
2546 if (areas[group])
2547 free_fn(areas[group],
2548 ai->groups[group].nr_units * ai->unit_size);
2549 out_free:
2550 pcpu_free_alloc_info(ai);
2551 if (areas)
2552 memblock_free_early(__pa(areas), areas_size);
2553 return rc;
2554 }
2555 #endif /* BUILD_EMBED_FIRST_CHUNK */
2556
2557 #ifdef BUILD_PAGE_FIRST_CHUNK
2558 /**
2559 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2560 * @reserved_size: the size of reserved percpu area in bytes
2561 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2562 * @free_fn: function to free percpu page, always called with PAGE_SIZE
2563 * @populate_pte_fn: function to populate pte
2564 *
2565 * This is a helper to ease setting up page-remapped first percpu
2566 * chunk and can be called where pcpu_setup_first_chunk() is expected.
2567 *
2568 * This is the basic allocator. The static percpu area is allocated
2569 * page-by-page into the vmalloc area.
2570 *
2571 * RETURNS:
2572 * 0 on success, -errno on failure.
2573 */
2574 int __init pcpu_page_first_chunk(size_t reserved_size,
2575 pcpu_fc_alloc_fn_t alloc_fn,
2576 pcpu_fc_free_fn_t free_fn,
2577 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2578 {
2579 static struct vm_struct vm;
2580 struct pcpu_alloc_info *ai;
2581 char psize_str[16];
2582 int unit_pages;
2583 size_t pages_size;
2584 struct page **pages;
2585 int unit, i, j, rc;
2586 int upa;
2587 int nr_g0_units;
2588
2589 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2590
2591 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2592 if (IS_ERR(ai))
2593 return PTR_ERR(ai);
2594 BUG_ON(ai->nr_groups != 1);
2595 upa = ai->alloc_size/ai->unit_size;
2596 nr_g0_units = roundup(num_possible_cpus(), upa);
2597 	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
2598 pcpu_free_alloc_info(ai);
2599 return -EINVAL;
2600 }
2601
2602 unit_pages = ai->unit_size >> PAGE_SHIFT;
2603
2604 /* unaligned allocations can't be freed, round up to page size */
2605 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2606 sizeof(pages[0]));
2607 pages = memblock_virt_alloc(pages_size, 0);
2608
2609 /* allocate pages */
2610 j = 0;
2611 for (unit = 0; unit < num_possible_cpus(); unit++) {
2612 unsigned int cpu = ai->groups[0].cpu_map[unit];
2613 for (i = 0; i < unit_pages; i++) {
2614 void *ptr;
2615
2616 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2617 if (!ptr) {
2618 pr_warn("failed to allocate %s page for cpu%u\n",
2619 psize_str, cpu);
2620 goto enomem;
2621 }
2622 /* kmemleak tracks the percpu allocations separately */
2623 kmemleak_free(ptr);
2624 pages[j++] = virt_to_page(ptr);
2625 }
2626 }
2627
2628 /* allocate vm area, map the pages and copy static data */
2629 vm.flags = VM_ALLOC;
2630 vm.size = num_possible_cpus() * ai->unit_size;
2631 vm_area_register_early(&vm, PAGE_SIZE);
2632
2633 for (unit = 0; unit < num_possible_cpus(); unit++) {
2634 unsigned long unit_addr =
2635 (unsigned long)vm.addr + unit * ai->unit_size;
2636
2637 for (i = 0; i < unit_pages; i++)
2638 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2639
2640 /* pte already populated, the following shouldn't fail */
2641 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2642 unit_pages);
2643 if (rc < 0)
2644 panic("failed to map percpu area, err=%d\n", rc);
2645
2646 /*
2647 * FIXME: Archs with virtual cache should flush local
2648 * cache for the linear mapping here - something
2649 * equivalent to flush_cache_vmap() on the local cpu.
2650 * flush_cache_vmap() can't be used as most supporting
2651 * data structures are not set up yet.
2652 */
2653
2654 /* copy static data */
2655 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
2656 }
2657
2658 /* we're ready, commit */
2659 pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
2660 unit_pages, psize_str, ai->static_size,
2661 ai->reserved_size, ai->dyn_size);
2662
2663 rc = pcpu_setup_first_chunk(ai, vm.addr);
2664 goto out_free_ar;
2665
2666 enomem:
2667 while (--j >= 0)
2668 free_fn(page_address(pages[j]), PAGE_SIZE);
2669 rc = -ENOMEM;
2670 out_free_ar:
2671 memblock_free_early(__pa(pages), pages_size);
2672 pcpu_free_alloc_info(ai);
2673 return rc;
2674 }
2675 #endif /* BUILD_PAGE_FIRST_CHUNK */
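/*
 * Example: an arch selecting CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK calls
 * this from its setup_per_cpu_areas() with page-grained callbacks; a
 * hypothetical sketch (the three helpers are arch-provided and not
 * defined in this file):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   my_pcpu_alloc, my_pcpu_free,
 *				   my_populate_pte);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area, err=%d", rc);
 */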
2676
2677 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2678 /*
2679 * Generic SMP percpu area setup.
2680 *
2681 * The embedding helper is used because its behavior closely resembles
2682 * the original non-dynamic generic percpu area setup. This is
2683 * important because many archs have addressing restrictions and might
2684 * fail if the percpu area is located far away from the previous
2685 * location. As an added bonus, in non-NUMA cases, embedding is
2686 * generally a good idea TLB-wise because percpu area can piggy back
2687 * on the physical linear memory mapping which uses large page
2688 * mappings on applicable archs.
2689 */
2690 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2691 EXPORT_SYMBOL(__per_cpu_offset);
2692
2693 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2694 size_t align)
2695 {
2696 return memblock_virt_alloc_from_nopanic(
2697 size, align, __pa(MAX_DMA_ADDRESS));
2698 }
2699
2700 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2701 {
2702 memblock_free_early(__pa(ptr), size);
2703 }
2704
2705 void __init setup_per_cpu_areas(void)
2706 {
2707 unsigned long delta;
2708 unsigned int cpu;
2709 int rc;
2710
2711 /*
2712 * Always reserve area for module percpu variables. That's
2713 * what the legacy allocator did.
2714 */
2715 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2716 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2717 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2718 if (rc < 0)
2719 panic("Failed to initialize percpu areas.");
2720
2721 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2722 for_each_possible_cpu(cpu)
2723 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2724 }
2725 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2726
2727 #else /* CONFIG_SMP */
2728
2729 /*
2730 * UP percpu area setup.
2731 *
2732 * UP always uses km-based percpu allocator with identity mapping.
2733 * Static percpu variables are indistinguishable from the usual static
2734 * variables and don't require any special preparation.
2735 */
2736 void __init setup_per_cpu_areas(void)
2737 {
2738 const size_t unit_size =
2739 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2740 PERCPU_DYNAMIC_RESERVE));
2741 struct pcpu_alloc_info *ai;
2742 void *fc;
2743
2744 ai = pcpu_alloc_alloc_info(1, 1);
2745 fc = memblock_virt_alloc_from_nopanic(unit_size,
2746 PAGE_SIZE,
2747 __pa(MAX_DMA_ADDRESS));
2748 if (!ai || !fc)
2749 panic("Failed to allocate memory for percpu areas.");
2750 /* kmemleak tracks the percpu allocations separately */
2751 kmemleak_free(fc);
2752
2753 ai->dyn_size = unit_size;
2754 ai->unit_size = unit_size;
2755 ai->atom_size = unit_size;
2756 ai->alloc_size = unit_size;
2757 ai->groups[0].nr_units = 1;
2758 ai->groups[0].cpu_map[0] = 0;
2759
2760 if (pcpu_setup_first_chunk(ai, fc) < 0)
2761 panic("Failed to initialize percpu areas.");
2762 pcpu_free_alloc_info(ai);
2763 }
2764
2765 #endif /* CONFIG_SMP */
2766
2767 /*
2768 * pcpu_nr_pages - calculate total number of populated backing pages
2769 *
2770 * This reflects the number of pages populated to back chunks. Metadata is
2771 * excluded from the number exposed in meminfo as the number of backing pages
2772 * scales with the number of cpus and can quickly outweigh the memory used for
2773 * metadata. It also keeps this calculation nice and simple.
2774 *
2775 * RETURNS:
2776 * Total number of populated backing pages in use by the allocator.
2777 */
2778 unsigned long pcpu_nr_pages(void)
2779 {
2780 return pcpu_nr_populated * pcpu_nr_units;
2781 }
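/*
 * Example arithmetic (illustrative numbers): with 4 units (one per
 * possible cpu) and 12 populated pages per unit, pcpu_nr_pages() reports
 * 48 pages, i.e. 192K with 4K pages, which is the figure exposed in
 * meminfo as described above.
 */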
2782
2783 /*
2784 * The percpu allocator is initialized early during boot, when neither slab
2785 * nor workqueue is available. Plug async management until everything is up
2786 * and running.
2787 */
2788 static int __init percpu_enable_async(void)
2789 {
2790 pcpu_async_enabled = true;
2791 return 0;
2792 }
2793 subsys_initcall(percpu_enable_async);
2794