/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

static DEFINE_SPINLOCK(slice_convert_lock);
/*
 * One bit per slice. Low slices cover 256MB segments up to the 4GB
 * boundary, which gives us 16 low slices. Above that we track slices
 * at 1TB granularity.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label, (int)SLICE_NUM_LOW, &mask.low_slices);
	pr_devel("%s high_slice: %*pbl\n", label, (int)SLICE_NUM_HIGH, mask.high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

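/*
 * Build the slice mask covering the virtual address range
 * [start, start + len): set the bit of every low and high slice that
 * the range touches.
 */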
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if ((start + len) > SLICE_LOW_TOP) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}

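/*
 * Check that [addr, addr + len) is below the address space limit and
 * does not overlap any existing VMA (taking the stack guard gap into
 * account).
 */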
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->context.addr_limit - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

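/*
 * slice_low_has_vma()/slice_high_has_vma(): report whether the given
 * low or high slice contains at least one VMA.
 */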
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

#ifdef CONFIG_PPC64
	/* Hack: so that each address is controlled by exactly one of the
	 * high or low area bitmaps, the first high area starts at 4GB,
	 * not at 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;
#endif

	return !slice_area_is_free(mm, start, end - start);
}

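/* Build the mask of slices that contain no VMAs at all. */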
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (mm->context.addr_limit <= SLICE_LOW_TOP)
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

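/* Build the mask of slices whose recorded page size is 'psize'. */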
static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
{
	unsigned char *hpsizes;
	int index, mask_index;
	unsigned long i;
	u64 lpsizes;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == psize)
			ret->low_slices |= 1u << i;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
			__set_bit(i, ret->high_slices);
	}
}

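/*
 * Check that every slice set in 'mask' is also set in 'available',
 * i.e. that the requested range fits entirely within the available
 * slices.
 */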
static int slice_check_fit(struct mm_struct *mm,
			   struct slice_mask mask, struct slice_mask available)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
	unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);

	if (!SLICE_NUM_HIGH)
		return (mask.low_slices & available.low_slices) ==
		       mask.low_slices;

	bitmap_and(result, mask.high_slices,
		   available.high_slices, slice_count);

	return (mask.low_slices & available.low_slices) == mask.low_slices &&
	       bitmap_equal(result, mask.high_slices, slice_count);
}

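/*
 * Called (via on_each_cpu()) after slice page sizes change: copy the
 * mm context (including the slice page sizes) to the PACA and
 * flush/rebolt the SLB, but only on CPUs where this mm is currently
 * active (and only on 64-bit).
 */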
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
#endif
}

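/*
 * Set the page size of every slice in 'mask' to 'psize' and flush the
 * copro SLBs; CPU SLB flushing is left to the caller (see
 * slice_flush_segments()).
 */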
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes;
	u64 lpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (test_bit(i, mask.high_slices))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 struct slice_mask available,
				 int end,
				 unsigned long *boundary_addr)
{
	unsigned long slice;
	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available.low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available.high_slices);
	}
}

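/*
 * Bottom-up search: starting at TASK_UNMAPPED_BASE, merge runs of
 * contiguous available slices and let vm_unmapped_area() look for a
 * fit inside each run.
 */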
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Search up to the allowed maximum address for this mmap request.
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

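/*
 * Top-down search: starting below mm->mmap_base, merge runs of
 * contiguous available slices and let vm_unmapped_area() look for a
 * fit inside each run; falls back to the bottom-up search on failure.
 */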
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;
	unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW, add the
	 * difference to mmap_base. This only applies to requests whose
	 * high_limit is above DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm->context.addr_limit - DEFAULT_MAP_WINDOW;

	while (addr > min_addr) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < min_addr)
			addr = min_addr;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
{
	dst->low_slices |= src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, dst->high_slices, src->high_slices,
		  SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
{
	dst->low_slices &= ~src->low_slices;

	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices,
		      SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

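/*
 * Core of the slice-aware get_unmapped_area(): find an address range
 * of 'len' bytes for the given page size, converting free slices to
 * that size if necessary. The search order over "good", "compat" and
 * "free" slices is described in the comment inside the function.
 */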
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask mask;
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	struct slice_mask compat_mask;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm->context.addr_limit) {
		mm->context.addr_limit = high_limit;
		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/*
	 * init different masks
	 */
	mask.low_slices = 0;

	/* silence stupid warning */;
	potential_mask.low_slices = 0;

	compat_mask.low_slices = 0;

	if (SLICE_NUM_HIGH) {
		bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
		bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
		bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm->context.addr_limit == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len || addr < mmap_min_addr ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	slice_mask_for_size(mm, psize, &good_mask);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
		if (fixed)
			slice_or_mask(&good_mask, &compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		slice_range_to_mask(addr, len, &mask);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mm, mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask);
	slice_or_mask(&potential_mask, &good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask,
				       psize, topdown, high_limit);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask,
			       psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &compat_mask);
		addr = slice_find_area(mm, len, potential_mask,
				       psize, topdown, high_limit);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(addr, len, &mask);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	slice_andnot_mask(&mask, &good_mask);
	slice_andnot_mask(&mask, &compat_mask);
	if (mask.low_slices ||
	    (SLICE_NUM_HIGH &&
	     !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}

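/*
 * Return the MMU page size index of the slice containing 'addr'.
 * With radix (no slices) this is simply the kernel's base page size.
 */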
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	/*
	 * Radix doesn't use slices, but it can be enabled along with
	 * MMU_SLICE.
	 */
	if (radix_enabled()) {
#ifdef CONFIG_PPC_64K_PAGES
		return MMU_PAGE_64K;
#else
		return MMU_PAGE_4K;
#endif
	}
	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;
		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non-cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	VM_BUG_ON(radix_enabled());
	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

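/* Force every slice covered by [start, start + len) to 'psize'. */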
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * OK for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	if (radix_enabled())
		return 0;

	slice_range_to_mask(addr, len, &mask);
	slice_mask_for_size(mm, psize, &available);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
		slice_or_mask(&available, &compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		  mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mm, mask, available);
}
#endif
