Lines matching the full-text search query +full:llp +full:- +full:based. The hits below appear to come from the powerpc SLB management code (arch/powerpc/mm/book3s64/slb.c); each hit shows the source line number, the matched text, and the enclosing function.
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * Based on earlier code written by:
12 #include <asm/asm-prototypes.h>
16 #include <asm/ppc-opcode.h>
26 #include <asm/code-patching.h>
83 * ignores all other bits from 0-27, so just clear them all. in assert_slb_presence()
85 ea &= ~((1UL << SID_SHIFT) - 1); in assert_slb_presence()
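The assert_slb_presence() hits above mask off the low SID_SHIFT bits of the effective address; with 256MB segments (SID_SHIFT being 28) that rounds the EA down to its segment base, as in this small stand-alone worked example (the address is arbitrary):

    #include <stdio.h>

    #define SID_SHIFT 28    /* 256MB segments: the low 28 bits are the offset within the segment */

    int main(void)
    {
        unsigned long ea  = 0xc000000012345678UL;              /* arbitrary example address */
        unsigned long seg = ea & ~((1UL << SID_SHIFT) - 1);    /* clear bits 0-27 */

        printf("%lx\n", seg);   /* prints c000000010000000: the containing segment base */
        return 0;
    }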
103 WRITE_ONCE(p->save_area[index].esid, 0); in slb_shadow_update()
104 WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); in slb_shadow_update()
105 WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); in slb_shadow_update()
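The slb_shadow_update() hits above show an update ordering for a two-word entry that another agent (the reader of the SLB shadow buffer) may consume at any time: invalidate first, write the payload, publish the valid word last. A minimal sketch of that ordering with hypothetical names, where plain volatile stores stand in for WRITE_ONCE(); real code on weakly ordered hardware may also need explicit barriers:

    #include <stdint.h>

    struct shadow_slot {            /* hypothetical stand-in for a shadow save_area entry */
        volatile uint64_t esid;     /* carries the valid bit */
        volatile uint64_t vsid;
    };

    /*
     * Update a slot so a concurrent reader never pairs a valid esid with a
     * stale vsid: invalidate, write the payload, publish last.
     */
    void shadow_update(struct shadow_slot *s, uint64_t esid, uint64_t vsid)
    {
        s->esid = 0;        /* 1: mark the entry invalid */
        s->vsid = vsid;     /* 2: install the new data */
        s->esid = esid;     /* 3: publish the new, valid entry */
    }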
110 WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index)); in slb_shadow_clear()
143 : "r" (be64_to_cpu(p->save_area[index].vsid)), in __slb_restore_bolted_realmode()
144 "r" (be64_to_cpu(p->save_area[index].esid))); in __slb_restore_bolted_realmode()
147 assert_slb_presence(true, local_paca->kstack); in __slb_restore_bolted_realmode()
156 get_paca()->slb_cache_ptr = 0; in slb_restore_bolted_realmode()
158 get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; in slb_restore_bolted_realmode()
159 get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; in slb_restore_bolted_realmode()
181 * Older processors will ignore this optimisation. Over-invalidation in __slb_flush_and_restore_bolted()
189 ksp_esid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].esid); in __slb_flush_and_restore_bolted()
190 ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid); in __slb_flush_and_restore_bolted()
201 * This flushes non-bolted entries; it can be run in virtual mode. Must in slb_flush_and_restore_bolted()
220 assert_slb_presence(true, get_paca()->kstack); in slb_flush_and_restore_bolted()
222 get_paca()->slb_cache_ptr = 0; in slb_flush_and_restore_bolted()
224 get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; in slb_flush_and_restore_bolted()
225 get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; in slb_flush_and_restore_bolted()
234 get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr; in slb_save_contents()
242 slb_ptr->esid = e; in slb_save_contents()
243 slb_ptr->vsid = v; in slb_save_contents()
252 unsigned long llp; in slb_dump_contents() local
258 pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr); in slb_dump_contents()
261 e = slb_ptr->esid; in slb_dump_contents()
262 v = slb_ptr->vsid; in slb_dump_contents()
274 llp = v & SLB_VSID_LLP; in slb_dump_contents()
276 pr_err(" 1T ESID=%9lx VSID=%13lx LLP:%3lx\n", in slb_dump_contents()
278 (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp); in slb_dump_contents()
280 pr_err(" 256M ESID=%9lx VSID=%13lx LLP:%3lx\n", in slb_dump_contents()
282 (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp); in slb_dump_contents()
285 pr_err("----------------------------------\n"); in slb_dump_contents()
288 pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr); in slb_dump_contents()
290 n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES); in slb_dump_contents()
292 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
295 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
301 * vmalloc is not bolted, so just have to flush non-bolted. in slb_vmalloc_update()
310 for (i = 0; i < ti->slb_preload_nr; i++) { in preload_hit()
313 idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR; in preload_hit()
314 if (esid == ti->slb_preload_esid[idx]) in preload_hit()
336 idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR; in preload_add()
337 ti->slb_preload_esid[idx] = esid; in preload_add()
338 if (ti->slb_preload_nr == SLB_PRELOAD_NR) in preload_add()
339 ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR; in preload_add()
341 ti->slb_preload_nr++; in preload_add()
348 if (!ti->slb_preload_nr) in preload_age()
350 ti->slb_preload_nr--; in preload_age()
351 ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR; in preload_age()
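The preload_hit()/preload_add()/preload_age() hits above outline a small FIFO ring of recently used ESIDs (slb_preload_esid[] indexed through slb_preload_tail and slb_preload_nr, modulo SLB_PRELOAD_NR). A minimal stand-alone sketch of the same ring discipline, using hypothetical names and an assumed ring size of 16 in place of SLB_PRELOAD_NR:

    #include <stdbool.h>
    #include <stdint.h>

    #define PRELOAD_NR 16                   /* assumed ring size, cf. SLB_PRELOAD_NR */

    struct preload_ring {                   /* hypothetical stand-in for the thread_info fields */
        uint32_t esid[PRELOAD_NR];
        unsigned int tail;                  /* index of the oldest entry */
        unsigned int nr;                    /* number of valid entries */
    };

    /* Is this ESID already preloaded? (cf. preload_hit()) */
    bool ring_hit(const struct preload_ring *r, uint32_t esid)
    {
        for (unsigned int i = 0; i < r->nr; i++) {
            unsigned int idx = (r->tail + i) % PRELOAD_NR;

            if (r->esid[idx] == esid)
                return true;
        }
        return false;
    }

    /* Add an ESID, overwriting the oldest slot once full (cf. preload_add()). */
    bool ring_add(struct preload_ring *r, uint32_t esid)
    {
        unsigned int idx;

        if (ring_hit(r, esid))
            return false;

        idx = (r->tail + r->nr) % PRELOAD_NR;
        r->esid[idx] = esid;
        if (r->nr == PRELOAD_NR)
            r->tail = (r->tail + 1) % PRELOAD_NR;   /* ring full: drop the oldest */
        else
            r->nr++;
        return true;
    }

    /* Retire the oldest entry, if any (cf. preload_age()). */
    void ring_age(struct preload_ring *r)
    {
        if (!r->nr)
            return;
        r->nr--;
        r->tail = (r->tail + 1) % PRELOAD_NR;
    }

The switch_slb() hits further down walk this ring from oldest to newest and rebuild each effective address as esid << SID_SHIFT before re-inserting the segment.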
357 struct mm_struct *mm = current->mm; in slb_setup_new_exec()
366 if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR) in slb_setup_new_exec()
393 if (!is_kernel_addr(mm->mmap_base)) { in slb_setup_new_exec()
394 if (preload_add(ti, mm->mmap_base)) in slb_setup_new_exec()
395 slb_allocate_user(mm, mm->mmap_base); in slb_setup_new_exec()
407 struct mm_struct *mm = current->mm; in preload_new_slb_context()
408 unsigned long heap = mm->start_brk; in preload_new_slb_context()
413 if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR) in preload_new_slb_context()
444 unsigned long slbie_data = get_paca()->slb_cache[index]; in slb_cache_slbie_kernel()
445 unsigned long ksp = get_paca()->kstack; in slb_cache_slbie_kernel()
458 unsigned long slbie_data = get_paca()->slb_cache[index]; in slb_cache_slbie_user()
474 * We need interrupts hard-disabled here, not just soft-disabled, in switch_slb()
484 get_paca()->slb_cache_ptr = 0; in switch_slb()
485 get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; in switch_slb()
497 unsigned long offset = get_paca()->slb_cache_ptr; in switch_slb()
519 get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; in switch_slb()
522 get_paca()->slb_cache_ptr = 0; in switch_slb()
524 get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; in switch_slb()
534 tsk->thread.load_slb++; in switch_slb()
535 if (!tsk->thread.load_slb) { in switch_slb()
542 for (i = 0; i < ti->slb_preload_nr; i++) { in switch_slb()
546 idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR; in switch_slb()
547 ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT; in switch_slb()
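The switch_slb() hits at source lines 534-535 increment thread.load_slb and test whether it wrapped to zero; assuming the counter is a single byte, that test fires roughly once every 256 context switches and is used to refresh the preload ring. A trivial stand-alone sketch of the wrap trick (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* An 8-bit counter that wraps to zero: returns true once every 256 calls. */
    int every_256th(uint8_t *counter)
    {
        (*counter)++;
        return *counter == 0;
    }

    int main(void)
    {
        uint8_t load_slb = 0;               /* cf. tsk->thread.load_slb */

        for (int i = 1; i <= 1024; i++) {
            if (every_256th(&load_slb))
                printf("refresh the preload ring at switch %d\n", i);
        }
        return 0;
    }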
574 /* Prepare our SLB miss handler based on our page size */ in slb_initialize()
578 get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp; in slb_initialize()
584 pr_devel("SLB: linear LLP = %04lx\n", linear_llp); in slb_initialize()
585 pr_devel("SLB: io LLP = %04lx\n", io_llp); in slb_initialize()
587 pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp); in slb_initialize()
591 get_paca()->stab_rr = SLB_NUM_BOLTED - 1; in slb_initialize()
592 get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; in slb_initialize()
593 get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; in slb_initialize()
606 * get_paca()->kstack hasn't been initialized yet. in slb_initialize()
611 (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET) in slb_initialize()
612 create_shadowed_slbe(get_paca()->kstack, in slb_initialize()
631 slb_cache_index = local_paca->slb_cache_ptr; in slb_cache_update()
637 local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT; in slb_cache_update()
638 local_paca->slb_cache_ptr++; in slb_cache_update()
645 local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1; in slb_cache_update()
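The slb_cache_update() hits show a bounded invalidation cache: while there is room, the top 36 bits of each inserted ESID are logged in slb_cache[]; once that would overflow, slb_cache_ptr is bumped past SLB_CACHE_ENTRIES so the consumer can tell the cache no longer reflects the SLB and must fall back to a full flush. A stand-alone sketch of that pattern, with hypothetical names and an assumed cache size of 8:

    #include <stdbool.h>

    #define CACHE_ENTRIES 8                 /* assumed size, cf. SLB_CACHE_ENTRIES */

    struct inval_cache {                    /* hypothetical stand-in for the paca fields */
        unsigned long entry[CACHE_ENTRIES];
        unsigned int ptr;                   /* ptr > CACHE_ENTRIES means "overflowed" */
    };

    /* Log one inserted ESID; poison the pointer on overflow (cf. slb_cache_update()). */
    void cache_record(struct inval_cache *c, unsigned long esid)
    {
        if (c->ptr < CACHE_ENTRIES)
            c->entry[c->ptr++] = esid;
        else
            c->ptr = CACHE_ENTRIES + 1;     /* too many entries to track individually */
    }

    /* Consumer side: is the cache still an accurate record of what was inserted? */
    bool cache_usable(const struct inval_cache *c)
    {
        return c->ptr <= CACHE_ENTRIES;
    }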
669 if (local_paca->slb_used_bitmap != U32_MAX) { in alloc_slb_index()
670 index = ffz(local_paca->slb_used_bitmap); in alloc_slb_index()
671 local_paca->slb_used_bitmap |= 1U << index; in alloc_slb_index()
673 local_paca->slb_kern_bitmap |= 1U << index; in alloc_slb_index()
675 /* round-robin replacement of slb starting at SLB_NUM_BOLTED. */ in alloc_slb_index()
676 index = local_paca->stab_rr; in alloc_slb_index()
677 if (index < (mmu_slb_size - 1)) in alloc_slb_index()
681 local_paca->stab_rr = index; in alloc_slb_index()
684 local_paca->slb_kern_bitmap |= 1U << index; in alloc_slb_index()
686 local_paca->slb_kern_bitmap &= ~(1U << index); in alloc_slb_index()
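The alloc_slb_index() hits show the slot-allocation policy: prefer a never-used slot found by scanning slb_used_bitmap for a zero bit; once every slot has been used, fall back to round-robin replacement over the non-bolted slots via stab_rr, while slb_kern_bitmap tracks which slots hold kernel entries. A stand-alone sketch of the same policy, with hypothetical names, an assumed two bolted entries, and an assumed 32-entry SLB:

    #include <stdint.h>

    #define NUM_BOLTED 2                    /* assumed, cf. SLB_NUM_BOLTED */
    #define SLB_SLOTS  32                   /* assumed SLB size, cf. mmu_slb_size */

    struct slb_alloc {                      /* hypothetical stand-in for the paca fields */
        uint32_t used_bitmap;               /* bit set: slot has been used at least once */
        uint32_t kern_bitmap;               /* bit set: slot currently holds a kernel entry */
        unsigned int rr;                    /* round-robin victim pointer */
    };

    /* First zero bit; the caller guarantees the bitmap is not full (cf. ffz()). */
    unsigned int first_zero(uint32_t bits)
    {
        unsigned int i = 0;

        while (bits & (1U << i))
            i++;
        return i;
    }

    /* Prefer a never-used slot, else round-robin over the non-bolted slots. */
    unsigned int pick_slot(struct slb_alloc *a, int kernel)
    {
        unsigned int index;

        if (a->used_bitmap != UINT32_MAX) {
            index = first_zero(a->used_bitmap);
            a->used_bitmap |= 1U << index;
            if (kernel)
                a->kern_bitmap |= 1U << index;
        } else {
            index = a->rr;
            if (index < SLB_SLOTS - 1)
                index++;
            else
                index = NUM_BOLTED;         /* wrap around, skipping the bolted slots */
            a->rr = index;
            if (kernel)
                a->kern_bitmap |= 1U << index;
            else
                a->kern_bitmap &= ~(1U << index);
        }
        return index;
    }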
703 return -EFAULT; in slb_insert_entry()
729 int slb_cache_index = local_paca->slb_cache_ptr; in slb_insert_entry()
733 * cache of inserted (non-bolted) kernel SLB entries. All in slb_insert_entry()
734 * non-bolted kernel entries are flushed on any user fault, in slb_insert_entry()
735 * or if there are already 3 non-bolted kernel entries. in slb_insert_entry()
747 local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT; in slb_insert_entry()
748 local_paca->slb_cache_ptr = slb_cache_index; in slb_insert_entry()
770 return -EFAULT; in slb_allocate_kernel()
778 return -EFAULT; in slb_allocate_kernel()
785 return -EFAULT; in slb_allocate_kernel()
787 flags = local_paca->vmalloc_sllp; in slb_allocate_kernel()
792 return -EFAULT; in slb_allocate_kernel()
797 return -EFAULT; in slb_allocate_kernel()
820 if (ea >= mm_ctx_slb_addr_limit(&mm->context)) in slb_allocate_user()
821 return -EFAULT; in slb_allocate_user()
823 context = get_user_context(&mm->context, ea); in slb_allocate_user()
825 return -EFAULT; in slb_allocate_user()
829 return -EFAULT; in slb_allocate_user()
847 if (unlikely(!(regs->msr & MSR_RI))) in do_slb_fault()
848 return -EINVAL; in do_slb_fault()
853 * mm->context stuff is not. in do_slb_fault()
859 * looking at possible non-bolted memory. We could test user vs in do_slb_fault()
869 BUG_ON(local_paca->in_kernel_slb_handler); in do_slb_fault()
870 local_paca->in_kernel_slb_handler = 1; in do_slb_fault()
874 local_paca->in_kernel_slb_handler = 0; in do_slb_fault()
878 struct mm_struct *mm = current->mm; in do_slb_fault()
882 return -EFAULT; in do_slb_fault()
894 if (err == -EFAULT) { in do_bad_slb_fault()
899 } else if (err == -EINVAL) { in do_bad_slb_fault()