Lines Matching +full:llp +full:- +full:based

5  * Based on earlier code written by:
29 #include <asm/code-patching.h>
66 WRITE_ONCE(p->save_area[index].esid, 0); in slb_shadow_update()
67 WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); in slb_shadow_update()
68 WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); in slb_shadow_update()
73 WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index)); in slb_shadow_clear()
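The three WRITE_ONCE stores at lines 66-68 follow a deliberate order: the ESID is zeroed first so the shadow entry is never seen as valid while its VSID is being replaced, and the new ESID is published only after the VSID is in place. Below is a minimal userspace sketch of that ordering; the shadow_entry struct, the shadow_update helper and the sample values are illustrative stand-ins, and plain volatile stores approximate WRITE_ONCE and the be64 conversions.

/*
 * Model of the update ordering at lines 66-68: invalidate (esid = 0),
 * write the new vsid, then publish the new esid last.
 */
#include <stdint.h>
#include <stdio.h>

struct shadow_entry {			/* stand-in for a save_area[] slot */
	volatile uint64_t esid;
	volatile uint64_t vsid;
};

static void shadow_update(struct shadow_entry *e, uint64_t esid, uint64_t vsid)
{
	e->esid = 0;			/* entry invalid while it changes */
	e->vsid = vsid;
	e->esid = esid;			/* publish only once vsid is in place */
}

int main(void)
{
	struct shadow_entry e = { 0, 0 };

	shadow_update(&e, 0xc000000000000502ULL, 0x400000000000ULL);	/* placeholder values */
	printf("esid=%#llx vsid=%#llx\n",
	       (unsigned long long)e.esid, (unsigned long long)e.vsid);
	return 0;
}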
105 : "r" (be64_to_cpu(p->save_area[index].vsid)), in __slb_restore_bolted_realmode()
106 "r" (be64_to_cpu(p->save_area[index].esid))); in __slb_restore_bolted_realmode()
118 get_paca()->slb_cache_ptr = 0; in slb_restore_bolted_realmode()
144 ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX); in __slb_flush_and_rebolt()
151 slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX); in __slb_flush_and_rebolt()
153 be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid); in __slb_flush_and_rebolt()
160 /* Slot 1 - first VMALLOC segment */ in __slb_flush_and_rebolt()
162 /* Slot 2 - kernel stack */ in __slb_flush_and_rebolt()
184 get_paca()->slb_cache_ptr = 0; in slb_flush_and_rebolt()
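Line 144 builds the bolted kernel-stack entry with mk_esid_data(), which packs the segment (ESID) bits of the effective address together with a valid bit and the SLB slot number; the matching VSID is then pulled back out of the shadow area at line 153. A rough userspace model of that packing follows; the 256MB segment mask and the valid-bit position are assumptions for illustration, not the kernel's header values.

/*
 * Rough model of mk_esid_data() as used at line 144: ESID bits of the
 * address, plus an assumed valid bit, plus the slot index.
 */
#include <stdint.h>
#include <stdio.h>

#define ESID_MASK_256M	0xfffffffff0000000ULL	/* assumed: bits above the 256MB segment offset */
#define SLB_ESID_V	0x0000000008000000ULL	/* assumed: entry-valid bit */

static uint64_t mk_esid_data_model(uint64_t ea, unsigned int slot)
{
	return (ea & ESID_MASK_256M) | SLB_ESID_V | slot;
}

int main(void)
{
	/* slot 2 is the kernel stack, per the comments at lines 160-162 */
	uint64_t esid_data = mk_esid_data_model(0xc0000000feede000ULL, 2);

	printf("esid_data = %#llx\n", (unsigned long long)esid_data);
	return 0;
}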
235 * We need interrupts hard-disabled here, not just soft-disabled, in switch_slb()
241 offset = get_paca()->slb_cache_ptr; in switch_slb()
247 slbie_data = (unsigned long)get_paca()->slb_cache[i] in switch_slb()
263 get_paca()->slb_cache_ptr = 0; in switch_slb()
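switch_slb() consumes the slb_cache that insert_slb_entry() fills further down: as long as the cache has not overflowed, only the user segments recorded there need an slbie on context switch, otherwise everything except the bolted entries is flushed, and in both cases the cache pointer is reset (line 263). A userspace model of that decision; SLB_CACHE_ENTRIES and the 28-bit segment shift are assumed constants, and printouts stand in for the slbie/slbia instructions.

/*
 * Model of the cache-driven invalidation around lines 241-263: a
 * cache pointer within SLB_CACHE_ENTRIES means targeted invalidation
 * of the cached ESIDs (stored shifted right by 28, see line 436);
 * anything larger means the cache overflowed and a full flush is due.
 */
#include <stdint.h>
#include <stdio.h>

#define SLB_CACHE_ENTRIES	8
#define SID_SHIFT		28	/* 256MB segments */

static void switch_slb_model(const uint32_t *slb_cache, int slb_cache_ptr)
{
	if (slb_cache_ptr <= SLB_CACHE_ENTRIES) {
		for (int i = 0; i < slb_cache_ptr; i++) {
			uint64_t ea = (uint64_t)slb_cache[i] << SID_SHIFT;
			printf("slbie segment at %#llx\n", (unsigned long long)ea);
		}
	} else {
		printf("cache overflowed: flush all non-bolted entries\n");
	}
	/* caller then zeroes the cache pointer, as at line 263 */
}

int main(void)
{
	uint32_t cache[SLB_CACHE_ENTRIES] = { 0x10002, 0x10007 };

	switch_slb_model(cache, 2);
	switch_slb_model(cache, SLB_CACHE_ENTRIES + 1);
	return 0;
}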
294 * (which is actually addi) and cmpldi both take a 16-bit immediate in patch_slb_encoding()
296 * ie. bits 16-31 (Big endian bit order) or the lower 16 bits. in patch_slb_encoding()
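The comment at lines 294-296 is the key to why the SLB encodings can be patched in place: li (really addi) and cmpldi both carry their 16-bit immediate in bits 16-31 of the instruction word, i.e. the low halfword, so only those bits have to be rewritten once the real page-size encodings are known. A userspace sketch of that splice; the function name and sample opcode are illustrative, and the kernel routes the store through its instruction-patching machinery rather than a plain assignment.

/*
 * Patch a 16-bit immediate into the low half of a 32-bit instruction
 * word, keeping the opcode and register fields intact.
 */
#include <stdint.h>
#include <stdio.h>

static void patch_imm16(uint32_t *insn, uint16_t immed)
{
	*insn = (*insn & 0xffff0000u) | immed;
}

int main(void)
{
	uint32_t insn = 0x38600000;	/* li r3,0 (addi r3,0,0) */

	patch_imm16(&insn, 0x1234);
	printf("patched insn: 0x%08x\n", insn);	/* now li r3,0x1234 */
	return 0;
}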
331 /* Prepare our SLB miss handler based on our page size */ in slb_initialize()
335 get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp; in slb_initialize()
348 pr_devel("SLB: linear LLP = %04lx\n", linear_llp); in slb_initialize()
349 pr_devel("SLB: io LLP = %04lx\n", io_llp); in slb_initialize()
354 pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp); in slb_initialize()
358 get_paca()->stab_rr = SLB_NUM_BOLTED - 1; in slb_initialize()
372 * get_paca()->kstack hasn't been initialized yet. in slb_initialize()
377 (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET) in slb_initialize()
378 create_shadowed_slbe(get_paca()->kstack, in slb_initialize()
402 index = get_paca()->stab_rr; in insert_slb_entry()
405 * simple round-robin replacement of slb starting at SLB_NUM_BOLTED. in insert_slb_entry()
407 if (index < (mmu_slb_size - 1)) in insert_slb_entry()
412 get_paca()->stab_rr = index; in insert_slb_entry()
430 slb_cache_index = get_paca()->slb_cache_ptr; in insert_slb_entry()
436 get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28; in insert_slb_entry()
437 get_paca()->slb_cache_ptr++; in insert_slb_entry()
444 get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1; in insert_slb_entry()
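Lines 402-412 and 430-444 sketch insert_slb_entry()'s bookkeeping: stab_rr is a round-robin pointer that cycles through the slots above the bolted ones, and each new user ESID is either remembered in slb_cache (top bits of esid_data, line 436) or, when the cache is full, the pointer is pushed past SLB_CACHE_ENTRIES so the next switch_slb() falls back to a full flush. A compact userspace model of both pieces; the constant values are representative assumptions, not the kernel's definitions.

/*
 * Model of the slot selection (lines 402-412) and slb_cache handling
 * (lines 430-444) in insert_slb_entry().
 */
#include <stdint.h>
#include <stdio.h>

#define SLB_NUM_BOLTED		3	/* assumed */
#define SLB_CACHE_ENTRIES	8	/* assumed */
#define MMU_SLB_SIZE		32	/* assumed */

struct paca_model {
	int stab_rr;				/* round-robin slot pointer */
	int slb_cache_ptr;			/* valid slb_cache entries */
	uint32_t slb_cache[SLB_CACHE_ENTRIES];
};

static int pick_slot(struct paca_model *p)
{
	int index = p->stab_rr;

	/* round-robin over the non-bolted slots, wrapping to SLB_NUM_BOLTED */
	if (index < MMU_SLB_SIZE - 1)
		index++;
	else
		index = SLB_NUM_BOLTED;
	p->stab_rr = index;
	return index;
}

static void remember_esid(struct paca_model *p, uint64_t esid_data)
{
	if (p->slb_cache_ptr < SLB_CACHE_ENTRIES)
		p->slb_cache[p->slb_cache_ptr++] = (uint32_t)(esid_data >> 28);
	else
		p->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;	/* overflow: force a full flush later */
}

int main(void)
{
	struct paca_model p = { .stab_rr = SLB_NUM_BOLTED - 1 };	/* as initialised at line 358 */

	for (int i = 0; i < 4; i++)
		printf("allocated slot %d\n", pick_slot(&p));
	remember_esid(&p, 0x0000123450000000ULL);
	printf("cache_ptr=%d cache[0]=%#x\n", p.slb_cache_ptr, p.slb_cache[0]);
	return 0;
}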
450 struct mm_struct *mm = current->mm; in handle_multi_context_slb_miss()
465 unsigned long ea = regs->dar; in slb_miss_large_addr()
485 if (ea >= current->mm->context.slb_addr_limit) in slb_miss_large_addr()
488 context = get_ea_context(&current->mm->context, ea); in slb_miss_large_addr()