Lines Matching full:ea

37 static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
42 static inline unsigned long mk_esid_data(unsigned long ea, int ssize, in mk_esid_data() argument
45 return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index; in mk_esid_data()
55 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, in mk_vsid_data() argument
58 return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); in mk_vsid_data()
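
The two helpers above build the raw ESID and VSID words that an SLB entry is made of. Below is a standalone sketch of the ESID side, mirroring the expression at line 45; the shift widths and the SLB_ESID_V value are restated here as assumptions rather than taken from the kernel headers:

#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT     28                     /* 256MB segments */
#define SID_SHIFT_1T  40                     /* 1TB segments */
#define ESID_MASK     (~((1ULL << SID_SHIFT) - 1))
#define ESID_MASK_1T  (~((1ULL << SID_SHIFT_1T) - 1))
#define SLB_ESID_V    0x0000000008000000ULL  /* entry-valid bit (assumed value) */

enum { MMU_SEGSIZE_256M = 0, MMU_SEGSIZE_1T = 1 };

static uint64_t slb_esid_mask(int ssize)
{
	return (ssize == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T;
}

/* Mirrors line 45: segment base | valid bit | SLB slot index. */
static uint64_t mk_esid_data(uint64_t ea, int ssize, uint64_t index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

int main(void)
{
	uint64_t w = mk_esid_data(0xc000000012345678ULL, MMU_SEGSIZE_256M, 2);
	printf("esid word: 0x%016llx\n", (unsigned long long)w);
	return 0;
}
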
72 static void assert_slb_presence(bool present, unsigned long ea) in assert_slb_presence() argument
86 ea &= ~((1UL << SID_SHIFT) - 1); in assert_slb_presence()
87 asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0"); in assert_slb_presence()
93 static inline void slb_shadow_update(unsigned long ea, int ssize, in slb_shadow_update() argument
105 WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); in slb_shadow_update()
106 WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); in slb_shadow_update()
114 static inline void create_shadowed_slbe(unsigned long ea, int ssize, in create_shadowed_slbe() argument
123 slb_shadow_update(ea, ssize, flags, index); in create_shadowed_slbe()
125 assert_slb_presence(false, ea); in create_shadowed_slbe()
127 : "r" (mk_vsid_data(ea, ssize, flags)), in create_shadowed_slbe()
128 "r" (mk_esid_data(ea, ssize, index)) in create_shadowed_slbe()
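
create_shadowed_slbe() updates the shadow copy (line 123) before issuing the real slbmte, so a hypervisor rebuilding the SLB from the shadow never sees a half-updated pair. In the full source the ESID is cleared before the two writes at lines 105-106 (that line doesn't match the search). A userspace model of that publication order, an assumption-level sketch with volatile stores standing in for WRITE_ONCE:

#include <stdio.h>
#include <stdint.h>

struct slb_shadow_entry {
	volatile uint64_t esid;	/* carries the valid bit */
	volatile uint64_t vsid;
};

static void shadow_update(struct slb_shadow_entry *e,
			  uint64_t new_vsid, uint64_t new_esid)
{
	e->esid = 0;		/* 1. take the entry invalid first */
	e->vsid = new_vsid;	/* 2. vsid may change while invalid */
	e->esid = new_esid;	/* 3. publish: entry is valid again */
}

int main(void)
{
	struct slb_shadow_entry e = { 0, 0 };
	shadow_update(&e, 0x1234, 0xc000000008000002ULL);
	printf("esid=0x%llx vsid=0x%llx\n",
	       (unsigned long long)e.esid, (unsigned long long)e.vsid);
	return 0;
}
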
293 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
296 pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); in slb_dump_contents()
321 static bool preload_add(struct thread_info *ti, unsigned long ea) in preload_add() argument
328 if (ea & ESID_MASK_1T) in preload_add()
329 ea &= ESID_MASK_1T; in preload_add()
332 esid = ea >> SID_SHIFT; in preload_add()
545 unsigned long ea; in switch_slb() local
548 ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT; in switch_slb()
550 slb_allocate_user(mm, ea); in switch_slb()
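
preload_add() stores only the ESID (EA >> 28, line 332), and switch_slb() reconstructs an address by shifting back (line 548), so one preload slot covers a whole 256MB segment. A sketch of that bookkeeping as a small FIFO; SLB_PRELOAD_NR and the ring layout are assumptions modeled on the thread_info fields, and the extra normalization of 1T-segment EAs at lines 328-329 is omitted:

#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT      28
#define SLB_PRELOAD_NR 16	/* assumed ring size */

struct preload {
	uint32_t esid[SLB_PRELOAD_NR];
	unsigned int head, nr;
};

/* Returns 1 if the segment was newly added, 0 if already cached. */
static int preload_add(struct preload *p, uint64_t ea)
{
	uint32_t esid = (uint32_t)(ea >> SID_SHIFT);
	unsigned int i;

	for (i = 0; i < p->nr; i++)
		if (p->esid[(p->head + i) % SLB_PRELOAD_NR] == esid)
			return 0;

	p->esid[(p->head + p->nr) % SLB_PRELOAD_NR] = esid;
	if (p->nr < SLB_PRELOAD_NR)
		p->nr++;
	else
		p->head = (p->head + 1) % SLB_PRELOAD_NR;	/* evict oldest */
	return 1;
}

int main(void)
{
	struct preload p = { { 0 }, 0, 0 };
	preload_add(&p, 0x10000000ULL);
	preload_add(&p, 0x10abcdefULL);	/* same 256MB segment: no new slot */
	printf("slots used: %u\n", p.nr);
	return 0;
}
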
695 static long slb_insert_entry(unsigned long ea, unsigned long context, in slb_insert_entry() argument
702 vsid = get_vsid(context, ea, ssize); in slb_insert_entry()
720 esid_data = mk_esid_data(ea, ssize, index); in slb_insert_entry()
728 assert_slb_presence(false, ea); in slb_insert_entry()
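
Before issuing the slbmte with the words built at line 720, slb_insert_entry() picks a victim slot round-robin across the non-bolted SLB entries. A sketch of that replacement policy; the SLB_NUM_BOLTED value and the SLB size used below are illustrative assumptions:

#include <stdio.h>

#define SLB_NUM_BOLTED 2	/* assumed count of bolted kernel entries */

static unsigned int stab_rr = SLB_NUM_BOLTED;

static unsigned int next_slot(unsigned int slb_size)
{
	/* advance, wrapping back over the bolted range */
	if (stab_rr < slb_size - 1)
		stab_rr++;
	else
		stab_rr = SLB_NUM_BOLTED;
	return stab_rr;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("victim slot: %u\n", next_slot(4));
	return 0;
}
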
761 static long slb_allocate_kernel(unsigned long ea, unsigned long id) in slb_allocate_kernel() argument
770 if ((ea & EA_MASK) > (1UL << H_MAX_PHYSMEM_BITS)) in slb_allocate_kernel()
778 if (ea >= H_VMEMMAP_END) in slb_allocate_kernel()
785 if (ea >= H_VMALLOC_END) in slb_allocate_kernel()
792 if (ea >= H_KERN_IO_END) in slb_allocate_kernel()
805 context = get_kernel_context(ea); in slb_allocate_kernel()
807 return slb_insert_entry(ea, context, flags, ssize, true); in slb_allocate_kernel()
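
The checks at lines 770-792 bound each kernel region before an entry is installed: an EA at or past the region's configured end must fault rather than get an SLB entry. A sketch of that dispatch with the limits passed as parameters, since the real EA_MASK and H_*_END values depend on the MMU geometry:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

enum region_id { LINEAR_MAP, VMEMMAP, VMALLOC, IO_REGION };

struct region_limits {
	uint64_t ea_mask;	/* strips the region bits (EA_MASK) */
	uint64_t max_linear;	/* 1UL << H_MAX_PHYSMEM_BITS */
	uint64_t vmemmap_end;	/* H_VMEMMAP_END */
	uint64_t vmalloc_end;	/* H_VMALLOC_END */
	uint64_t kern_io_end;	/* H_KERN_IO_END */
};

static long check_kernel_ea(uint64_t ea, enum region_id id,
			    const struct region_limits *lim)
{
	switch (id) {
	case LINEAR_MAP:
		/* line 770 masks ea down to its in-region offset first */
		if ((ea & lim->ea_mask) > lim->max_linear)
			return -EFAULT;
		break;
	case VMEMMAP:
		if (ea >= lim->vmemmap_end)	/* line 778 */
			return -EFAULT;
		break;
	case VMALLOC:
		if (ea >= lim->vmalloc_end)	/* line 785 */
			return -EFAULT;
		break;
	case IO_REGION:
		if (ea >= lim->kern_io_end)	/* line 792 */
			return -EFAULT;
		break;
	}
	return 0;
}

int main(void)
{
	/* Illustrative limits only; real values come from the MMU config. */
	struct region_limits lim = {
		.ea_mask     = 0x000fffffffffffffULL,
		.max_linear  = 1ULL << 46,
		.vmemmap_end = 0xc00e000000000000ULL,
		.vmalloc_end = 0xc00a000000000000ULL,
		.kern_io_end = 0xc00c000000000000ULL,
	};
	printf("%ld\n", check_kernel_ea(0xc00a000000000000ULL, VMALLOC, &lim));
	return 0;
}
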
810 static long slb_allocate_user(struct mm_struct *mm, unsigned long ea) in slb_allocate_user() argument
821 if (ea >= mm_ctx_slb_addr_limit(&mm->context)) in slb_allocate_user()
824 context = get_user_context(&mm->context, ea); in slb_allocate_user()
828 if (unlikely(ea >= H_PGTABLE_RANGE)) { in slb_allocate_user()
833 ssize = user_segment_size(ea); in slb_allocate_user()
835 bpsize = get_slice_psize(mm, ea); in slb_allocate_user()
838 return slb_insert_entry(ea, context, flags, ssize, false); in slb_allocate_user()
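
user_segment_size() at line 833 picks the segment size from the address alone: addresses at or above 1TB can use 1TB segments, everything lower stays on 256MB segments. A minimal sketch, assuming the MMU advertises 1T segment support (mmu_highuser_ssize):

#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT_1T 40		/* 1TB segment boundary */

enum { MMU_SEGSIZE_256M = 0, MMU_SEGSIZE_1T = 1 };

static int user_segment_size(uint64_t addr)
{
	/* assumes 1T segments are available on this CPU */
	return (addr >= (1ULL << SID_SHIFT_1T)) ? MMU_SEGSIZE_1T
						: MMU_SEGSIZE_256M;
}

int main(void)
{
	printf("%d %d\n",
	       user_segment_size(0x0000000010000000ULL),   /* 256MB */
	       user_segment_size(0x0000010000000000ULL));  /* 1TB   */
	return 0;
}
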
841 long do_slb_fault(struct pt_regs *regs, unsigned long ea) in do_slb_fault() argument
843 unsigned long id = get_region_id(ea); in do_slb_fault()
873 err = slb_allocate_kernel(ea, id); in do_slb_fault()
885 err = slb_allocate_user(mm, ea); in do_slb_fault()
887 preload_add(current_thread_info(), ea); in do_slb_fault()
893 void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err) in do_bad_slb_fault() argument
897 _exception(SIGSEGV, regs, SEGV_BNDERR, ea); in do_bad_slb_fault()
899 bad_page_fault(regs, ea, SIGSEGV); in do_bad_slb_fault()
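
do_slb_fault() splits on the region of the faulting EA: kernel regions go to slb_allocate_kernel() (line 873), user addresses go to slb_allocate_user() (line 885), and a successful user allocation is recorded for preloading (line 887). A stub-level sketch of that dispatch; the boundary constant and the helpers below stand in for get_region_id() and the kernel functions listed above:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define KERNEL_REGION_START 0xc000000000000000ULL	/* assumed boundary */

/* Stubs standing in for the kernel allocators and preload cache. */
static long slb_allocate_kernel_stub(uint64_t ea) { (void)ea; return 0; }
static long slb_allocate_user_stub(uint64_t ea)   { (void)ea; return 0; }
static void preload_add_stub(uint64_t ea)         { (void)ea; }

static long do_slb_fault_sketch(uint64_t ea, int have_mm)
{
	if (ea >= KERNEL_REGION_START)
		return slb_allocate_kernel_stub(ea);

	if (!have_mm)			/* no user context: cannot resolve */
		return -EFAULT;

	long err = slb_allocate_user_stub(ea);
	if (!err)
		preload_add_stub(ea);	/* mirror of line 887 */
	return err;
}

int main(void)
{
	printf("user fault: %ld\n", do_slb_fault_sketch(0x10000000ULL, 1));
	printf("kernel fault: %ld\n",
	       do_slb_fault_sketch(0xc000000000001000ULL, 1));
	return 0;
}
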