/arch/powerpc/mm/ |
D | hash_native_64.c |
     48  static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)  in __tlbie() argument
     69  switch (psize) {  in __tlbie()
     83  penc = mmu_psize_defs[psize].penc[apsize];  in __tlbie()
    103  static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)  in __tlbiel() argument
    118  switch (psize) {  in __tlbiel()
    131  penc = mmu_psize_defs[psize].penc[apsize];  in __tlbiel()
    151  static inline void tlbie(unsigned long vpn, int psize, int apsize,  in tlbie() argument
    160  use_local = mmu_psize_defs[psize].tlbiel;  in tlbie()
    165  __tlbiel(vpn, psize, apsize, ssize);  in tlbie()
    168  __tlbie(vpn, psize, apsize, ssize);  in tlbie()
    [all …]
|
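The hash_native_64.c fragments above show tlbie() choosing between a CPU-local and a broadcast TLB invalidation based on the page-size table. The following is a minimal sketch of that dispatch, assuming mmu_psize_defs[psize].tlbiel flags sizes that support the tlbiel instruction; the name tlbie_sketch, the local parameter and the omitted lock handling are illustrative, not the kernel's actual code.

    /* Sketch only: pick a CPU-local (tlbiel) or broadcast (tlbie) flush
     * depending on whether this page size supports local invalidation.
     * The "local" hint and the missing lock handling are assumptions. */
    static inline void tlbie_sketch(unsigned long vpn, int psize, int apsize,
                                    int ssize, int local)
    {
            int use_local = local && mmu_psize_defs[psize].tlbiel;

            if (use_local)
                    __tlbiel(vpn, psize, apsize, ssize);    /* this CPU only */
            else
                    __tlbie(vpn, psize, apsize, ssize);     /* broadcast */
    }
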
D | slice.c |
    150  static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)  in slice_mask_for_size() argument
    160  if (((lpsizes >> (i * 4)) & 0xf) == psize)  in slice_mask_for_size()
    167  if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)  in slice_mask_for_size()
    196  static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)  in slice_convert() argument
    204  slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);  in slice_convert()
    216  (((unsigned long)psize) << (i * 4));  in slice_convert()
    228  (((unsigned long)psize) << (mask_index * 4));  in slice_convert()
    268  int psize)  in slice_find_area_bottomup() argument
    270  int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);  in slice_find_area_bottomup()
    311  int psize)  in slice_find_area_topdown() argument
    [all …]
|
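The slice.c fragments above read and write one 4-bit page-size index per slice out of a packed word (the lpsizes/hpsizes values). A standalone sketch of that nibble packing; the helper names packed_psize_get/packed_psize_set are hypothetical and not kernel functions.

    /* Slot i of a packed page-size word lives in bits [4*i, 4*i+3]. */
    static inline unsigned int packed_psize_get(unsigned long sizes, int i)
    {
            return (sizes >> (i * 4)) & 0xf;
    }

    static inline unsigned long packed_psize_set(unsigned long sizes, int i,
                                                 unsigned int psize)
    {
            sizes &= ~(0xfUL << (i * 4));       /* clear the old nibble */
            return sizes | ((unsigned long)psize << (i * 4));
    }

Four bits per slice keeps sixteen page-size indices in a single unsigned long, which is why the comparisons above shift by (i * 4) and mask with 0xf.
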
D | tlb_nohash.c |
    124  static inline int mmu_get_tsize(int psize)  in mmu_get_tsize() argument
    126  return mmu_psize_defs[psize].enc;  in mmu_get_tsize()
    129  static inline int mmu_get_tsize(int psize)  in mmu_get_tsize() argument
    435  int i, psize;  in setup_page_sizes() local
    448  for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {  in setup_page_sizes()
    452  def = &mmu_psize_defs[psize];  in setup_page_sizes()
    487  for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {  in setup_page_sizes()
    488  struct mmu_psize_def *def = &mmu_psize_defs[psize];  in setup_page_sizes()
    496  if (book3e_htw_mode && psize == MMU_PAGE_2M)  in setup_page_sizes()
    510  for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {  in setup_page_sizes()
    [all …]
|
D | tlb_hash64.c |
     49  unsigned int psize;  in hpte_need_flush() local
     65  psize = get_slice_psize(mm, addr);  in hpte_need_flush()
     67  addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);  in hpte_need_flush()
     70  psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */  in hpte_need_flush()
     73  psize = pte_pagesize_index(mm, addr, pte);  in hpte_need_flush()
    101  flush_hash_page(vpn, rpte, psize, ssize, 0);  in hpte_need_flush()
    116  if (i != 0 && (mm != batch->mm || batch->psize != psize ||  in hpte_need_flush()
    123  batch->psize = psize;  in hpte_need_flush()
    152  batch->psize, batch->ssize, local);  in __flush_tlb_pending()
|
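The hpte_need_flush() fragments above hint at a batching rule: a pending TLB flush batch only keeps accumulating entries that share the same mm, base page size and segment size, otherwise it is drained first. A hedged sketch of that decision, using only the fields visible in the fragments (batch->mm, batch->psize, batch->ssize); the surrounding context is assumed, not quoted.

    /* Sketch: flush the pending batch before mixing in an entry with a
     * different mm, page size or segment size than what it already holds. */
    if (i != 0 && (mm != batch->mm || batch->psize != psize ||
                   batch->ssize != ssize)) {
            __flush_tlb_pending(batch);         /* drain what was queued */
            i = 0;
    }
    if (i == 0) {
            batch->mm = mm;
            batch->psize = psize;
            batch->ssize = ssize;
    }
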
D | hugetlbpage-book3e.c |
     49  static inline int mmu_get_tsize(int psize)  in mmu_get_tsize() argument
     51  return mmu_psize_defs[psize].enc;  in mmu_get_tsize()
     83  unsigned long psize, tsize, shift;  in book3e_hugetlb_preload() local
     97  psize = get_slice_psize(mm, ea);  in book3e_hugetlb_preload()
     98  tsize = mmu_get_tsize(psize);  in book3e_hugetlb_preload()
     99  shift = mmu_psize_defs[psize].shift;  in book3e_hugetlb_preload()
    101  psize = vma_mmu_pagesize(vma);  in book3e_hugetlb_preload()
    102  shift = __ilog2(psize);  in book3e_hugetlb_preload()
|
D | hugepage-hash64.c |
     23  int ssize, unsigned int psize)  in __hash_page_thp() argument
     83  shift = mmu_psize_defs[psize].shift;  in __hash_page_thp()
     89  if (psize == MMU_PAGE_4K) {  in __hash_page_thp()
    111  psize, lpsize, ssize, flags);  in __hash_page_thp()
    146  psize, lpsize, ssize);  in __hash_page_thp()
    155  psize, lpsize, ssize);  in __hash_page_thp()
    172  psize, lpsize, old_pmd);  in __hash_page_thp()
    186  if (psize == MMU_PAGE_4K)  in __hash_page_thp()
|
D | hash_utils_64.c |
    184  int psize, int ssize)  in htab_bolt_mapping() argument
    190  shift = mmu_psize_defs[psize].shift;  in htab_bolt_mapping()
    196  vstart, vend, pstart, prot, psize, ssize);  in htab_bolt_mapping()
    237  HPTE_V_BOLTED, psize, psize, ssize);  in htab_bolt_mapping()
    251  int psize, int ssize)  in htab_remove_mapping() argument
    256  shift = mmu_psize_defs[psize].shift;  in htab_remove_mapping()
    266  ppc_md.hpte_removebolted(vaddr, psize, ssize);  in htab_remove_mapping()
    593  unsigned long mem_size, rnd_mem_size, pteg_count, psize;  in htab_get_table_size() local
    611  psize = mmu_psize_defs[mmu_virtual_psize].shift;  in htab_get_table_size()
    612  pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11);  in htab_get_table_size()
    [all …]
|
D | copro_fault.c |
    104  int psize, ssize;  in copro_calculate_slb() local
    111  psize = get_slice_psize(mm, ea);  in copro_calculate_slb()
    119  psize = mmu_vmalloc_psize;  in copro_calculate_slb()
    121  psize = mmu_io_psize;  in copro_calculate_slb()
    128  psize = mmu_linear_psize;  in copro_calculate_slb()
    140  vsid |= mmu_psize_defs[psize].sllp |  in copro_calculate_slb()
|
D | hugetlbpage.c |
    794  unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);  in vma_mmu_pagesize() local
    796  return 1UL << mmu_psize_to_shift(psize);  in vma_mmu_pagesize()
    859  int psize;  in hugetlbpage_init() local
    861  for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {  in hugetlbpage_init()
    864  if (!mmu_psize_defs[psize].shift)  in hugetlbpage_init()
    867  shift = mmu_psize_to_shift(psize);  in hugetlbpage_init()
    897  int psize;  in hugetlbpage_init() local
    902  for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {  in hugetlbpage_init()
    906  if (!mmu_psize_defs[psize].shift)  in hugetlbpage_init()
    909  shift = mmu_psize_to_shift(psize);  in hugetlbpage_init()
|
D | pgtable_64.c |
    729  unsigned int psize;  in hpte_do_hugepage_flush() local
    736  psize = get_slice_psize(mm, addr);  in hpte_do_hugepage_flush()
    737  BUG_ON(psize == MMU_PAGE_16M);  in hpte_do_hugepage_flush()
    740  psize = MMU_PAGE_4K;  in hpte_do_hugepage_flush()
    742  psize = MMU_PAGE_64K;  in hpte_do_hugepage_flush()
    757  return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);  in hpte_do_hugepage_flush()
|
D | hugetlbpage-hash64.c |
     19  unsigned long vflags, int psize, int ssize);
|
/arch/powerpc/include/asm/ |
D | page_64.h |
    121  unsigned int psize,
    127  extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
    129  unsigned long len, unsigned int psize);
    138  #define slice_set_user_psize(mm, psize) \  argument
    140  (mm)->context.user_psize = (psize); \
    141  (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
    149  #define slice_set_user_psize(mm, psize) do { BUG(); } while(0)  argument
    152  #define slice_set_range_psize(mm, start, len, psize) \  argument
    153  slice_set_user_psize((mm), (psize))
|
D | mmu-hash64.h |
    150  int psize;  in shift_to_mmu_psize() local
    152  for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)  in shift_to_mmu_psize()
    153  if (mmu_psize_defs[psize].shift == shift)  in shift_to_mmu_psize()
    154  return psize;  in shift_to_mmu_psize()
    235  static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,  in hpte_encode_avpn() argument
    247  v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);  in hpte_encode_avpn()
    343  unsigned long flags, int ssize, unsigned int psize);
    348  int ssize, unsigned int psize)  in __hash_page_thp() argument
    356  int ssize, int psize, int lpsize,
    360  int psize, int ssize);
    [all …]
|
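The shift_to_mmu_psize() fragments above show the reverse mapping from a page shift to a page-size index: mmu_psize_defs[] is indexed by the MMU_PAGE_* constants, and the lookup is a linear scan for a matching .shift. A minimal sketch assembled from those fragments; the -1 return for an unknown shift is an assumption, since the fragments do not show the fallthrough.

    static inline int shift_to_mmu_psize(unsigned int shift)
    {
            int psize;

            /* Linear scan of the per-page-size table for a matching shift. */
            for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
                    if (mmu_psize_defs[psize].shift == shift)
                            return psize;
            return -1;                          /* assumed "not found" value */
    }

The opposite direction, mmu_psize_to_shift(), appears in the hugetlbpage.c fragments above and presumably reduces to reading mmu_psize_defs[psize].shift directly.
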
D | pte-hash64-64k.h |
     81  #define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \  argument
     84  unsigned __split = (psize == MMU_PAGE_4K || \
     85  psize == MMU_PAGE_64K_AP); \
     86  shift = mmu_psize_defs[psize].shift; \
|
D | kvm_book3s_64.h |
    102  static inline int __hpte_actual_psize(unsigned int lp, int psize)  in __hpte_actual_psize() argument
    111  if (mmu_psize_defs[psize].penc[i] == -1)  in __hpte_actual_psize()
    126  if ((lp & mask) == mmu_psize_defs[psize].penc[i])  in __hpte_actual_psize()
    260  static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)  in hpte_rpn() argument
    262  return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;  in hpte_rpn()
    395  static inline unsigned long slb_pgsize_encoding(unsigned long psize)  in slb_pgsize_encoding() argument
    399  if (psize > 0x1000) {  in slb_pgsize_encoding()
    401  if (psize == 0x10000)  in slb_pgsize_encoding()
|
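hpte_rpn() above relies on psize being a power-of-two byte size, so addr & ~(psize - 1) is the page base and addr & (psize - 1) the offset within the page; the gpa_base/gpa lines in book3s_64_mmu_hv.c further down use the same trick. A tiny standalone illustration with made-up values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long psize = 0x10000;              /* e.g. a 64K page */
            unsigned long addr  = 0x12345678;           /* made-up address */

            unsigned long base   = addr & ~(psize - 1); /* 0x12340000 */
            unsigned long offset = addr &  (psize - 1); /* 0x00005678 */

            printf("base=%#lx offset=%#lx\n", base, offset);
            return 0;
    }
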
D | mmu-book3e.h |
    262  int psize;  in shift_to_mmu_psize() local
    264  for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)  in shift_to_mmu_psize()
    265  if (mmu_psize_defs[psize].shift == shift)  in shift_to_mmu_psize()
    266  return psize;  in shift_to_mmu_psize()
|
D | tlbflush.h |
     99  unsigned int psize;  member
    127  extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
    131  pmd_t *pmdp, unsigned int psize, int ssize,
|
D | machdep.h |
     48  int psize, int ssize);
     54  int psize, int apsize,
     58  int psize, int ssize);
     63  int psize, int ssize, int local);
|
/arch/powerpc/platforms/pseries/ |
D | lpar.c |
    129  int psize, int apsize, int ssize)  in pSeries_lpar_hpte_insert() argument
    139  hpte_group, vpn, pa, rflags, vflags, psize);  in pSeries_lpar_hpte_insert()
    141  hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;  in pSeries_lpar_hpte_insert()
    142  hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;  in pSeries_lpar_hpte_insert()
    292  int psize, int apsize,  in pSeries_lpar_hpte_updatepp() argument
    299  want_v = hpte_encode_avpn(vpn, psize, ssize);  in pSeries_lpar_hpte_updatepp()
    302  want_v, slot, flags, psize);  in pSeries_lpar_hpte_updatepp()
    337  static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)  in pSeries_lpar_hpte_find() argument
    344  hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);  in pSeries_lpar_hpte_find()
    345  want_v = hpte_encode_avpn(vpn, psize, ssize);  in pSeries_lpar_hpte_find()
    [all …]
|
/arch/powerpc/platforms/ps3/ |
D | htab.c |
     49  int psize, int apsize, int ssize)  in ps3_hpte_insert() argument
     65  hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;  in ps3_hpte_insert()
     66  hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize) | rflags;  in ps3_hpte_insert()
    112  unsigned long vpn, int psize, int apsize,  in ps3_hpte_updatepp() argument
    121  want_v = hpte_encode_avpn(vpn, psize, ssize);  in ps3_hpte_updatepp()
    132  __func__, ps3_result(result), vpn, slot, psize);  in ps3_hpte_updatepp()
    160  int psize, int ssize)  in ps3_hpte_updateboltedpp() argument
    166  int psize, int apsize, int ssize, int local)  in ps3_hpte_invalidate() argument
    177  __func__, ps3_result(result), vpn, slot, psize);  in ps3_hpte_invalidate()
|
/arch/sh/boards/mach-lboxre2/ |
D | setup.c |
     54  unsigned long paddrbase, psize;  in lboxre2_devices_setup() local
     58  psize = PAGE_SIZE;  in lboxre2_devices_setup()
     60  cf0_io_base = (u32)ioremap_prot(paddrbase, psize, pgprot_val(prot));  in lboxre2_devices_setup()
|
/arch/powerpc/kvm/ |
D | book3s_64_mmu_hv.c |
    182  unsigned long psize;  in kvmppc_map_vrma() local
    188  psize = 1ul << porder;  in kvmppc_map_vrma()
    199  HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);  in kvmppc_map_vrma()
    200  hp1 = hpte1_pgsize_encoding(psize) |  in kvmppc_map_vrma()
    442  unsigned long mmu_seq, psize, pte_size;  in kvmppc_book3s_hv_page_fault() local
    480  psize = hpte_page_size(hpte[0], r);  in kvmppc_book3s_hv_page_fault()
    481  gpa_base = r & HPTE_R_RPN & ~(psize - 1);  in kvmppc_book3s_hv_page_fault()
    483  gpa = gpa_base | (ea & (psize - 1));  in kvmppc_book3s_hv_page_fault()
    519  if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&  in kvmppc_book3s_hv_page_fault()
    523  pte_size = psize;  in kvmppc_book3s_hv_page_fault()
    [all …]
|
D | book3s_hv_rm_mmu.c |
    102  void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize)  in kvmppc_update_rmap_change() argument
    106  if (!psize)  in kvmppc_update_rmap_change()
    108  order = ilog2(psize);  in kvmppc_update_rmap_change()
    171  unsigned long i, pa, gpa, gfn, psize;  in kvmppc_do_h_enter() local
    185  psize = hpte_page_size(pteh, ptel);  in kvmppc_do_h_enter()
    186  if (!psize)  in kvmppc_do_h_enter()
    198  gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);  in kvmppc_do_h_enter()
    212  if (!slot_is_aligned(memslot, psize))  in kvmppc_do_h_enter()
    243  if (host_pte_size < psize) {  in kvmppc_do_h_enter()
    262  ptel &= ~(HPTE_R_PP0 - psize);  in kvmppc_do_h_enter()
    [all …]
|
/arch/blackfin/mm/ |
D | sram-alloc.c |
    328  unsigned long *psize)  in _sram_alloc_max() argument
    347  *psize = pmax->size;  in _sram_alloc_max()
    349  return _sram_alloc(*psize, pfree_head, pused_head);  in _sram_alloc_max()
    643  void *l1sram_alloc_max(size_t *psize)  in l1sram_alloc_max() argument
    654  &per_cpu(used_l1_ssram_head, cpu), psize);  in l1sram_alloc_max()
|
/arch/powerpc/platforms/powernv/ |
D | opal.c |
    121  int i, psize, size;  in early_init_dt_scan_recoverable_ranges() local
    127  prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);  in early_init_dt_scan_recoverable_ranges()
    141  mc_recoverable_range_len = psize / (sizeof(*prop) * 5);  in early_init_dt_scan_recoverable_ranges()
|
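In the opal.c entry above, psize is not a page size at all: of_get_flat_dt_prop() returns the property length in bytes through it, and each mcheck-recoverable range occupies five cells, so the entry count is length / (cell size * 5). A hedged restatement of that arithmetic; treating the cells as 32-bit __be32 values is an assumption based on usual flattened-device-tree layout.

    int psize;                                  /* property length in bytes */
    const __be32 *prop;                         /* assumed 32-bit cells */

    prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);
    if (prop)
            /* five cells describe one recoverable range entry */
            mc_recoverable_range_len = psize / (sizeof(*prop) * 5);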