
Searched for refs:vsid (results 1 – 25 of 30), sorted by relevance


/arch/powerpc/mm/
copro_fault.c
      99  u64 vsid;  (in copro_calculate_slb(), local)
     109  vsid = get_vsid(mm->context.id, ea, ssize);  (in copro_calculate_slb())
     118  vsid = get_kernel_vsid(ea, mmu_kernel_ssize);  (in copro_calculate_slb())
     124  vsid = get_kernel_vsid(ea, mmu_kernel_ssize);  (in copro_calculate_slb())
     131  vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER;  (in copro_calculate_slb())
     133  vsid |= mmu_psize_defs[psize].sllp |  (in copro_calculate_slb())
     137  slb->vsid = vsid;  (in copro_calculate_slb())
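
The copro_fault.c hits trace one pattern: pick a VSID for the faulting effective address (get_vsid() for user addresses, get_kernel_vsid() for kernel ones), shift it into position in the SLB VSID word, and OR in the segment attributes. Below is a minimal sketch of that flow, assuming the kernel helpers named in the hits; the user/kernel test is simplified (the real function switches on REGION_ID(ea)), and the SLB_VSID_KERNEL branch is an assumption not shown in the hits.

    /*
     * Sketch, not the kernel function: VSID selection as visible in
     * copro_calculate_slb().  Assumes kernel-header context for the
     * helpers and the SLB_VSID_* flags.
     */
    static u64 sketch_slb_vsid(struct mm_struct *mm, unsigned long ea,
                               int ssize, int psize)
    {
            u64 vsid;

            if (ea < PAGE_OFFSET) {         /* user segment (simplified test) */
                    vsid = get_vsid(mm->context.id, ea, ssize);
                    vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER;
            } else {                        /* kernel segment (assumed branch) */
                    vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                    vsid = (vsid << slb_vsid_shift(mmu_kernel_ssize)) | SLB_VSID_KERNEL;
            }
            /* Tag the segment with its base-page-size encoding. */
            vsid |= mmu_psize_defs[psize].sllp;
            return vsid;
    }
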
hugepage-hash64.c
      21  static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,  (in invalidate_old_hpte(), argument)
      40  return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,  (in invalidate_old_hpte())
      59  vpn = hpt_vpn(addr, vsid, ssize);  (in invalidate_old_hpte())
      72  int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,  (in __hash_page_thp(), argument)
     138  vpn = hpt_vpn(ea, vsid, ssize);  (in __hash_page_thp())
     147  invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);  (in __hash_page_thp())
     221  hash_failure_debug(ea, access, vsid, trap, ssize,  (in __hash_page_thp())
hash_utils_64.c
     200  unsigned long vsid = get_kernel_vsid(vaddr, ssize);  (in htab_bolt_mapping(), local)
     201  unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);  (in htab_bolt_mapping())
     207  if (!vsid)  (in htab_bolt_mapping())
     959  unsigned long vsid, unsigned long trap,  (in hash_failure_debug(), argument)
     967  trap, vsid, ssize, psize, lpsize, pte);  (in hash_failure_debug())
     996  unsigned long vsid;  (in hash_page_mm(), local)
    1017  vsid = get_vsid(mm->context.id, ea, ssize);  (in hash_page_mm())
    1020  vsid = get_kernel_vsid(ea, mmu_kernel_ssize);  (in hash_page_mm())
    1034  DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);  (in hash_page_mm())
    1037  if (!vsid) {  (in hash_page_mm())
    [all …]
tlb_hash64.c
      48  unsigned long vsid;  (in hpte_need_flush(), local)
      85  vsid = get_vsid(mm->context.id, addr, ssize);  (in hpte_need_flush())
      87  vsid = get_kernel_vsid(addr, mmu_kernel_ssize);  (in hpte_need_flush())
      90  WARN_ON(vsid == 0);  (in hpte_need_flush())
      91  vpn = hpt_vpn(addr, vsid, ssize);  (in hpte_need_flush())
hash_native_64.c
     361  unsigned long vsid;  (in native_hpte_updateboltedpp(), local)
     365  vsid = get_kernel_vsid(ea, ssize);  (in native_hpte_updateboltedpp())
     366  vpn = hpt_vpn(ea, vsid, ssize);  (in native_hpte_updateboltedpp())
     419  static void native_hugepage_invalidate(unsigned long vsid,  (in native_hugepage_invalidate(), argument)
     444  vpn = hpt_vpn(addr, vsid, ssize);  (in native_hugepage_invalidate())
     509  unsigned long vsid, seg_off;  (in hpte_decode(), local)
     542  vsid = avpn >> 5;  (in hpte_decode())
     545  vpi = (vsid ^ pteg) & htab_hash_mask;  (in hpte_decode())
     548  *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;  (in hpte_decode())
     553  vsid = avpn >> 17;  (in hpte_decode())
    [all …]
hugetlbpage-hash64.c
      21  int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,  (in __hash_page_huge(), argument)
      33  vpn = hpt_vpn(ea, vsid, ssize);  (in __hash_page_huge())
     116  hash_failure_debug(ea, access, vsid, trap, ssize,  (in __hash_page_huge())
pgtable_64.c
     739  unsigned long hidx, vpn, vsid, hash, shift, slot;  (in hpte_do_hugepage_flush(), local)
     766  vsid = get_vsid(mm->context.id, s_addr, ssize);  (in hpte_do_hugepage_flush())
     767  WARN_ON(vsid == 0);  (in hpte_do_hugepage_flush())
     769  vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);  (in hpte_do_hugepage_flush())
     774  return ppc_md.hugepage_invalidate(vsid, s_addr,  (in hpte_do_hugepage_flush())
     794  vpn = hpt_vpn(addr, vsid, ssize);  (in hpte_do_hugepage_flush())
slb.c
      66  get_slb_shadow()->save_area[entry].vsid =  (in slb_shadow_update())
     115  be64_to_cpu(get_slb_shadow()->save_area[2].vsid);  (in __slb_flush_and_rebolt())
/arch/powerpc/kvm/
book3s_32_mmu.c
      90  u64 *vsid);
     100  u64 vsid;  (in kvmppc_mmu_book3s_32_ea_to_vp(), local)
     106  kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);  (in kvmppc_mmu_book3s_32_ea_to_vp())
     107  return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);  (in kvmppc_mmu_book3s_32_ea_to_vp())
     178  u64 vsid;  (in kvmppc_mmu_book3s_32_xlate_bat(), local)
     180  eaddr >> SID_SHIFT, &vsid);  (in kvmppc_mmu_book3s_32_xlate_bat())
     181  vsid <<= 16;  (in kvmppc_mmu_book3s_32_xlate_bat())
     182  pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;  (in kvmppc_mmu_book3s_32_xlate_bat())
     369  u64 *vsid)  (in kvmppc_mmu_book3s_32_esid_to_vsid(), argument)
     387  *vsid = VSID_REAL | esid;  (in kvmppc_mmu_book3s_32_esid_to_vsid())
    [all …]
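
The kvmppc_mmu_book3s_32_ea_to_vp() hits at lines 106–107 show how the 32-bit Book3S code forms a virtual page number: the low 16 bits are the page index within the 256M segment (EA >> 12), with the VSID placed in the bits above. A trivial standalone restatement (plain C, no kernel dependencies):

    #include <stdint.h>

    /* VP construction as in the kvmppc_mmu_book3s_32_ea_to_vp() hit:
     * 16 bits of page index within the segment, VSID above them. */
    static uint64_t sketch_ea_to_vp(uint32_t eaddr, uint64_t vsid)
    {
            return (((uint64_t)eaddr >> 12) & 0xffff) | (vsid << 16);
    }
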
book3s_64_mmu_host.c
      87  u64 vsid;  (in kvmppc_mmu_map_page(), local)
     117  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  (in kvmppc_mmu_map_page())
     118  map = find_sid_vsid(vcpu, vsid);  (in kvmppc_mmu_map_page())
     122  map = find_sid_vsid(vcpu, vsid);  (in kvmppc_mmu_map_page())
     126  vsid, orig_pte->eaddr);  (in kvmppc_mmu_map_page())
     151  if (vsid & VSID_64K)  (in kvmppc_mmu_map_page())
     219  u64 vsid;  (in kvmppc_mmu_unmap_page(), local)
     221  vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);  (in kvmppc_mmu_unmap_page())
     222  if (vsid & VSID_64K)  (in kvmppc_mmu_unmap_page())
     345  svcpu->slb[slb_index].vsid = slb_vsid;  (in kvmppc_mmu_map_segment())
book3s_32_mmu_host.c
     117  static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,  (in kvmppc_mmu_get_pteg(), argument)
     125  hash = ((vsid ^ page) << 6);  (in kvmppc_mmu_get_pteg())
     146  u64 vsid;  (in kvmppc_mmu_map_page(), local)
     169  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  (in kvmppc_mmu_map_page())
     170  map = find_sid_vsid(vcpu, vsid);  (in kvmppc_mmu_map_page())
     173  map = find_sid_vsid(vcpu, vsid);  (in kvmppc_mmu_map_page())
     177  vsid = map->host_vsid;  (in kvmppc_mmu_map_page())
     178  vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |  (in kvmppc_mmu_map_page())
     187  pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);  (in kvmppc_mmu_map_page())
     205  pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |  (in kvmppc_mmu_map_page())
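
The kvmppc_mmu_get_pteg() hit at line 125 is the classic 32-bit hashed-page-table primary hash: XOR the VSID with the page index, then scale by the 64-byte PTEG size (hence the << 6). A standalone sketch of that arithmetic; the htabmask parameter is illustrative (the real table mask derives from SDR1):

    #include <stdint.h>

    /* Primary PTEG offset per the kvmppc_mmu_get_pteg() hit:
     * hash = (vsid ^ page) << 6, with 6 = log2(64-byte PTEG). */
    static uint32_t sketch_pteg_offset(uint32_t vsid, uint32_t eaddr,
                                       uint32_t htabmask)
    {
            uint32_t page = (eaddr & 0x0fffffff) >> 12;  /* page index in segment */

            return ((vsid ^ page) << 6) & htabmask;
    }
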
book3s_64_mmu.c
      68  if (vcpu->arch.slb[i].vsid)  (in kvmppc_mmu_book3s_64_find_slbe())
      74  vcpu->arch.slb[i].vsid);  (in kvmppc_mmu_book3s_64_find_slbe())
      95  ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));  (in kvmppc_slb_calc_vpn())
     158  page, vcpu_book3s->sdr1, pteg, slbe->vsid);  (in kvmppc_mmu_book3s_64_get_pteg())
     178  avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);  (in kvmppc_mmu_book3s_64_get_avpn())
     401  slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);  (in kvmppc_mmu_book3s_64_slbmte())
     578  u64 *vsid)  (in kvmppc_mmu_book3s_64_esid_to_vsid(), argument)
     590  gvsid = slb->vsid;  (in kvmppc_mmu_book3s_64_esid_to_vsid())
     636  *vsid = gvsid;  (in kvmppc_mmu_book3s_64_esid_to_vsid())
     644  *vsid = VSID_REAL | esid;  (in kvmppc_mmu_book3s_64_esid_to_vsid())
book3s_hv_ras.c
      55  unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);  (in reload_slb())
book3s_hv_rm_mmu.c
     786  unsigned long vsid, hash;  (in kvmppc_hv_find_lock_hpte(), local)
     803  vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;  (in kvmppc_hv_find_lock_hpte())
     804  vsid ^= vsid << 25;  (in kvmppc_hv_find_lock_hpte())
     807  vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;  (in kvmppc_hv_find_lock_hpte())
     809  hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;  (in kvmppc_hv_find_lock_hpte())
/arch/powerpc/include/asm/
mmu-hash64.h
     287  unsigned long vsid, int ssize)  (in hpt_vpn(), argument)
     293  return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);  (in hpt_vpn())
     303  unsigned long hash, vsid;  (in hpt_hash(), local)
     312  vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);  (in hpt_hash())
     313  hash = vsid ^ (vsid << 25) ^  (in hpt_hash())
     320  unsigned long vsid, pte_t *ptep, unsigned long trap,
     323  unsigned long vsid, pte_t *ptep, unsigned long trap,
     329  int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
     334  unsigned long vsid, pmd_t *pmdp, unsigned long trap,
     338  unsigned long vsid, pmd_t *pmdp,  (in __hash_page_thp(), argument)
    [all …]
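
The hpt_vpn() and hpt_hash() fragments above are the heart of the 64-bit scheme: hpt_vpn() concatenates the VSID with the page bits of the EA, and hpt_hash() folds the VPN back into a hash-table index; for 1T segments the VSID is further mixed with vsid << 25, the same step visible in kvmppc_hv_find_lock_hpte() earlier. A self-contained sketch of the 256M-segment case, with the shift constants written out (SID_SHIFT = 28 and VPN_SHIFT = 12 in this kernel):

    #include <stdint.h>

    #define SID_SHIFT  28   /* 256M segment */
    #define VPN_SHIFT  12

    /* VPN from EA + VSID, following the hpt_vpn() hit (256M segments). */
    static uint64_t sketch_hpt_vpn(uint64_t ea, uint64_t vsid)
    {
            uint64_t mask = (1ULL << (SID_SHIFT - VPN_SHIFT)) - 1;

            return (vsid << (SID_SHIFT - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
    }

    /* Hash index from VPN, following the hpt_hash() structure:
     * vsid XOR page-within-segment; shift is the base page shift (12 for 4K). */
    static uint64_t sketch_hpt_hash(uint64_t vpn, int shift)
    {
            uint64_t mask = (1ULL << (SID_SHIFT - VPN_SHIFT)) - 1;
            uint64_t vsid = vpn >> (SID_SHIFT - VPN_SHIFT);

            return vsid ^ ((vpn & mask) >> (shift - VPN_SHIFT));
    }
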
copro.h
      15  u64 esid, vsid;  (member)
mmu-hash32.h
      65  unsigned long vsid:24; /* Virtual segment identifier */  (member)
lppaca.h
     131  __be64 vsid;  (member)
kvm_book3s_asm.h
     135  u64 vsid;  (member)
kvm_host.h
     362  int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
     369  u64 vsid;  (member)
/arch/microblaze/include/asm/
mmu.h
      26  unsigned long vsid:24; /* Virtual segment identifier */  (member)
      54  unsigned long vsid:24; /* Virtual Segment Identifier */  (member)
/arch/powerpc/platforms/pseries/
lpar.c
     360  unsigned long lpar_rc, slot, vsid, flags;  (in pSeries_lpar_hpte_updateboltedpp(), local)
     362  vsid = get_kernel_vsid(ea, ssize);  (in pSeries_lpar_hpte_updateboltedpp())
     363  vpn = hpt_vpn(ea, vsid, ssize);  (in pSeries_lpar_hpte_updateboltedpp())
     442  static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,  (in pSeries_lpar_hugepage_invalidate(), argument)
     465  vpn = hpt_vpn(addr, vsid, ssize);  (in pSeries_lpar_hugepage_invalidate())
     496  unsigned long slot, vsid;  (in pSeries_lpar_hpte_removebolted(), local)
     498  vsid = get_kernel_vsid(ea, ssize);  (in pSeries_lpar_hpte_removebolted())
     499  vpn = hpt_vpn(ea, vsid, ssize);  (in pSeries_lpar_hpte_removebolted())
/arch/powerpc/platforms/cell/
beat_htab.c
     260  unsigned long lpar_rc, slot, vsid;  (in beat_lpar_hpte_updateboltedpp(), local)
     263  vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);  (in beat_lpar_hpte_updateboltedpp())
     264  vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);  (in beat_lpar_hpte_updateboltedpp())
spu_base.c
     153  __func__, slbe, slb->vsid, slb->esid);  (in spu_load_slb())
     159  out_be64(&priv2->slb_vsid_RW, slb->vsid);  (in spu_load_slb())
     229  slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |  (in __spu_kernel_slb())
/arch/powerpc/xmon/
xmon.c
    2700  unsigned long esid,vsid,valid;  (in dump_segments(), local)
    2707  asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));  (in dump_segments())
    2709  if (valid | esid | vsid) {  (in dump_segments())
    2710  printf("%02d %016lx %016lx", i, esid, vsid);  (in dump_segments())
    2712  llp = vsid & SLB_VSID_LLP;  (in dump_segments())
    2713  if (vsid & SLB_VSID_B_1T) {  (in dump_segments())
    2716  (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T,  (in dump_segments())
    2721  (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT,  (in dump_segments())
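
The dump_segments() hits show how xmon reads the SLB directly: slbmfee/slbmfev fetch the ESID and VSID words of entry i, and the B field of the VSID word (SLB_VSID_B_1T) selects the shift that recovers the raw VSID. A sketch of that loop under the same assumptions (privileged PowerPC context; mmu_slb_size, the SLB_VSID_* constants from the kernel headers, and xmon's printf; the empty-entry test is simplified relative to the valid flag in the real code):

    /* Sketch of xmon's SLB walk: read each entry with slbmfee/slbmfev,
     * skip empty ones, and decode the raw VSID by segment size. */
    static void sketch_dump_slb(void)
    {
            unsigned long esid, vsid;
            int i;

            for (i = 0; i < mmu_slb_size; i++) {
                    asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
                    asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
                    if (!(esid | vsid))
                            continue;       /* unused entry */
                    if (vsid & SLB_VSID_B_1T)
                            printf("%02d 1T   vsid=%lx\n", i,
                                   (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T);
                    else
                            printf("%02d 256M vsid=%lx\n", i,
                                   (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT);
            }
    }
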
