
Lines Matching full:asid

52 * to what is traditionally called ASID on the RISC processors.
54 * We don't use the traditional ASID implementation, where each process/mm gets
55 * its own ASID and flush/restart when we run out of ASID space.
64 * ASID - [0, TLB_NR_DYN_ASIDS-1]
69 * ASID+1, because PCID 0 is special.
73 * PCID values, but we can still do with a single ASID denomination
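A worked instance of that numbering, assuming the usual values (TLB_NR_DYN_ASIDS = 6 and the PTI user bit at position 11, so uPCID = kPCID + 2048) rather than the actual header constants:

    ASID 0 -> kPCID 1, uPCID 2049
    ASID 5 -> kPCID 6, uPCID 2054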
101 * Given @asid, compute kPCID
103 static inline u16 kern_pcid(u16 asid) in kern_pcid() argument
105 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); in kern_pcid()
109 * Make sure that the dynamic ASID space does not conflict with the in kern_pcid()
115 * The ASID being passed in here should have respected the in kern_pcid()
118 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT)); in kern_pcid()
125 * If PCID is on, ASID-aware code paths put the ASID+1 into the in kern_pcid()
129 * the TLB for ASID 0 if the saved ASID was nonzero. It also means in kern_pcid()
133 return asid + 1; in kern_pcid()
137 * Given @asid, compute uPCID
139 static inline u16 user_pcid(u16 asid) in user_pcid() argument
141 u16 ret = kern_pcid(asid); in user_pcid()
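Pieced together from the matches above, the two helpers reduce to a fixed mapping: kPCID = ASID + 1 (so PCID 0 stays reserved), and uPCID = kPCID with the PTI user bit set. A minimal standalone sketch, with the constant values assumed here rather than taken from the kernel headers:

#include <assert.h>
#include <stdint.h>

#define TLB_NR_DYN_ASIDS          6   /* assumed: dynamic ASID slots per CPU */
#define X86_CR3_PTI_PCID_USER_BIT 11  /* assumed: bit separating uPCID from kPCID */

/* Given an ASID in [0, TLB_NR_DYN_ASIDS-1], compute the kernel PCID.
 * ASID+1 keeps PCID 0 free for PCID-unaware code that reloads CR3. */
static uint16_t kern_pcid(uint16_t asid)
{
    assert(asid < TLB_NR_DYN_ASIDS);
    assert(!(asid & (1u << X86_CR3_PTI_PCID_USER_BIT)));
    return asid + 1;
}

/* The user PCID is the kernel PCID with the PTI user bit set on top. */
static uint16_t user_pcid(uint16_t asid)
{
    return kern_pcid(asid) | (1u << X86_CR3_PTI_PCID_USER_BIT);
}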
148 static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) in build_cr3() argument
151 return __sme_pa(pgd) | kern_pcid(asid); in build_cr3()
153 VM_WARN_ON_ONCE(asid != 0); in build_cr3()
158 static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) in build_cr3_noflush() argument
160 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); in build_cr3_noflush()
167 return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH; in build_cr3_noflush()
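Building on the sketch above: build_cr3() ORs the kPCID into the page-table root's physical address, and build_cr3_noflush() additionally sets CR3 bit 63 so the hardware skips the implicit TLB flush. A model that substitutes a plain uint64_t physical address for __sme_pa(pgd):

#include <stdbool.h>

#define CR3_NOFLUSH (1ULL << 63)  /* CR3 bit 63: suppress the implicit flush */

/* have_pcid stands in for the X86_FEATURE_PCID check in the real code. */
static uint64_t build_cr3(uint64_t pgd_pa, uint16_t asid, bool have_pcid)
{
    if (have_pcid)
        return pgd_pa | kern_pcid(asid);
    assert(asid == 0);  /* without PCID there is only ASID 0 */
    return pgd_pa;
}

static uint64_t build_cr3_noflush(uint64_t pgd_pa, uint16_t asid)
{
    return pgd_pa | kern_pcid(asid) | CR3_NOFLUSH;
}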
178 u16 asid; in clear_asid_other() local
189 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { in clear_asid_other()
190 /* No need to flush the current asid */ in clear_asid_other()
191 if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) in clear_asid_other()
195 * this asid, we do a flush: in clear_asid_other()
197 this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); in clear_asid_other()
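clear_asid_other() walks every dynamic slot except the one currently loaded and zeroes its cached context ID, so the next switch into that slot is forced to flush. A sketch over a plain struct standing in for the per-CPU cpu_tlbstate (field names mirror the fragments above, but the layout is an assumption):

struct tlb_ctx {
    uint64_t ctx_id;   /* which mm this slot caches; 0 never matches a real mm */
    uint64_t tlb_gen;  /* TLB generation this slot is up to date with */
};

struct tlb_state {
    struct tlb_ctx ctxs[TLB_NR_DYN_ASIDS];
    uint16_t loaded_mm_asid;
    uint16_t next_asid;
    bool     invalidate_other;
};

static void clear_asid_other(struct tlb_state *ts)
{
    for (uint16_t asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
        /* No need to flush the asid we are currently running on. */
        if (asid == ts->loaded_mm_asid)
            continue;
        /* Make the next switch to this slot do a flush. */
        ts->ctxs[asid].ctx_id = 0;
    }
    ts->invalidate_other = false;
}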
208 u16 asid; in choose_new_asid() local
219 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { in choose_new_asid()
220 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != in choose_new_asid()
224 *new_asid = asid; in choose_new_asid()
225 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < in choose_new_asid()
231 * We don't currently own an ASID slot on this CPU. in choose_new_asid()
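choose_new_asid() first scans the slots for one that already caches the incoming mm; if it finds one, a flush is needed only when that slot's TLB generation is behind next_tlb_gen. Otherwise it takes a fresh slot and always flushes. Continuing the tlb_state sketch, with the kernel's round-robin allocation simplified to an explicit wrap:

static void choose_new_asid(struct tlb_state *ts, uint64_t next_ctx_id,
                            uint64_t next_tlb_gen,
                            uint16_t *new_asid, bool *need_flush)
{
    for (uint16_t asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
        if (ts->ctxs[asid].ctx_id != next_ctx_id)
            continue;
        /* Slot already caches this mm: flush only if it is stale. */
        *new_asid = asid;
        *need_flush = ts->ctxs[asid].tlb_gen < next_tlb_gen;
        return;
    }

    /* We don't currently own an ASID slot on this CPU: take the next
     * one round-robin and flush whatever it used to cache. */
    *new_asid = ts->next_asid;
    ts->next_asid = (ts->next_asid + 1) % TLB_NR_DYN_ASIDS;
    *need_flush = true;
}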
243 * Given an ASID, flush the corresponding user ASID. We can delay this
248 static inline void invalidate_user_asid(u16 asid) in invalidate_user_asid() argument
250 /* There is no user ASID if address space separation is off */ in invalidate_user_asid()
255 * We only have a single ASID if PCID is off and the CR3 in invalidate_user_asid()
264 __set_bit(kern_pcid(asid), in invalidate_user_asid()
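invalidate_user_asid() does not flush anything immediately: it sets the ASID's kPCID bit in a per-CPU pending mask, and the user-ASID flush is deferred until the next return to user mode. A one-line model of that bookkeeping (a plain global stands in for the per-CPU user_pcid_flush_mask):

static unsigned long user_pcid_flush_mask;  /* per-CPU in the real code */

static void invalidate_user_asid(uint16_t asid)
{
    /* Record the kPCID; the flush happens on the next exit to user mode. */
    user_pcid_flush_mask |= 1UL << kern_pcid(asid);
}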
465 * back into an incorrect ASID slot and leave it there in switch_mm_irqs_off()
565 /* The new ASID is already up to date. */ in switch_mm_irqs_off()
608 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
635 /* Force ASID 0 and force a TLB flush. */ in initialize_tlbstate_and_flush()
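Forcing ASID 0 plus a flush falls out of the builders above: writing a CR3 value built without CR3_NOFLUSH flushes the non-global TLB entries as a side effect. A hypothetical init step modeling what the fragment describes (write_cr3() here is a stub for the privileged register write):

static void write_cr3(uint64_t cr3) { (void)cr3; }  /* mov to %cr3 in the real kernel */

static void init_tlb_state(struct tlb_state *ts, uint64_t kernel_pgd_pa,
                           bool have_pcid)
{
    /* No CR3_NOFLUSH bit, so the write itself flushes non-global entries. */
    write_cr3(build_cr3(kernel_pgd_pa, 0, have_pcid));
    ts->loaded_mm_asid = 0;
    ts->next_asid = 1;  /* slot 0 now belongs to the loaded mm */
}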