Searched refs:asid (Results 1 – 25 of 56) sorted by relevance

/arch/arm64/mm/
context.c
38 #define asid2idx(asid) ((asid) & ~ASID_MASK) argument
44 u32 asid; in get_cpu_asid_bits() local
54 asid = 8; in get_cpu_asid_bits()
57 asid = 16; in get_cpu_asid_bits()
60 return asid; in get_cpu_asid_bits()
66 u32 asid = get_cpu_asid_bits(); in verify_cpu_asid_bits() local
68 if (asid < asid_bits) { in verify_cpu_asid_bits()
74 smp_processor_id(), asid, asid_bits); in verify_cpu_asid_bits()
101 #define asid_gen_match(asid) \ argument
102 (!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
[all …]
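
The asid_gen_match() hits above show the generation-check idiom used throughout these allocators: the 64-bit context id keeps a generation count in the bits above the hardware ASID, so XOR-ing with the current generation and shifting out the low asid_bits is zero only when the generations match. A minimal standalone sketch of that check, assuming 16-bit ASIDs (names here are illustrative, not kernel APIs):

#include <stdbool.h>
#include <stdint.h>

static const unsigned asid_bits = 16;              /* assumed ASID width */
static uint64_t asid_generation = 1ULL << 16;      /* current generation */

static bool gen_match(uint64_t asid)
{
        /* Any high bits left after the XOR mean a stale generation. */
        return !((asid ^ asid_generation) >> asid_bits);
}
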
/arch/arm/mm/
context.c
56 u64 context_id, asid; in a15_erratum_get_cpumask() local
67 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
68 if (asid == 0) in a15_erratum_get_cpumask()
69 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
70 if (context_id == asid) in a15_erratum_get_cpumask()
139 u64 asid; in flush_context() local
144 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); in flush_context()
152 if (asid == 0) in flush_context()
153 asid = per_cpu(reserved_asids, i); in flush_context()
154 __set_bit(asid & ~ASID_MASK, asid_map); in flush_context()
[all …]
/arch/csky/mm/
asid.c
21 #define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift) argument
27 u64 asid; in flush_context() local
33 asid = atomic64_xchg_relaxed(&active_asid(info, i), 0); in flush_context()
41 if (asid == 0) in flush_context()
42 asid = reserved_asid(info, i); in flush_context()
43 __set_bit(asid2idx(info, asid), info->map); in flush_context()
44 reserved_asid(info, i) = asid; in flush_context()
54 static bool check_update_reserved_asid(struct asid_info *info, u64 asid, in check_update_reserved_asid() argument
70 if (reserved_asid(info, cpu) == asid) { in check_update_reserved_asid()
83 u64 asid = atomic64_read(pasid); in new_context() local
[all …]
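
The flush_context() hits above (and the near-identical arm copy earlier) show the rollover bookkeeping: each CPU's active ASID is swapped to 0; a result of 0 means that CPU raced with the rollover, so its reserved ASID is used instead; the surviving ASID is then marked in the allocation map and re-reserved. A simplified, single-threaded sketch under assumed sizes (the kernel uses atomic64_xchg() and per-CPU variables):

#include <stdint.h>

#define NR_CPUS   4
#define ASID_BITS 8

static uint64_t active_asid[NR_CPUS];
static uint64_t reserved_asid[NR_CPUS];
static uint8_t  asid_map[1 << ASID_BITS];   /* 1 = live in this generation */

static void flush_context_sketch(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                uint64_t asid = active_asid[cpu];
                active_asid[cpu] = 0;
                if (asid == 0)                   /* CPU was mid-rollover */
                        asid = reserved_asid[cpu];
                asid_map[asid & ((1 << ASID_BITS) - 1)] = 1;
                reserved_asid[cpu] = asid;
        }
}
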
/arch/sh/include/asm/
mmu_context_32.h
6 static inline void set_asid(unsigned long asid) in set_asid() argument
8 __raw_writel(asid, MMU_PTEAEX); in set_asid()
16 static inline void set_asid(unsigned long asid) in set_asid() argument
25 : "r" (asid), "m" (__m(MMU_PTEH)), in set_asid()
31 unsigned long asid; in get_asid() local
34 : "=r" (asid) in get_asid()
36 asid &= MMU_CONTEXT_ASID_MASK; in get_asid()
37 return asid; in get_asid()
mmu_context.h
57 unsigned long asid = asid_cache(cpu); in get_mmu_context() local
60 if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0) in get_mmu_context()
65 if (!(++asid & MMU_CONTEXT_ASID_MASK)) { in get_mmu_context()
76 if (!asid) in get_mmu_context()
77 asid = MMU_CONTEXT_FIRST_VERSION; in get_mmu_context()
80 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_mmu_context()
128 #define set_asid(asid) do { } while (0) argument
131 #define switch_and_save_asid(asid) (0) argument
tlbflush.h
23 extern void local_flush_tlb_one(unsigned long asid, unsigned long page);
35 extern void flush_tlb_one(unsigned long asid, unsigned long page);
42 #define flush_tlb_one(asid, page) local_flush_tlb_one(asid, page) argument
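
The get_mmu_context() hits in mmu_context.h above use the classic versioned-counter scheme: bump the per-CPU ASID counter, and when the low hardware-ASID bits wrap, flush the TLB and move on to a new version, falling back to the first version if the whole counter ever wraps to zero. A sketch assuming 8 hardware ASID bits (the flush call is commented out as a stand-in for the arch-specific flush):

#define ASID_MASK      0xffUL              /* assumed hardware ASID width */
#define FIRST_VERSION  (ASID_MASK + 1)

static unsigned long asid_cache = FIRST_VERSION;

static unsigned long next_asid(void)
{
        unsigned long asid = asid_cache;

        if (!(++asid & ASID_MASK)) {
                /* Hardware ASIDs exhausted: flush, start a new version. */
                /* flush_tlb_all(); */
                if (!asid)                 /* full counter wrapped around */
                        asid = FIRST_VERSION;
        }
        return asid_cache = asid;
}
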
/arch/xtensa/include/asm/
mmu_context.h
72 unsigned long asid = cpu_asid_cache(cpu); in get_new_mmu_context() local
73 if ((++asid & ASID_MASK) == 0) { in get_new_mmu_context()
79 asid += ASID_USER_FIRST; in get_new_mmu_context()
81 cpu_asid_cache(cpu) = asid; in get_new_mmu_context()
82 mm->context.asid[cpu] = asid; in get_new_mmu_context()
93 unsigned long asid = mm->context.asid[cpu]; in get_mmu_context() local
95 if (asid == NO_CONTEXT || in get_mmu_context()
96 ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK)) in get_mmu_context()
104 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); in activate_context()
120 mm->context.asid[cpu] = NO_CONTEXT; in init_new_context()
/arch/sh/mm/
tlbflush_32.c
21 unsigned long asid; in local_flush_tlb_page() local
24 asid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
30 set_asid(asid); in local_flush_tlb_page()
32 local_flush_tlb_one(asid, page); in local_flush_tlb_page()
56 unsigned long asid; in local_flush_tlb_range() local
59 asid = cpu_asid(cpu, mm); in local_flush_tlb_range()
65 set_asid(asid); in local_flush_tlb_range()
68 local_flush_tlb_one(asid, start); in local_flush_tlb_range()
89 unsigned long asid; in local_flush_tlb_kernel_range() local
92 asid = cpu_asid(cpu, &init_mm); in local_flush_tlb_kernel_range()
[all …]
tlb-pteaex.c
70 void local_flush_tlb_one(unsigned long asid, unsigned long page) in local_flush_tlb_one() argument
74 __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); in local_flush_tlb_one()
76 __raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); in local_flush_tlb_one()
tlb-sh3.c
55 void local_flush_tlb_one(unsigned long asid, unsigned long page) in local_flush_tlb_one() argument
67 data = (page & 0xfffe0000) | asid; /* VALID bit is off */ in local_flush_tlb_one()
tlb-debugfs.c
94 unsigned long vpn, ppn, asid, size; in tlb_seq_show() local
107 asid = val & MMU_CONTEXT_ASID_MASK; in tlb_seq_show()
126 entry, vpn, ppn, asid, in tlb_seq_show()
/arch/riscv/mm/
tlbflush.c
9 static inline void local_flush_tlb_all_asid(unsigned long asid) in local_flush_tlb_all_asid() argument
13 : "r" (asid) in local_flush_tlb_all_asid()
18 unsigned long asid) in local_flush_tlb_page_asid() argument
22 : "r" (addr), "r" (asid) in local_flush_tlb_page_asid()
46 unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask; in __sbi_tlb_flush_range() local
51 start, size, asid); in __sbi_tlb_flush_range()
53 local_flush_tlb_page_asid(start, asid); in __sbi_tlb_flush_range()
55 local_flush_tlb_all_asid(asid); in __sbi_tlb_flush_range()
context.c
99 unsigned long asid, ver = atomic_long_read(&current_version); in __new_context() local
126 asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx); in __new_context()
127 if (asid != num_asids) in __new_context()
137 asid = find_next_zero_bit(context_asid_map, num_asids, 1); in __new_context()
140 __set_bit(asid, context_asid_map); in __new_context()
141 cur_idx = asid; in __new_context()
142 return asid | ver; in __new_context()
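
__new_context() above allocates from a bitmap: scan for a free slot starting at the last allocation point, and on exhaustion bump the generation, flush, and rescan from index 1 (slot 0 stays reserved). A simplified single-threaded sketch with made-up sizes; clearing the map wholesale stands in for flush_context() rebuilding it from the reserved ASIDs:

#include <stdint.h>
#include <string.h>

#define NUM_ASIDS 256                       /* assumed pool size */

static uint8_t  asid_map[NUM_ASIDS];        /* 1 = in use */
static unsigned cur_idx = 1;
static uint64_t version = NUM_ASIDS;        /* generation lives above the ASID bits */

static unsigned find_zero(unsigned from)
{
        for (unsigned i = from; i < NUM_ASIDS; i++)
                if (!asid_map[i])
                        return i;
        return NUM_ASIDS;                   /* none free */
}

static uint64_t new_context_sketch(void)
{
        unsigned asid = find_zero(cur_idx);

        if (asid == NUM_ASIDS) {
                version += NUM_ASIDS;       /* new generation */
                memset(asid_map, 0, sizeof(asid_map));
                asid = find_zero(1);        /* slot 0 stays reserved */
        }
        asid_map[asid] = 1;
        cur_idx = asid;
        return asid | version;
}
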
/arch/arm64/include/asm/
tlbflush.h
57 #define __TLBI_VADDR(addr, asid) \ argument
61 __ta |= (unsigned long)(asid) << 48; \
136 #define __TLBI_VADDR_RANGE(addr, asid, scale, num, ttl) \ argument
144 __ta |= (unsigned long)(asid) << 48; \
248 unsigned long asid; in flush_tlb_mm() local
251 asid = __TLBI_VADDR(0, ASID(mm)); in flush_tlb_mm()
252 __tlbi(aside1is, asid); in flush_tlb_mm()
253 __tlbi_user(aside1is, asid); in flush_tlb_mm()
288 unsigned long asid, addr, pages; in __flush_tlb_range() local
308 asid = ASID(vma->vm_mm); in __flush_tlb_range()
[all …]
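
__TLBI_VADDR() above packs the operand register for a TLBI-by-VA: the page-shifted address in the low bits and the ASID in bits [63:48]. A sketch of that encoding, assuming the usual 12-bit page shift and a 44-bit address field (only the ASID insertion is visible in the hits above; the rest is an assumption):

#include <stdint.h>

static inline uint64_t tlbi_vaddr(uint64_t addr, uint16_t asid)
{
        uint64_t ta = (addr >> 12) & ((1ULL << 44) - 1); /* VA -> bits [43:0] */
        ta |= (uint64_t)asid << 48;                      /* ASID -> bits [63:48] */
        return ta;
}
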
/arch/x86/mm/
tlb.c
111 static inline u16 kern_pcid(u16 asid) in kern_pcid() argument
113 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); in kern_pcid()
126 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT)); in kern_pcid()
141 return asid + 1; in kern_pcid()
147 static inline u16 user_pcid(u16 asid) in user_pcid() argument
149 u16 ret = kern_pcid(asid); in user_pcid()
156 static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) in build_cr3() argument
159 return __sme_pa(pgd) | kern_pcid(asid); in build_cr3()
161 VM_WARN_ON_ONCE(asid != 0); in build_cr3()
166 static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) in build_cr3_noflush() argument
[all …]
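
kern_pcid() above maps the kernel's zero-based ASID onto a hardware PCID by adding 1, because PCID 0 is what CR3 carries when PCID is unused; with PTI, user_pcid() gives the userspace copy of the address space the same PCID with a distinguishing high bit set. A sketch of that mapping (the bit number mirrors the upstream constant, but this block is illustrative, not the kernel's code):

#include <stdint.h>

#define X86_CR3_PTI_PCID_USER_BIT 11

static inline uint16_t kern_pcid_sketch(uint16_t asid)
{
        return asid + 1;                    /* skip the reserved PCID 0 */
}

static inline uint16_t user_pcid_sketch(uint16_t asid)
{
        return kern_pcid_sketch(asid) | (1u << X86_CR3_PTI_PCID_USER_BIT);
}
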
/arch/mips/lib/
r3k_dump_tlb.c
31 unsigned int asid; in dump_tlb() local
35 asid = read_c0_entryhi() & asid_mask; in dump_tlb()
50 (entryhi & asid_mask) == asid)) { in dump_tlb()
69 write_c0_entryhi(asid); in dump_tlb()
dump_tlb.c
76 unsigned long s_entryhi, entryhi, asid, mmid; in dump_tlb() local
98 asid = s_mmid = read_c0_memorymapid(); in dump_tlb()
100 asid = s_entryhi & asidmask; in dump_tlb()
139 if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid)) in dump_tlb()
/arch/csky/include/asm/
asid.h
46 u64 asid, old_active_asid; in asid_check_context() local
48 asid = atomic64_read(pasid); in asid_check_context()
66 !((asid ^ atomic64_read(&info->generation)) >> info->bits) && in asid_check_context()
68 old_active_asid, asid)) in asid_check_context()
mmu_context.h
17 #define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)
19 #define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; })
32 setup_pgd(next->pgd, next->context.asid.counter); in switch_mm()
/arch/xtensa/mm/
tlb.c
70 mm->context.asid[cpu] = NO_CONTEXT; in local_flush_tlb_mm()
74 mm->context.asid[cpu] = NO_CONTEXT; in local_flush_tlb_mm()
95 if (mm->context.asid[cpu] == NO_CONTEXT) in local_flush_tlb_range()
99 (unsigned long)mm->context.asid[cpu], start, end); in local_flush_tlb_range()
105 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); in local_flush_tlb_range()
133 if (mm->context.asid[cpu] == NO_CONTEXT) in local_flush_tlb_page()
139 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); in local_flush_tlb_page()
/arch/mips/dec/
kn01-berr.c
79 long asid, entryhi, vaddr; in dec_kn01_be_backend() local
109 asid = read_c0_entryhi(); in dec_kn01_be_backend()
110 entryhi = asid & (PAGE_SIZE - 1); in dec_kn01_be_backend()
118 write_c0_entryhi(asid); in dec_kn01_be_backend()
/arch/mips/mm/
context.c
24 u64 asid; in get_new_mmu_context() local
34 asid = asid_cache(cpu); in get_new_mmu_context()
36 if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { in get_new_mmu_context()
42 set_cpu_context(cpu, mm, asid); in get_new_mmu_context()
43 asid_cache(cpu) = asid; in get_new_mmu_context()
/arch/csky/abiv1/inc/abi/
ckmmu.h
91 static inline void setup_pgd(pgd_t *pgd, int asid) in setup_pgd() argument
94 write_mmu_entryhi(asid); in setup_pgd()
/arch/csky/abiv2/inc/abi/
ckmmu.h
114 static inline void setup_pgd(pgd_t *pgd, int asid) in setup_pgd() argument
131 :"r"(asid), "r"(__pa(pgd) | BIT(0)) in setup_pgd()
/arch/x86/kvm/svm/
svm_ops.h
49 static inline void invlpga(unsigned long addr, u32 asid) in invlpga() argument
51 svm_asm2(invlpga, "c"(asid), "a"(addr)); in invlpga()
