| /kernel/linux/linux-6.6/arch/arm64/mm/ |
D | context.c |
    38   #define ctxid2asid(asid)        ((asid) & ~ASID_MASK)    [argument]
    39   #define asid2ctxid(asid, genid) ((asid) | (genid))       [argument]
    44   u32 asid;    [in get_cpu_asid_bits(), local]
    50   pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",    [in get_cpu_asid_bits()]
    54   asid = 8;    [in get_cpu_asid_bits()]
    57   asid = 16;    [in get_cpu_asid_bits()]
    60   return asid;    [in get_cpu_asid_bits()]
    66   u32 asid = get_cpu_asid_bits();    [in verify_cpu_asid_bits(), local]
    68   if (asid < asid_bits) {    [in verify_cpu_asid_bits()]
    70   * We cannot decrease the ASID size at runtime, so panic if we support    [in verify_cpu_asid_bits()]
    [all …]
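A minimal userspace sketch of the packing those two macros implement: the generation lives in the upper bits, the hardware ASID in the lower bits. The fixed 16-bit ASID width below is illustrative; the kernel reads the real width from the CPU at boot (get_cpu_asid_bits() above).

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the kernel's runtime asid_bits/ASID_MASK. */
    #define ASID_BITS 16
    #define ASID_MASK (~0ULL << ASID_BITS)

    /* Mirrors of ctxid2asid()/asid2ctxid() from the snippet above. */
    static inline uint64_t ctxid2asid(uint64_t ctxid) { return ctxid & ~ASID_MASK; }
    static inline uint64_t asid2ctxid(uint64_t asid, uint64_t genid) { return asid | genid; }

    int main(void)
    {
        uint64_t gen = 3ULL << ASID_BITS;   /* generation sits above the ASID */
        uint64_t ctx = asid2ctxid(42, gen); /* pack ASID 42 into generation 3 */

        assert(ctxid2asid(ctx) == 42);      /* hardware ASID recovered */
        assert((ctx & ASID_MASK) == gen);   /* generation recovered */
        return 0;
    }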
|
| /kernel/linux/linux-5.10/arch/arm64/mm/ |
D | context.c |
    38   #define asid2idx(asid) ((asid) & ~ASID_MASK)    [argument]
    44   u32 asid;    [in get_cpu_asid_bits(), local]
    50   pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",    [in get_cpu_asid_bits()]
    54   asid = 8;    [in get_cpu_asid_bits()]
    57   asid = 16;    [in get_cpu_asid_bits()]
    60   return asid;    [in get_cpu_asid_bits()]
    66   u32 asid = get_cpu_asid_bits();    [in verify_cpu_asid_bits(), local]
    68   if (asid < asid_bits) {    [in verify_cpu_asid_bits()]
    70   * We cannot decrease the ASID size at runtime, so panic if we support    [in verify_cpu_asid_bits()]
    71   * fewer ASID bits than the boot CPU.    [in verify_cpu_asid_bits()]
    [all …]
|
| /kernel/linux/linux-6.6/arch/csky/mm/ |
D | asid.c |
    3    * Generic ASID allocator.
    14   #include <asm/asid.h>
    21   #define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)    [argument]
    27   u64 asid;    [in flush_context(), local]
    29   /* Update the list of reserved ASIDs and the ASID bitmap. */    [in flush_context()]
    33   asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);    [in flush_context()]
    38   * ASID, as this is the only trace we have of    [in flush_context()]
    41   if (asid == 0)    [in flush_context()]
    42   asid = reserved_asid(info, i);    [in flush_context()]
    43   __set_bit(asid2idx(info, asid), info->map);    [in flush_context()]
    [all …]
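flush_context() harvests each CPU's running ASID on rollover: an atomic exchange steals the active value, falling back to the reserved copy when the exchange returns 0, and the result is marked in the allocation bitmap. A simplified single-threaded sketch, with plain arrays standing in for struct asid_info and the active_asid()/reserved_asid() accessors:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS  4
    #define NR_ASIDS 256

    /* Toy per-CPU state standing in for active_asid()/reserved_asid(). */
    static _Atomic uint64_t active_asid[NR_CPUS];
    static uint64_t reserved_asid[NR_CPUS];
    static unsigned char asid_in_use[NR_ASIDS];   /* toy replacement for info->map */

    static void flush_context(void)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            /* Steal the running ASID; 0 means a rollover already raced us. */
            uint64_t asid = atomic_exchange(&active_asid[cpu], 0);
            if (asid == 0)
                asid = reserved_asid[cpu];        /* the only trace left of it */
            reserved_asid[cpu] = asid;
            asid_in_use[asid % NR_ASIDS] = 1;     /* ~ __set_bit(asid2idx(...), map) */
        }
    }

    int main(void)
    {
        atomic_store(&active_asid[1], 7);
        flush_context();
        printf("asid 7 reserved: %d\n", asid_in_use[7]);
        return 0;
    }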
|
| /kernel/linux/linux-5.10/arch/csky/mm/ |
D | asid.c |
    3    * Generic ASID allocator.
    14   #include <asm/asid.h>
    21   #define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)    [argument]
    27   u64 asid;    [in flush_context(), local]
    29   /* Update the list of reserved ASIDs and the ASID bitmap. */    [in flush_context()]
    33   asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);    [in flush_context()]
    38   * ASID, as this is the only trace we have of    [in flush_context()]
    41   if (asid == 0)    [in flush_context()]
    42   asid = reserved_asid(info, i);    [in flush_context()]
    43   __set_bit(asid2idx(info, asid), info->map);    [in flush_context()]
    [all …]
|
| /kernel/linux/linux-6.6/arch/arm/mm/ |
D | context.c |
    27   * | process ID | ASID |
    32   * The ASID is used to tag entries in the CPU caches and TLBs.
    56   u64 context_id, asid;    [in a15_erratum_get_cpumask(), local]
    65   * running the same ASID as the one being invalidated.    [in a15_erratum_get_cpumask()]
    67   asid = per_cpu(active_asids, cpu).counter;    [in a15_erratum_get_cpumask()]
    68   if (asid == 0)    [in a15_erratum_get_cpumask()]
    69   asid = per_cpu(reserved_asids, cpu);    [in a15_erratum_get_cpumask()]
    70   if (context_id == asid)    [in a15_erratum_get_cpumask()]
    79   * With LPAE, the ASID and page tables are updated atomically, so there is
    80   * no need for a reserved set of tables (the active ASID tracking prevents
    [all …]
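The erratum path builds a mask of CPUs that may still be running the ASID under invalidation, falling back to each CPU's reserved ASID when the active counter reads 0 mid-rollover. A compressed sketch of that comparison, with toy per-CPU arrays in place of per_cpu():

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static uint64_t active_asids[NR_CPUS];    /* stand-in for per_cpu(active_asids)   */
    static uint64_t reserved_asids[NR_CPUS];  /* stand-in for per_cpu(reserved_asids) */

    /* Mark the CPUs that might still hold TLB entries for @context_id. */
    static void erratum_get_cpumask(uint64_t context_id, unsigned char *mask)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            uint64_t asid = active_asids[cpu];
            if (asid == 0)                    /* mid-rollover: use the reserved copy */
                asid = reserved_asids[cpu];
            mask[cpu] = (context_id == asid);
        }
    }

    int main(void)
    {
        unsigned char mask[NR_CPUS] = { 0 };
        active_asids[2] = 0x100;
        erratum_get_cpumask(0x100, mask);
        printf("cpu2 needs flush: %d\n", mask[2]);
        return 0;
    }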
|
| /kernel/linux/linux-5.10/arch/arm/mm/ |
D | context.c |
    27   * | process ID | ASID |
    32   * The ASID is used to tag entries in the CPU caches and TLBs.
    56   u64 context_id, asid;    [in a15_erratum_get_cpumask(), local]
    65   * running the same ASID as the one being invalidated.    [in a15_erratum_get_cpumask()]
    67   asid = per_cpu(active_asids, cpu).counter;    [in a15_erratum_get_cpumask()]
    68   if (asid == 0)    [in a15_erratum_get_cpumask()]
    69   asid = per_cpu(reserved_asids, cpu);    [in a15_erratum_get_cpumask()]
    70   if (context_id == asid)    [in a15_erratum_get_cpumask()]
    79   * With LPAE, the ASID and page tables are updated atomically, so there is
    80   * no need for a reserved set of tables (the active ASID tracking prevents
    [all …]
|
| /kernel/linux/linux-6.6/arch/arc/include/asm/ |
D | mmu_context.h |
    10   * -Major rewrite of Core ASID allocation routine get_new_mmu_context
    23   /* ARC ASID Management
    25   * MMU tags TLBs with an 8-bit ASID, avoiding the need to flush the TLB on
    28   * ASID is managed per cpu, so task threads across CPUs can have different
    29   * ASIDs. Global ASID management is needed if hardware supports TLB shootdown
    32   * Each task is assigned a unique ASID, with a simple round-robin allocator
    36   * A new allocation cycle, post rollover, could potentially reassign an ASID
    37   * to a different task. Thus the rule is to refresh the ASID in a new cycle.
    38   * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits
    49   #define asid_mm(mm, cpu) mm->context.asid[cpu]
    [all …]
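The comment block describes an 8-bit hardware PID in the low byte of a 32-bit per-CPU counter whose upper 24 bits count allocation cycles; on an 8-bit wrap the TLB is flushed and every task must refresh its ASID. A userspace sketch of that round-robin scheme (constant names mirror the ARC code, but the function below is an illustrative reconstruction, not the kernel routine):

    #include <stdint.h>
    #include <stdio.h>

    #define MM_CTXT_ASID_MASK   0x000000FFU          /* 8-bit hardware PID      */
    #define MM_CTXT_CYCLE_MASK  0xFFFFFF00U          /* 24-bit allocation cycle */
    #define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)

    static uint32_t asid_cpu = MM_CTXT_FIRST_CYCLE;  /* per-CPU round-robin tracker */

    static void local_flush_tlb_all(void) { puts("TLB flushed on rollover"); }

    /* Return a valid ASID for a task whose current value is @mm_asid. */
    static uint32_t get_new_mmu_context(uint32_t mm_asid)
    {
        /* Same cycle as the allocator: the task's ASID was not reassigned. */
        if (((mm_asid ^ asid_cpu) & MM_CTXT_CYCLE_MASK) == 0)
            return mm_asid;

        /* New cycle: take the next ASID, flushing everything on an 8-bit wrap. */
        if ((++asid_cpu & MM_CTXT_ASID_MASK) == 0) {
            local_flush_tlb_all();
            if (asid_cpu == 0)                       /* 32-bit container wrapped too */
                asid_cpu = MM_CTXT_FIRST_CYCLE;
        }
        return asid_cpu;
    }

    int main(void)
    {
        uint32_t task_asid = 0;                      /* "no context" initial value */
        task_asid = get_new_mmu_context(task_asid);
        printf("task asid=%#x\n", task_asid);
        return 0;
    }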
|
| /kernel/linux/linux-5.10/arch/arc/include/asm/ |
D | mmu_context.h |
    10   * -Major rewrite of Core ASID allocation routine get_new_mmu_context
    24   /* ARC700 ASID Management
    26   * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
    30   * Linux assigns each task a unique ASID. A simple round-robin allocation
    31   * of H/w ASID is done using software tracker @asid_cpu.
    33   * the entire TLB and wrapping ASID back to zero.
    35   * A new allocation cycle, post rollover, could potentially reassign an ASID
    36   * to a different task. Thus the rule is to refresh the ASID in a new cycle.
    37   * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits
    48   #define asid_mm(mm, cpu) mm->context.asid[cpu]
    [all …]
|
| /kernel/linux/linux-6.6/arch/riscv/mm/ |
D | tlbflush.c |
    18   unsigned long asid)    [in local_flush_tlb_range_threshold_asid(), argument]
    24   local_flush_tlb_all_asid(asid);    [in local_flush_tlb_range_threshold_asid()]
    29   local_flush_tlb_page_asid(start, asid);    [in local_flush_tlb_range_threshold_asid()]
    35   unsigned long size, unsigned long stride, unsigned long asid)    [in local_flush_tlb_range_asid(), argument]
    38   local_flush_tlb_page_asid(start, asid);    [in local_flush_tlb_range_asid()]
    40   local_flush_tlb_all_asid(asid);    [in local_flush_tlb_range_asid()]
    42   local_flush_tlb_range_threshold_asid(start, size, stride, asid);    [in local_flush_tlb_range_asid()]
    65   unsigned long asid;    [member]
    75   local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);    [in __ipi_flush_tlb_range_asid()]
    83   unsigned long asid = FLUSH_TLB_NO_ASID;    [in __flush_tlb_range(), local]
    [all …]
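local_flush_tlb_range_asid() picks between a single targeted flush, per-page flushes, and a full flush depending on the range size. A sketch of that decision; FLUSH_THRESHOLD is a made-up cutoff standing in for the kernel's tunable threshold:

    #include <stdio.h>

    #define PAGE_SIZE          4096UL
    #define FLUSH_THRESHOLD    (64 * PAGE_SIZE)  /* hypothetical cutoff */
    #define FLUSH_TLB_MAX_SIZE (~0UL)            /* "flush everything" marker */

    static void flush_all_asid(unsigned long asid)
    {
        printf("sfence.vma all, asid=%lu\n", asid);
    }

    static void flush_page_asid(unsigned long addr, unsigned long asid)
    {
        printf("sfence.vma %#lx, asid=%lu\n", addr, asid);
    }

    static void local_flush_tlb_range_asid(unsigned long start, unsigned long size,
                                           unsigned long stride, unsigned long asid)
    {
        if (size <= stride)                      /* single page: one targeted flush */
            flush_page_asid(start, asid);
        else if (size == FLUSH_TLB_MAX_SIZE || size > FLUSH_THRESHOLD)
            flush_all_asid(asid);                /* huge range: cheaper to drop it all */
        else
            for (unsigned long a = start; a < start + size; a += stride)
                flush_page_asid(a, asid);        /* moderate range: per-page flushes */
    }

    int main(void)
    {
        local_flush_tlb_range_asid(0x10000, 3 * PAGE_SIZE, PAGE_SIZE, 5);
        return 0;
    }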
|
D | context.c |
    69   /* Update the list of reserved ASIDs and the ASID bitmap. */    [in __flush_context()]
    88   /* Mark ASID #0 as used because it is used at boot-time */    [in __flush_context()]
    99   unsigned long asid, ver = atomic_long_read(&current_version);    [in __new_context(), local]
    123  * Allocate a free ASID. If we can't find one then increment    [in __new_context()]
    126  asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx);    [in __new_context()]
    127  if (asid != num_asids)    [in __new_context()]
    137  asid = find_next_zero_bit(context_asid_map, num_asids, 1);    [in __new_context()]
    140  __set_bit(asid, context_asid_map);    [in __new_context()]
    141  cur_idx = asid;    [in __new_context()]
    142  return asid | ver;    [in __new_context()]
    [all …]
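__new_context() searches the ASID bitmap from cur_idx; if nothing is free it bumps the generation, flushes, and retries from index 1 so that ASID 0 stays reserved for boot. A self-contained sketch of that two-pass search, with a byte array in place of the kernel bitmap helpers and the flush reduced to clearing the map:

    #include <stdio.h>

    #define NUM_ASIDS 256

    static unsigned char asid_map[NUM_ASIDS] = { 1 };  /* slot 0: used at boot */
    static unsigned long cur_idx = 1;
    static unsigned long generation = NUM_ASIDS;       /* versions live above the ASID bits */

    static unsigned long find_next_zero(unsigned long from)
    {
        for (unsigned long i = from; i < NUM_ASIDS; i++)
            if (!asid_map[i])
                return i;
        return NUM_ASIDS;                              /* none free */
    }

    static unsigned long new_context(void)
    {
        unsigned long asid = find_next_zero(cur_idx);

        if (asid == NUM_ASIDS) {
            generation += NUM_ASIDS;                   /* ~ the atomic generation bump */
            for (int i = 1; i < NUM_ASIDS; i++)        /* crude stand-in for __flush_context() */
                asid_map[i] = 0;
            asid = find_next_zero(1);                  /* ASID #0 stays reserved */
        }
        asid_map[asid] = 1;
        cur_idx = asid;
        return asid | generation;                      /* pack version + ASID */
    }

    int main(void)
    {
        printf("ctx=%#lx\n", new_context());           /* 0x101: generation 1, ASID 1 */
        return 0;
    }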
|
| /kernel/linux/linux-6.6/arch/xtensa/include/asm/ |
D | mmu_context.h |
    38   * NO_CONTEXT is the invalid ASID value that we don't ever assign to
    72   unsigned long asid = cpu_asid_cache(cpu);    [in get_new_mmu_context(), local]
    73   if ((++asid & ASID_MASK) == 0) {    [in get_new_mmu_context()]
    75   * Start new asid cycle; continue counting with next    [in get_new_mmu_context()]
    79   asid += ASID_USER_FIRST;    [in get_new_mmu_context()]
    81   cpu_asid_cache(cpu) = asid;    [in get_new_mmu_context()]
    82   mm->context.asid[cpu] = asid;    [in get_new_mmu_context()]
    89   * Check if our ASID is of an older version and thus invalid.    [in get_mmu_context()]
    93   unsigned long asid = mm->context.asid[cpu];    [in get_mmu_context(), local]
    95   if (asid == NO_CONTEXT ||    [in get_mmu_context()]
    [all …]
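When the low ASID bits wrap, xtensa starts a new cycle and skips past the reserved low values by adding ASID_USER_FIRST. A sketch with illustrative mask values (the real ones live in the xtensa headers):

    #include <stdio.h>

    #define ASID_MASK       0xffUL  /* illustrative: low byte is the hw ASID         */
    #define ASID_USER_FIRST 4UL     /* illustrative: 0..3 reserved (NO_CONTEXT, ...) */

    static unsigned long cpu_asid_cache;            /* per-CPU counter: version | asid */

    static void flush_tlb_all(void) { puts("TLB flushed"); }

    static unsigned long get_new_mmu_context(void)
    {
        unsigned long asid = cpu_asid_cache;

        if ((++asid & ASID_MASK) == 0) {
            /* New cycle: flush, then skip past the reserved ASID values. */
            flush_tlb_all();
            asid += ASID_USER_FIRST;
        }
        cpu_asid_cache = asid;
        return asid;                                /* stored in mm->context.asid[cpu] */
    }

    int main(void)
    {
        cpu_asid_cache = ASID_MASK;                 /* force a wrap on the next allocation */
        printf("new context=%#lx\n", get_new_mmu_context());
        return 0;
    }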
|
| /kernel/linux/linux-5.10/arch/xtensa/include/asm/ |
D | mmu_context.h |
    38   * NO_CONTEXT is the invalid ASID value that we don't ever assign to
    72   unsigned long asid = cpu_asid_cache(cpu);    [in get_new_mmu_context(), local]
    73   if ((++asid & ASID_MASK) == 0) {    [in get_new_mmu_context()]
    75   * Start new asid cycle; continue counting with next    [in get_new_mmu_context()]
    79   asid += ASID_USER_FIRST;    [in get_new_mmu_context()]
    81   cpu_asid_cache(cpu) = asid;    [in get_new_mmu_context()]
    82   mm->context.asid[cpu] = asid;    [in get_new_mmu_context()]
    89   * Check if our ASID is of an older version and thus invalid.    [in get_mmu_context()]
    93   unsigned long asid = mm->context.asid[cpu];    [in get_mmu_context(), local]
    95   if (asid == NO_CONTEXT ||    [in get_mmu_context()]
    [all …]
|
| /kernel/linux/linux-5.10/drivers/iommu/arm/arm-smmu-v3/ |
D | arm-smmu-v3-sva.c |
    16   * Check if the CPU ASID is available on the SMMU side. If a private context
    20   arm_smmu_share_asid(struct mm_struct *mm, u16 asid)    [argument]
    28   cd = xa_load(&arm_smmu_asid_xa, asid);    [in arm_smmu_share_asid()]
    48   * Race with unmap: TLB invalidations will start targeting the new ASID,    [in arm_smmu_share_asid()]
    49   * which isn't assigned yet. We'll do an invalidate-all on the old ASID    [in arm_smmu_share_asid()]
    52   cd->asid = new_asid;    [in arm_smmu_share_asid()]
    54   * Update ASID and invalidate CD in all associated masters. There will    [in arm_smmu_share_asid()]
    61   arm_smmu_tlb_inv_asid(smmu, asid);    [in arm_smmu_share_asid()]
    63   xa_erase(&arm_smmu_asid_xa, asid);    [in arm_smmu_share_asid()]
    70   u16 asid;    [in arm_smmu_alloc_shared_cd(), local]
    [all …]
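arm_smmu_share_asid() resolves a collision between the CPU ASID of an mm and a private SMMU context: the private context is moved to a fresh ASID, the old ASID is invalidated, and the slot is released for the shared context. A rough userspace sketch of that flow; the xarray (xa_load/xa_erase) and the context-descriptor plumbing are replaced by a toy table:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_ASIDS 16

    struct ctx_desc { uint16_t asid; };

    static struct ctx_desc *asid_table[NUM_ASIDS];   /* toy stand-in for arm_smmu_asid_xa */

    static void tlb_inv_asid(uint16_t asid) { printf("invalidate asid %u\n", (unsigned)asid); }

    static uint16_t alloc_free_asid(void)
    {
        for (uint16_t a = 1; a < NUM_ASIDS; a++)
            if (!asid_table[a])
                return a;
        return 0;                                    /* 0 = exhausted in this toy */
    }

    /* Make @asid usable by a shared (SVA) context. */
    static int share_asid(uint16_t asid)
    {
        struct ctx_desc *cd = asid_table[asid];
        if (!cd)
            return 0;                                /* no private owner: nothing to do */

        uint16_t new_asid = alloc_free_asid();
        if (!new_asid)
            return -1;

        cd->asid = new_asid;                         /* move the private context */
        asid_table[new_asid] = cd;
        tlb_inv_asid(asid);                          /* old ASID may hold stale TLB entries */
        asid_table[asid] = NULL;                     /* ~ xa_erase(): slot is now free */
        return 0;
    }

    int main(void)
    {
        static struct ctx_desc priv = { .asid = 2 };
        asid_table[2] = &priv;
        share_asid(2);                               /* the CPU mm wants ASID 2 */
        printf("private context moved to asid %u\n", (unsigned)priv.asid);
        return 0;
    }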
|
| /kernel/linux/linux-5.10/arch/sh/mm/ |
D | tlbflush_32.c |
    21   unsigned long asid;    [in local_flush_tlb_page(), local]
    24   asid = cpu_asid(cpu, vma->vm_mm);    [in local_flush_tlb_page()]
    30   set_asid(asid);    [in local_flush_tlb_page()]
    32   local_flush_tlb_one(asid, page);    [in local_flush_tlb_page()]
    56   unsigned long asid;    [in local_flush_tlb_range(), local]
    59   asid = cpu_asid(cpu, mm);    [in local_flush_tlb_range()]
    65   set_asid(asid);    [in local_flush_tlb_range()]
    68   local_flush_tlb_one(asid, start);    [in local_flush_tlb_range()]
    89   unsigned long asid;    [in local_flush_tlb_kernel_range(), local]
    92   asid = cpu_asid(cpu, &init_mm);    [in local_flush_tlb_kernel_range()]
    [all …]
|
| /kernel/linux/linux-6.6/arch/sh/mm/ |
D | tlbflush_32.c |
    21   unsigned long asid;    [in local_flush_tlb_page(), local]
    24   asid = cpu_asid(cpu, vma->vm_mm);    [in local_flush_tlb_page()]
    30   set_asid(asid);    [in local_flush_tlb_page()]
    32   local_flush_tlb_one(asid, page);    [in local_flush_tlb_page()]
    56   unsigned long asid;    [in local_flush_tlb_range(), local]
    59   asid = cpu_asid(cpu, mm);    [in local_flush_tlb_range()]
    65   set_asid(asid);    [in local_flush_tlb_range()]
    68   local_flush_tlb_one(asid, start);    [in local_flush_tlb_range()]
    89   unsigned long asid;    [in local_flush_tlb_kernel_range(), local]
    92   asid = cpu_asid(cpu, &init_mm);    [in local_flush_tlb_kernel_range()]
    [all …]
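local_flush_tlb_page() can flush a translation belonging to an mm other than the current one: it temporarily installs that mm's ASID, issues a single-entry flush tagged with it, and restores the previous ASID. A sketch of the pattern, with the MMU register access reduced to a plain variable:

    #include <stdio.h>

    static unsigned long current_asid;   /* stand-in for the MMU's ASID register */

    static unsigned long get_asid(void) { return current_asid; }
    static void set_asid(unsigned long asid) { current_asid = asid; }

    static void local_flush_tlb_one(unsigned long asid, unsigned long page)
    {
        printf("flush page %#lx under asid %lu\n", page, asid);
    }

    /* Flush one page of the mm identified by @mm_asid, preserving the live ASID. */
    static void local_flush_tlb_page(unsigned long mm_asid, unsigned long page)
    {
        unsigned long saved_asid = get_asid();

        if (mm_asid != saved_asid)
            set_asid(mm_asid);           /* address the foreign mm's entries */
        local_flush_tlb_one(mm_asid, page);
        if (mm_asid != saved_asid)
            set_asid(saved_asid);        /* restore the running context */
    }

    int main(void)
    {
        set_asid(3);
        local_flush_tlb_page(7, 0x400000);
        return 0;
    }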
|
| /kernel/linux/linux-6.6/arch/sh/include/asm/ |
D | mmu_context_32.h |
    6    static inline void set_asid(unsigned long asid)    [in set_asid(), argument]
    8    __raw_writel(asid, MMU_PTEAEX);    [in set_asid()]
    16   static inline void set_asid(unsigned long asid)    [in set_asid(), argument]
    25   : "r" (asid), "m" (__m(MMU_PTEH)),    [in set_asid()]
    31   unsigned long asid;    [in get_asid(), local]
    34   : "=r" (asid)    [in get_asid()]
    36   asid &= MMU_CONTEXT_ASID_MASK;    [in get_asid()]
    37   return asid;    [in get_asid()]
|
D | mmu_context.h |
    6    * ASID handling idea taken from the MIPS implementation.
    22   * (b) ASID (Address Space IDentifier)
    33   /* Impossible ASID value, to differentiate from NO_CONTEXT. */
    57   unsigned long asid = asid_cache(cpu);    [in get_mmu_context(), local]
    60   if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)    [in get_mmu_context()]
    65   if (!(++asid & MMU_CONTEXT_ASID_MASK)) {    [in get_mmu_context()]
    67   * We've exhausted the ASIDs of this version.    [in get_mmu_context()]
    76   if (!asid)    [in get_mmu_context()]
    77   asid = MMU_CONTEXT_FIRST_VERSION;    [in get_mmu_context()]
    80   cpu_context(cpu, mm) = asid_cache(cpu) = asid;    [in get_mmu_context()]
    [all …]
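get_mmu_context() compares only the version field of the mm's cached context against the per-CPU counter; a match means the ASID is still live, otherwise a new one is minted, flushing the TLB whenever the 8-bit ASID field wraps. A compact sketch using illustrative mask values:

    #include <stdio.h>

    #define MMU_CONTEXT_ASID_MASK     0x000000ffUL
    #define MMU_CONTEXT_VERSION_MASK  0xffffff00UL
    #define MMU_CONTEXT_FIRST_VERSION 0x00000100UL

    static unsigned long asid_cache = MMU_CONTEXT_FIRST_VERSION;

    static void local_flush_tlb_all(void) { puts("TLB flushed"); }

    static unsigned long get_mmu_context(unsigned long cpu_context)
    {
        unsigned long asid = asid_cache;

        /* Same version: the mm's ASID is still valid on this CPU. */
        if (((cpu_context ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
            return cpu_context;

        if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
            local_flush_tlb_all();      /* exhausted this version's ASIDs */
            if (!asid)                  /* the counter itself wrapped */
                asid = MMU_CONTEXT_FIRST_VERSION;
        }
        return asid_cache = asid;       /* becomes the mm's new context */
    }

    int main(void)
    {
        printf("ctx=%#lx\n", get_mmu_context(0));   /* 0 ~ NO_CONTEXT */
        return 0;
    }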
|
| /kernel/linux/linux-6.6/arch/csky/include/asm/ |
D | asid.h |
    22   /* Number of ASIDs allocated per context (shift value) */
    37   * Check that the ASID is still valid for the context. If not, generate a new ASID.
    39   * @pasid: Pointer to the current ASID batch
    46   u64 asid, old_active_asid;    [in asid_check_context(), local]
    48   asid = atomic64_read(pasid);    [in asid_check_context()]
    52   * If our active_asid is non-zero and the ASID matches the current    [in asid_check_context()]
    60   * - We get a valid ASID back from the cmpxchg, which means the    [in asid_check_context()]
    66   !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&    [in asid_check_context()]
    68   old_active_asid, asid))    [in asid_check_context()]
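asid_check_context() avoids the allocator lock on the fast path: a relaxed read of active_asid, a generation comparison, and a cmpxchg to claim the slot; any failure falls through to the locked slow path. A C11-atomics sketch of that check (widths and names are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    #define ASID_BITS 8     /* illustrative width of the hardware ASID field */

    static _Atomic uint64_t generation = 1ULL << ASID_BITS;
    static _Atomic uint64_t active_asid;             /* this CPU's running ASID */

    static void new_context_slowpath(_Atomic uint64_t *pasid)
    {
        (void)pasid;        /* take the lock, allocate, maybe roll over */
    }

    static void asid_check_context(_Atomic uint64_t *pasid)
    {
        uint64_t asid = atomic_load(pasid);
        uint64_t old_active = atomic_load_explicit(&active_asid, memory_order_relaxed);

        /*
         * Fast path: the slot is non-zero (no rollover in progress), the
         * generation matches, and the cmpxchg succeeds, so we own the ASID
         * without ever taking the allocator lock.
         */
        if (old_active &&
            !((asid ^ atomic_load(&generation)) >> ASID_BITS) &&
            atomic_compare_exchange_strong(&active_asid, &old_active, asid))
            return;

        new_context_slowpath(pasid);                 /* stale or raced: go slow */
    }

    int main(void)
    {
        _Atomic uint64_t mm_asid = (1ULL << ASID_BITS) | 7;  /* generation 1, ASID 7 */
        atomic_store(&active_asid, (1ULL << ASID_BITS) | 7);
        asid_check_context(&mm_asid);                /* takes the fast path */
        return 0;
    }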
|
| /kernel/linux/linux-5.10/arch/csky/include/asm/ |
D | asid.h |
    22   /* Number of ASIDs allocated per context (shift value) */
    37   * Check that the ASID is still valid for the context. If not, generate a new ASID.
    39   * @pasid: Pointer to the current ASID batch
    46   u64 asid, old_active_asid;    [in asid_check_context(), local]
    48   asid = atomic64_read(pasid);    [in asid_check_context()]
    52   * If our active_asid is non-zero and the ASID matches the current    [in asid_check_context()]
    60   * - We get a valid ASID back from the cmpxchg, which means the    [in asid_check_context()]
    66   !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&    [in asid_check_context()]
    68   old_active_asid, asid))    [in asid_check_context()]
|
| /kernel/linux/linux-5.10/arch/sh/include/asm/ |
D | mmu_context_32.h |
    15   static inline void set_asid(unsigned long asid)    [in set_asid(), argument]
    17   __raw_writel(asid, MMU_PTEAEX);    [in set_asid()]
    25   static inline void set_asid(unsigned long asid)    [in set_asid(), argument]
    34   : "r" (asid), "m" (__m(MMU_PTEH)),    [in set_asid()]
    40   unsigned long asid;    [in get_asid(), local]
    43   : "=r" (asid)    [in get_asid()]
    45   asid &= MMU_CONTEXT_ASID_MASK;    [in get_asid()]
    46   return asid;    [in get_asid()]
|
D | mmu_context.h |
    6    * ASID handling idea taken from the MIPS implementation.
    22   * (b) ASID (Address Space IDentifier)
    33   /* Impossible ASID value, to differentiate from NO_CONTEXT. */
    57   unsigned long asid = asid_cache(cpu);    [in get_mmu_context(), local]
    60   if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)    [in get_mmu_context()]
    65   if (!(++asid & MMU_CONTEXT_ASID_MASK)) {    [in get_mmu_context()]
    67   * We've exhausted the ASIDs of this version.    [in get_mmu_context()]
    76   if (!asid)    [in get_mmu_context()]
    77   asid = MMU_CONTEXT_FIRST_VERSION;    [in get_mmu_context()]
    80   cpu_context(cpu, mm) = asid_cache(cpu) = asid;    [in get_mmu_context()]
    [all …]
|
| /kernel/linux/linux-5.10/arch/x86/mm/ |
D | tlb.c |
    52   * to what is traditionally called ASID on the RISC processors.
    54   * We don't use the traditional ASID implementation, where each process/mm gets
    55   * its own ASID and flush/restart when we run out of ASID space.
    64   * ASID - [0, TLB_NR_DYN_ASIDS-1]
    69   * ASID+1, because PCID 0 is special.
    73   * PCID values, but we can still do with a single ASID denomination
    101  * Given @asid, compute kPCID
    103  static inline u16 kern_pcid(u16 asid)    [in kern_pcid(), argument]
    105  VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);    [in kern_pcid()]
    109  * Make sure that the dynamic ASID space does not conflict with the    [in kern_pcid()]
    [all …]
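On x86 the "ASID" is a small dynamic index and the hardware PCID is derived from it: kPCID is ASID+1 because PCID 0 is special, and with PTI the matching uPCID additionally sets a high bit. A sketch of the mapping; the user-bit position below is an assumption for illustration, not quoted from the source:

    #include <assert.h>
    #include <stdint.h>

    #define TLB_NR_DYN_ASIDS  6      /* dynamic ASIDs: 0 .. 5 */
    #define PTI_USER_PCID_BIT 11     /* assumed position of the PTI user bit */

    /* Given a dynamic @asid, compute the kernel PCID (kPCID). */
    static inline uint16_t kern_pcid(uint16_t asid)
    {
        assert(asid < TLB_NR_DYN_ASIDS);
        return asid + 1;             /* PCID 0 is special, so everything shifts by one */
    }

    /* The user half of a PTI address space uses kPCID with the user bit set. */
    static inline uint16_t user_pcid(uint16_t asid)
    {
        return kern_pcid(asid) | (1u << PTI_USER_PCID_BIT);
    }

    int main(void)
    {
        assert(kern_pcid(0) == 1);   /* ASID 0 -> kPCID 1 */
        assert(user_pcid(0) == (1u | (1u << PTI_USER_PCID_BIT)));
        return 0;
    }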
|
| /kernel/linux/linux-5.10/drivers/misc/sgi-gru/ |
D | grumain.c |
    56   /*--------- ASID Management -------------------------------------------
    62   * asid in use ("x"s below). Set "limit" to this value.
    70   * Each time MAX_ASID is reached, increment the asid generation. Since
    73   * a context, the asid generation of the GTS asid is rechecked. If it
    74   * doesn't match the current generation, a new asid will be assigned.
    79   * All asid manipulation & context loading/unloading is protected by the
    83   /* Hit the asid limit. Start over */
    93   static int gru_reset_asid_limit(struct gru_state *gru, int asid)    [in gru_reset_asid_limit(), argument]
    97   gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);    [in gru_reset_asid_limit()]
    100  if (asid >= limit)    [in gru_reset_asid_limit()]
    [all …]
|
| /kernel/linux/linux-6.6/drivers/misc/sgi-gru/ |
D | grumain.c |
    56   /*--------- ASID Management -------------------------------------------
    62   * asid in use ("x"s below). Set "limit" to this value.
    70   * Each time MAX_ASID is reached, increment the asid generation. Since
    73   * a context, the asid generation of the GTS asid is rechecked. If it
    74   * doesn't match the current generation, a new asid will be assigned.
    79   * All asid manipulation & context loading/unloading is protected by the
    83   /* Hit the asid limit. Start over */
    93   static int gru_reset_asid_limit(struct gru_state *gru, int asid)    [in gru_reset_asid_limit(), argument]
    97   gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);    [in gru_reset_asid_limit()]
    100  if (asid >= limit)    [in gru_reset_asid_limit()]
    [all …]
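The GRU comment block describes a wrap-around allocator: hitting MAX_ASID restarts the counter and bumps a generation number, and any context holding a stale generation must take a fresh asid. A toy sketch of that recheck, with invented structure and constant names:

    #include <stdio.h>

    #define MAX_ASID 0x7fff          /* invented wrap point for the sketch */

    struct toy_gru { int asid; int generation; };
    struct toy_ctx { int asid; int generation; };

    static int assign_asid(struct toy_gru *gru, struct toy_ctx *ctx)
    {
        /* Stale generation: the ASID may have been recycled, drop it. */
        if (ctx->generation != gru->generation)
            ctx->asid = 0;

        if (!ctx->asid) {
            if (++gru->asid >= MAX_ASID) {   /* hit the limit: start over */
                gru->asid = 1;
                gru->generation++;
            }
            ctx->asid = gru->asid;
            ctx->generation = gru->generation;
        }
        return ctx->asid;
    }

    int main(void)
    {
        struct toy_gru gru = { .asid = 1, .generation = 0 };
        struct toy_ctx ctx = { 0 };
        printf("asid=%d gen=%d\n", assign_asid(&gru, &ctx), ctx.generation);
        return 0;
    }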
|
| /kernel/linux/linux-5.10/drivers/misc/habanalabs/common/ |
D | asid.c |
    21   /* ASID 0 is reserved for the kernel driver and device CPU */    [in hl_asid_init()]
    51   void hl_asid_free(struct hl_device *hdev, unsigned long asid)    [argument]
    53   if (WARN((asid == 0 || asid >= hdev->asic_prop.max_asid),    [in hl_asid_free()]
    54   "Invalid ASID %lu", asid))    [in hl_asid_free()]
    56   clear_bit(asid, hdev->asid_bitmap);    [in hl_asid_free()]
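hl_asid_free() refuses to release ASID 0 (reserved for the driver and device CPU) or anything at or above max_asid before clearing the bitmap bit. A minimal sketch of the matching alloc/free pair over a byte map, where the real driver uses kernel bitmap ops such as find_first_zero_bit():

    #include <stdio.h>

    #define MAX_ASID 64

    static unsigned char asid_bitmap[MAX_ASID] = { 1 };  /* ASID 0 pre-reserved */

    static long asid_alloc(void)
    {
        for (long a = 1; a < MAX_ASID; a++)
            if (!asid_bitmap[a]) {
                asid_bitmap[a] = 1;
                return a;
            }
        return -1;                    /* exhausted */
    }

    static void asid_free(unsigned long asid)
    {
        if (asid == 0 || asid >= MAX_ASID) {
            fprintf(stderr, "Invalid ASID %lu\n", asid);
            return;
        }
        asid_bitmap[asid] = 0;
    }

    int main(void)
    {
        long a = asid_alloc();
        printf("got asid %ld\n", a);
        asid_free((unsigned long)a);
        return 0;
    }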
|