/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

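/*
 * Overview: each mm_struct carries a 64-bit context id in which the
 * lower asid_bits hold the hardware ASID programmed into the TTBR and
 * the bits above hold a generation counter. When the ASID space is
 * exhausted, the generation is bumped and every CPU is flagged for a
 * TLB flush before old ASIDs are handed out again (a "rollover").
 */
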
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
#else
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
#endif

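/*
 * With CONFIG_UNMAP_KERNEL_AT_EL0 (KPTI), ASIDs are handed out in
 * even/odd pairs, halving the usable space: asid2idx()/idx2asid()
 * map between a bitmap index and the even ASID of a pair. The odd
 * counterpart (bit 0 set) is reserved for tagging the userspace half
 * of the address space, keeping kernel and user TLB entries distinct.
 */
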
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current CPU's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we support
		 * fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

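/*
 * Called from new_context() with cpu_asid_lock held once the ASID
 * space is exhausted: rebuild asid_map from the ASIDs still live on
 * each CPU and flag every CPU for a TLB flush on its next switch.
 */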
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	/*
	 * Ensure the generation bump is observed before we xchg the
	 * active_asids.
	 */
	smp_wmb();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context(cpu);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}

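/*
 * A worked example of the generation check below: with asid_bits == 16,
 * ASID_FIRST_VERSION is 0x10000, so a context.id of 0x30006 encodes
 * hardware ASID 6 in generation 3. (asid ^ asid_generation) >> asid_bits
 * is non-zero iff the generations differ. On the fast path, the xchg of
 * active_asids returning 0 means a concurrent rollover has zeroed this
 * CPU's slot in flush_context(), forcing us onto the slow path.
 */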
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle. We rely on the control
	 * dependency between the generation read and the update of
	 * active_asids to ensure that we are synchronised with a
	 * parallel rollover (i.e. this pairs with the smp_wmb() in
	 * flush_context).
	 */
	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

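/*
 * ALTERNATIVE() leaves the three NOPs in place at boot unless the CPU
 * is affected by Cavium erratum 27456, in which case they are patched
 * to the "ic iallu; dsb nsh; isb" I-cache invalidation sequence.
 */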
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}

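/*
 * On a rollover, flush_context() re-reserves one ASID per CPU for the
 * task each CPU is currently running, so a subsequent new_context() is
 * only guaranteed to find a free ASID if there are more ASIDs than
 * CPUs; hence the WARN_ON() below.
 */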
static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);