/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
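
/*
 * ASID (Address Space ID) allocator.
 *
 * Each mm_struct carries a 64-bit context.id: the low asid_bits hold the
 * hardware ASID and the bits above them hold a generation count.  ASIDs
 * for the current generation are tracked in the asid_map bitmap; when it
 * is exhausted, the generation is bumped, the bitmap is rebuilt from the
 * ASIDs currently active on each CPU, and each CPU performs a local TLB
 * flush the next time it switches mm.
 */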

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

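/*
 * Allocator state: asid_bits is the hardware ASID width probed at boot,
 * cpu_asid_lock serialises the allocation slow path, asid_map is the
 * bitmap of ASIDs in use in the current generation, active_asids holds
 * the ASID each CPU last installed (cleared to zero by a rollover),
 * reserved_asids preserves an ASID across a rollover for a CPU that has
 * not switched mm since, and tlb_flush_pending marks CPUs that still owe
 * a local TLB flush following the most recent rollover.
 */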
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
#else
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
#endif
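
/*
 * Worked example, assuming asid_bits == 16 with CONFIG_UNMAP_KERNEL_AT_EL0
 * enabled: NUM_USER_ASIDS is 32768 and a context.id of 0x10006
 * (generation 1, hardware ASID 6) gives asid2idx() == 3, with idx2asid(3)
 * recovering ASID 6.  Only even hardware ASIDs are handed out here; the
 * odd ASID of each pair is kept free so that user and kernel mappings can
 * be tagged with distinct ASIDs when the kernel is unmapped at EL0.
 */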

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	/*
	 * Ensure the generation bump is observed before we xchg the
	 * active_asids.
	 */
	smp_wmb();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_aivivt())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

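/*
 * Allocate a new context.id (current generation | hardware ASID) for @mm.
 * Called with cpu_asid_lock held.
 */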
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context(cpu);

	/* We have at least 1 ASID per CPU, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}

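/*
 * Install @mm's ASID on @cpu: reuse it on the fast path if it belongs to
 * the current generation, otherwise allocate a new one under
 * cpu_asid_lock, performing any TLB invalidation left pending by the last
 * rollover before it is used.
 */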
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle. We rely on the control
	 * dependency between the generation read and the update of
	 * active_asids to ensure that we are synchronised with a
	 * parallel rollover (i.e. this pairs with the smp_wmb() in
	 * flush_context).
	 */
	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

/*
 * Errata workaround post TTBRx_EL1 update: on parts affected by Cavium
 * erratum 27456, broadcast TLB maintenance can corrupt the I-cache if it
 * holds lines for a non-current ASID, so invalidate it after the update.
 */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}

static int asids_init(void)
{
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(SYS_ID_AA64MMFR0_EL1),
						       ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("Unknown ASID size (%d); assuming 8-bit\n", fld);
		/* Fallthrough */
	case 0:
		asid_bits = 8;
		break;
	case 2:
		asid_bits = 16;
	}

	/* If we end up with more CPUs than ASIDs, expect things to crash */
	WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);