/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

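/*
 * HPTEs are locked by atomically setting a software lock bit (bit 3 of the
 * valid doubleword) with the generic bitops.  The HPTE is stored big-endian
 * while the bitops operate on a native-endian unsigned long, so on
 * little-endian the byte containing bit 3 is seen at bit offset 56, hence
 * the (56+3) definition.
 */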
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

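/*
 * Serializes global tlbie on CPUs that cannot handle concurrent tlbies,
 * i.e. when MMU_FTR_LOCKLESS_TLBIE is not set.
 */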
DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline unsigned long ___tlbie(unsigned long vpn, int psize,
						int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And the top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	return va;
}

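/*
 * Called after a tlbie to work around POWER9 tlbie problems: with
 * CPU_FTR_P9_TLBIE_ERAT_BUG an additional radix-format flush is issued,
 * and with CPU_FTR_P9_TLBIE_STQ_BUG the tlbie is simply repeated, each
 * preceded by a ptesync so the fixup cannot be reordered with the
 * original tlbie.
 */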
static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
				   int apsize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		/* Radix flush for a hash guest */

		unsigned long rb, rs, prs, r, ric;

		rb = PPC_BIT(52); /* IS = 2 */
		rs = 0;  /* lpid = 0 */
		prs = 0; /* partition scoped */
		r = 1;   /* radix format */
		ric = 0; /* RIC_FLUSH_TLB */

		/*
		 * Need the extra ptesync to make sure we don't
		 * re-order the tlbie
		 */
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
			     : : "r"(rb), "i"(r), "i"(prs),
			       "i"(ric), "r"(rs) : "memory");
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		/* Need the extra ptesync to ensure we don't reorder tlbie */
		asm volatile("ptesync": : :"memory");
		___tlbie(vpn, psize, apsize, ssize);
	}
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long rb;

	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	trace_tlbie(0, 1, va, 0, 0, 0, 0);
}

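/*
 * Flush one translation from the TLB.  A local tlbiel is used when the
 * caller flags the flush as local, the MMU supports tlbiel, the page size
 * has a tlbiel encoding and no CXL contexts are in use; otherwise a global
 * tlbie is issued, serialized by native_tlbie_lock on CPUs without
 * MMU_FTR_LOCKLESS_TLBIE.
 */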
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		fixup_tlbie_vpn(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		spin_begin();
		while (test_bit(HPTE_LOCK_BIT, word))
			spin_cpu_relax();
		spin_end();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

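/*
 * Insert an HPTE into the given hash group.  Returns the slot number within
 * the group, with bit 3 set if the caller asked for the secondary hash group
 * (HPTE_V_SECONDARY in vflags), or -1 if the group is full.
 */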
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

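/*
 * Evict one valid, non-bolted entry from the given hash group to make room,
 * starting the search at a pseudo-random slot.  Returns the slot index
 * within the group, or -1 if no valid, non-bolted entry was found.  Note
 * that this does not flush the TLB; see the comments in the update and
 * invalidate paths below.
 */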
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the TLB too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

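/*
 * Look up the HPTE for a bolted kernel mapping of @vpn.  Only the primary
 * hash group is searched.  Returns the global slot number, or -1 if no
 * matching valid entry is found.
 */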
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the TLB too. For bolted entries the base and
	 * actual page sizes are the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	hptep = htab_address + slot;

	VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

	/* Invalidate the hpte */
	hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, psize, psize, ssize, 0);
	return 0;
}


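/*
 * Invalidate the HPTE at @slot (clearing its valid bit) if it still maps
 * @vpn, then flush the translation from the TLB.  The TLB flush is issued
 * even when the HPTE no longer matches; see the comment below.
 */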
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
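/*
 * Invalidate all HPTEs backing a hugepage mapping (currently only 16M THP).
 * hpte_slot_array records, for each base-page-sized slice of the hugepage,
 * whether an HPTE exists and which hash slot it was inserted into.
 */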
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		/*
		 * We need a TLB invalidate for each address: the tlbie
		 * instruction compares the entry's VA in the TLB with the
		 * VA specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

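/*
 * Decode a raw HPTE back into its page size, actual page size, segment size
 * and VPN.  Used by native_hpte_clear() below, which has nothing but the
 * hardware hash table entry and its slot number to work from.
 */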
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    =  avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * Clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they take an ISI), and we are the only one left.  We rely on
 * our kernel mapping being 0xC0's and the hardware ignoring those two
 * real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled.  Remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't take the
		 * native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			___tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush: we batch the tlbies to avoid taking/releasing
 * the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn = 0;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
	unsigned int use_local;

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				hpte_v = hpte_new_to_old_v(hpte_v,
						be64_to_cpu(hptep->r));
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		/*
		 * Just do one more with the last used values.
		 */
		fixup_tlbie_vpn(vpn, psize, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

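/*
 * Record the location of the hash-mode process table in the second
 * doubleword of partition table entry 0 (ISA 3.0): the table's VSID, the
 * SLLP page-size encoding and the table size, as noted in the inline
 * comments below.
 */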
static int native_register_proc_table(unsigned long base, unsigned long page_size,
				      unsigned long table_size)
{
	unsigned long patb1 = base << 25; /* VSID */

	patb1 |= (page_size << 5);  /* sllp */
	patb1 |= table_size;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}

void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate	= native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	= native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
	mmu_hash_ops.hpte_insert	= native_hpte_insert;
	mmu_hash_ops.hpte_remove	= native_hpte_remove;
	mmu_hash_ops.hpte_clear_all	= native_hpte_clear;
	mmu_hash_ops.flush_hash_range = native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate   = native_hugepage_invalidate;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		register_process_table = native_register_proc_table;
}