/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define HPTE_LOCK_BIT 3

DEFINE_RAW_SPINLOCK(native_tlbie_lock);

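/*
 * Issue a broadcast tlbie for one virtual page. The vpn is turned back
 * into a va and the page size, segment size and AVAL encodings are
 * ORed in to form the operand. Callers supply the surrounding
 * ptesync/eieio/tlbsync barriers and, on CPUs without
 * MMU_FTR_LOCKLESS_TLBIE, must hold native_tlbie_lock.
 */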
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require
	 * the masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		va |= mmu_psize_defs[apsize].sllp << 6;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/* Add AVAL part */
		if (psize != apsize) {
			/*
			 * MPSS, 64K base page size and 16MB large page size.
			 * We don't need all the bits, but the rest of the
			 * bits must be ignored by the processor.
			 * vpn covers up to 65 bits of va. (0...65) and we
			 * need 58..64 bits of va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

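/*
 * Local (this-CPU-only) counterpart of __tlbie: tlbiel is not
 * broadcast, so no global lock is needed. The instruction is emitted
 * as a raw opcode (0x7c000224 is tlbiel) with the operand register and
 * the large-page bit patched in. Callers provide the ptesync barriers.
 */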
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require
	 * the masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		va |= mmu_psize_defs[apsize].sllp << 6;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/* Add AVAL part */
		if (psize != apsize) {
			/*
			 * MPSS, 64K base page size and 16MB large page size.
			 * We don't need all the bits, but the rest of the
			 * bits must be ignored by the processor.
			 * vpn covers up to 65 bits of va. (0...65) and we
			 * need 58..64 bits of va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

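/*
 * Flush one translation, picking the local or broadcast form. tlbiel
 * is only used when the caller asked for a local flush, the CPU
 * supports it (MMU_FTR_TLBIEL) and the page size permits it; otherwise
 * a broadcast tlbie is issued under native_tlbie_lock on hardware
 * without MMU_FTR_LOCKLESS_TLBIE.
 */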
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

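/*
 * Per-HPTE lock, implemented with a software lock bit (HPTE_LOCK_BIT)
 * in the entry's first doubleword. Note that a full store to hptep->v
 * overwrites the bit, which is why writing the valid word also unlocks
 * the entry (see the NOTEs below).
 */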
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

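/*
 * Insert an HPTE into the given group: scan the eight slots for an
 * invalid entry, recheck under the per-HPTE lock, then write the
 * second dword before the first so the valid bit only becomes visible
 * once the rest of the entry is. Returns the slot index, with bit 3
 * set for the secondary hash, or -1 if the group is full.
 */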
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

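/*
 * Make room in a full group by evicting one valid, non-bolted entry,
 * starting from a pseudo-random slot (low timebase bits). The TLB is
 * deliberately not flushed here; callers treat the displaced
 * translation as still valid and handle invalidation themselves.
 */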
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

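/*
 * Decode the 8-bit LP field of a large-page HPTE against the penc
 * encodings recorded for the given base page size, returning the
 * actual page size index or -1 if no encoding matches.
 */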
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>=8KB
		 *    rrrr rrzz		>=16KB
		 *    rrrr rzzz		>=32KB
		 *    rrrr zzzz		>=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

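/*
 * Actual page size of a valid HPTE: 4K unless HPTE_V_LARGE is set, in
 * which case the LP field is decoded.
 */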
static inline int hpte_actual_psize(struct hash_pte *hptep, int psize)
{
	/* Look at the 8 bit LP value */
	unsigned int lp = (hptep->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hptep->v & HPTE_V_VALID))
		return -1;

	/* First check if it is large page */
	if (!(hptep->v & HPTE_V_LARGE))
		return MMU_PAGE_4K;

	return __hpte_actual_psize(lp, psize);
}

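/*
 * Update the protection bits of the HPTE at the given slot if it still
 * matches the expected AVPN. Returns 0 on a hit and -1 on a miss; the
 * translation is flushed from the TLB in either case.
 */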
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int psize, int ssize,
				 int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;
	int actual_psize;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;
	actual_psize = hpte_actual_psize(hptep, psize);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't
	 * do a tlb invalidate. If a hash bucket gets full, we "evict" a
	 * more or less random entry from it. When we do that we don't
	 * invalidate the TLB (hpte_remove) because we assume the old
	 * translation is still technically "valid".
	 */
	if (actual_psize < 0) {
		actual_psize = psize;
		ret = -1;
		goto err_out;
	}
	if (!HPTE_V_COMPARE(hpte_v, want_v)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
err_out:
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, psize, actual_psize, ssize, local);

	return ret;
}

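/*
 * Look up the slot of a bolted mapping by scanning the vpn's primary
 * hash group. Returns the slot index, or -1 if no valid entry matches.
 */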
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hptep->v;

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	int actual_psize;
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;
	actual_psize = hpte_actual_psize(hptep, psize);
	if (actual_psize < 0)
		actual_psize = psize;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, psize, actual_psize, ssize, 0);
}

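/*
 * Invalidate the HPTE at the given slot if it still matches the
 * expected AVPN, then flush the translation from the TLB. Runs with
 * interrupts disabled.
 */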
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int psize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	int actual_psize;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	actual_psize = hpte_actual_psize(hptep, psize);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't
	 * do a tlb invalidate. If a hash bucket gets full, we "evict" a
	 * more or less random entry from it. When we do that we don't
	 * invalidate the TLB (hpte_remove) because we assume the old
	 * translation is still technically "valid".
	 */
	if (actual_psize < 0) {
		actual_psize = psize;
		native_unlock_hpte(hptep);
		goto err_out;
	}
	if (!HPTE_V_COMPARE(hpte_v, want_v))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

err_out:
	/* Invalidate the TLB */
	tlbie(vpn, psize, actual_psize, ssize, local);
	local_irq_restore(flags);
}

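/*
 * Recover vpn, base and actual page size and segment size from a raw
 * HPTE and its slot number. Used by native_hpte_clear(), which must
 * flush the TLB for every valid entry it tears down.
 */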
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = hpte->v;
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    =  avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/*
	 * we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

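/*
 * Install the native (bare-metal) hash table operations in ppc_md.
 */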
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
}