/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
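
/*
 * H_BULK_REMOVE takes up to eight parameter doublewords, i.e. four
 * (control, AVPN) pairs per call.  Each control word combines one of
 * the HBR_* codes above with an HPTE slot number; a partially filled
 * batch is terminated with an HBR_END entry (see the pix == 8 flushes
 * and the trailing HBR_END handling in the bulk-remove paths below).
 */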


/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * PAPR calls this feature "SLB-Buffer", but firmware never reports
	 * it as such; all SPLPARs support the SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca[cpu].slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
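		/* 0x2 == DTL_LOG_PREEMPT: log only dispatches that follow preemption */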
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

#ifdef CONFIG_PPC_STD_MMU_64

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

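	/*
	 * Under expanded/cooperative memory overcommit (XCMO), pages that
	 * are not marked no-execute are flagged to the hypervisor as
	 * page-coalescing candidates, likely because read-mostly text
	 * pages coalesce well across partitions.
	 */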
	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry: ANDCOND against HPTE_V_BOLTED
		 * (0x1UL << 4), so the remove only succeeds if the bolted
		 * bit is clear */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void pSeries_lpar_hptab_clear(void)
{
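	/* ppc64_pft_size is log2 of the hash table size; each HPTE is 16 bytes */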
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/* Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up. So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero. For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	flags = (newpp & 7) | H_AVPN;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

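	/* scan the whole group, reading four HPTEs per H_READ call */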
	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;
	return hpte_group + slot;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Batch hugepage invalidates so that at most three H_BULK_REMOVE calls
 * (12 HPTEs, four per call) are issued while holding
 * pSeries_lpar_tlbie_lock, to avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
					param[0], param[1], param[2],
					param[3], param[4], param[5],
					param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);
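	/* one huge page is backed by 2^(PMD_SHIFT - shift) base-size HPTEs */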

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
	return 0;
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
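			/* convert (hash, group index) into a global HPTE slot */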
553 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
554 slot += hidx & _PTEIDX_GROUP_IX;
555 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
556 /*
557 * lpar doesn't use the passed actual page size
558 */
559 pSeries_lpar_hpte_invalidate(slot, vpn, psize,
560 0, ssize, local);
561 } else {
562 param[pix] = HBR_REQUEST | HBR_AVPN | slot;
563 param[pix+1] = hpte_encode_avpn(vpn, psize,
564 ssize);
565 pix += 2;
566 if (pix == 8) {
567 rc = plpar_hcall9(H_BULK_REMOVE, param,
568 param[0], param[1], param[2],
569 param[3], param[4], param[5],
570 param[6], param[7]);
571 BUG_ON(rc != H_SUCCESS);
572 pix = 0;
573 }
574 }
575 } pte_iterate_hashed_end();
576 }
577 if (pix) {
578 param[pix] = HBR_END;
579 rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
580 param[2], param[3], param[4], param[5],
581 param[6], param[7]);
582 BUG_ON(rc != H_SUCCESS);
583 }
584
585 if (lock_tlbie)
586 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
587 }
588
disable_bulk_remove(char * str)589 static int __init disable_bulk_remove(char *str)
590 {
591 if (strcmp(str, "off") == 0 &&
592 firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all = pSeries_lpar_hptab_clear;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

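	/*
	 * The CMO page size may be smaller than PAGE_SIZE, so each kernel
	 * page must be reported to the hypervisor in cmo_page_sz chunks.
	 */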
	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

void hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);


void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
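	/*
	 * Preemption stays disabled across the hcall so that the matching
	 * __trace_hcall_exit() runs on the same CPU and pairs with this
	 * per-cpu depth count (the preempt_enable() is in the exit path).
	 */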
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp
 * The H_GET_MPP hcall returns its info in seven return parameters,
 * unpacked below.
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

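	/* retbuf[2]: group number in bits 31:16, pool number in bits 15:0 */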
	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

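	/* retbuf[3]: weights in the top two bytes, entitlement in the low 48 bits */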
	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}