/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

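/*
 * On SMP, broadcast the local flush handlers to every online CPU and
 * wait for them to complete.
 */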
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

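/*
 * Called after a PTE is installed: if flush_dcache_page() deferred a
 * flush by setting PG_dcache_dirty, write the page back through its
 * kernel mapping now so the user and kernel views become coherent.
 */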
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special. As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

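/* Report cache and TLB geometry (used for the /proc/cpuinfo output). */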
void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

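/*
 * Read the cache and TLB geometry from PDC firmware, decide whether the
 * I- and D-TLBs must be flushed separately, and derive the strides used
 * by the assembly cache-flush loops.
 */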
void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

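/*
 * Disable space-register hashing for this CPU type, then verify via
 * PDC that no space-ID bits are still being hashed into addresses.
 */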
void __init disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

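/*
 * Flush (write back) or purge (discard) a single page through its
 * physical-address alias; executable mappings get an icache flush too.
 * Preemption is disabled so the flush completes on one CPU.
 */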
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

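/*
 * Generic page-cache hook: defer the flush (PG_dcache_dirty) when the
 * page has no user mappings yet; otherwise flush the kernel alias and
 * one user address per cache colour, relying on congruent mapping.
 */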
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	unsigned long flags;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD;

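/*
 * Time a whole-cache flush against a range flush over the kernel text
 * to find the size above which flushing everything is cheaper, then
 * repeat the measurement for the TLB.
 */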
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size, start;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measurement of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = 0;
	start = (unsigned long) _text;
	rangetime = mfctl(16);
	while (start < (unsigned long) _end) {
		flush_tlb_kernel_range(start, start + PAGE_SIZE);
		start += PAGE_SIZE;
		size += PAGE_SIZE;
	}
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > parisc_tlb_flush_threshold)
		parisc_tlb_flush_threshold = threshold;
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

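/* Flush one page through the kernel mapping and purge its kernel TLB entry. */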
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping. No coherency is needed (all in
	   kunmap) for the `to' page. However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions. These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs. */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

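/* Helper so on_each_cpu() can run the local flush on every CPU. */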
static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

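/* Total size, in bytes, of all VMAs in an address space. */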
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

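/*
 * Walk the page table (pgd -> pud -> pmd -> pte) for a user address;
 * returns NULL if any intermediate level is empty.
 */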
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

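/*
 * Flush the caches for a whole address space: fast user-space range
 * flushes when mm is the current address space, otherwise a
 * page-by-page walk of the page tables.
 */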
void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc. So, avoid it if the mm isn't too big. */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if (vma->vm_flags & VM_EXEC)
				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			if (unlikely(mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

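/*
 * Flush a user address range, falling back to a full cache flush once
 * the range exceeds the measured threshold.
 */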
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	unsigned long addr;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		flush_tlb_range(vma, start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn)) {
			if (unlikely(vma->vm_mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

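/* Flush (or, when the mm has no space ID, purge) a single user page. */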
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	}
}

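/*
 * Write back a vmap'd kernel range (e.g. before device I/O); large
 * ranges flush the whole data cache instead of iterating.
 */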
void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

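/*
 * Discard cached data for a vmap'd kernel range (e.g. after a device
 * has written to memory) so subsequent reads see the new contents.
 */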
void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);