/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special. As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
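	/* Worked example with hypothetical PDC values: cc_line = 16,
	 * cc_block = 1, cc_shift = 2 gives a stride of
	 * 16 << (3 + 1 + 2) = 1024 bytes between flush-loop iterations. */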

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void __init disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far. setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same instruction sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

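/* Write back and invalidate a single page, flushing through an alias
 * congruent with the user address vmaddr; executable mappings get their
 * icache lines flushed as well. Preemption is disabled so the flush
 * completes on a single CPU. */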
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

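/* As above, but the dcache page is purged (invalidated without
 * writeback) rather than flushed. */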
static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent. */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: the
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course). */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (parisc_requires_coherency() && old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

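/* Time a whole-cache flush against a range flush of the kernel text
 * using the interval timer (CR16), and derive the size above which a
 * full flush is cheaper than flushing by range; then do the same for
 * the TLB. */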
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

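/* Flush a page through its kernel mapping, then purge its kernel TLB
 * entry so the CPU cannot speculatively pull the page back into the
 * cache. */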
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping. No coherency is needed (all in
	   kunmap) for the `to' page. However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions. These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs. */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

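/* Total user address space mapped by an mm, used to decide whether a
 * full cache flush is cheaper than walking every VMA. */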
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

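/* Walk the page table from the pgd down to the pte for addr, returning
 * NULL if any intermediate level is empty. */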
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		p4d_t *p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud_t *pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd_t *pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc. So, avoid it if the mm isn't too big. */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if (vma->vm_flags & VM_EXEC)
				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			if (unlikely(mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	unsigned long addr;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		flush_tlb_range(vma, start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn)) {
			if (unlikely(vma->vm_mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	}
}

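/* Write back a kernel virtual range (e.g. a vmap/vmalloc area) and drop
 * its TLB entries; ranges at or above the flush threshold fall back to
 * a full data-cache flush. */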
void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

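/* As above, but purge the range (invalidate without writeback) so that
 * stale cache lines are discarded. */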
void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);