/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
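
/* The purge_tlb_start()/purge_tlb_end() helpers used below (defined in
 * <asm/tlbflush.h>) take pa_tlb_lock with interrupts disabled around each
 * batch of pdtlb/pitlb purges, which is what provides this serialization. */
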
47
48 struct pdc_cache_info cache_info __read_mostly;
49 #ifndef CONFIG_PA20
50 static struct pdc_btlb_info btlb_info __read_mostly;
51 #endif
52
53 #ifdef CONFIG_SMP
54 void
flush_data_cache(void)55 flush_data_cache(void)
56 {
57 on_each_cpu(flush_data_cache_local, NULL, 1);
58 }
59 void
flush_instruction_cache(void)60 flush_instruction_cache(void)
61 {
62 on_each_cpu(flush_instruction_cache_local, NULL, 1);
63 }
64 #endif
void
flush_cache_all_local(void)
{
        flush_instruction_cache_local(NULL);
        flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

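/* Called when a user PTE is installed. If flush_dcache_page() earlier
 * deferred a flush by setting PG_dcache_dirty (the page had no user
 * mappings at the time), flush the kernel mapping now so the freshly
 * mapped user alias sees the data. */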
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        struct page *page = pte_page(*ptep);

        if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
            test_bit(PG_dcache_dirty, &page->flags)) {
                flush_kernel_dcache_page(page);
                clear_bit(PG_dcache_dirty, &page->flags);
        } else if (parisc_requires_coherency())
                flush_kernel_dcache_page(page);
}

void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size / 1024);
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
                cache_info.dc_size / 1024,
                (cache_info.dc_conf.cc_wt ? "WT" : "WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D" : ""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB" : "");

#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size == 0) {
                seq_printf(m, "BTLB\t\t: not supported\n");
        } else {
                seq_printf(m,
                        "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                        "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                        "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                        btlb_info.max_size, (int)4096,
                        btlb_info.max_size >> 8,
                        btlb_info.fixed_range_info.num_i,
                        btlb_info.fixed_range_info.num_d,
                        btlb_info.fixed_range_info.num_comb,
                        btlb_info.variable_range_info.num_i,
                        btlb_info.variable_range_info.num_d,
                        btlb_info.variable_range_info.num_comb);
        }
#endif
}

void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_pad1);

        printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_pad1);
#endif

        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                                "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* "New and Improved" version from Jim Hull
         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
         * The following CAFL_STRIDE is an optimized version, see
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
         */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
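        /* Illustrative arithmetic only (hypothetical PDC field values): with
         * cc_line = 8, cc_block = 1 and cc_shift = 0 this evaluates to
         * 8 << 4 = 128, i.e. the flush loops would step 128 bytes at a time. */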
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof btlb_info);
        }
#endif

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

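/* Space-register hashing is a CPU mode in which space-ID bits are mixed
 * into cache/TLB indexing. Disable it with the CPU-family-specific
 * assembly sequence, then verify via PDC that no space-ID bits are still
 * being hashed in. */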
void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits;

        switch (boot_cpu_data.cpu_type) {
        case pcx:       /* We shouldn't get this far. setup.c should prevent it. */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2:     /* pcxl2 doesn't support space register hashing */
                return;

        default:        /* Currently all PA2.0 machines use the same ins. sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* If this procedure isn't implemented, don't panic. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}


static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
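        /* Keep this thread on the CPU while the assembly routines flush
         * through their temporary alias mapping (presumably established
         * with a CPU-local TLB entry, hence the preemption disable). */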
        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}


void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        struct vm_area_struct *mpnt;
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
                return;
        }

        flush_kernel_dcache_page(page);

        if (!mapping)
                return;

        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        /* We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent. */

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;

                /* The TLB is the engine of coherence on parisc: the
                 * CPU is entitled to speculate any page with a TLB
                 * mapping, so here we kill the mapping then flush the
                 * page along a special flush-only alias mapping.
                 * This guarantees that the page is no longer in the
                 * cache for any process, nor may it be speculatively
                 * read in (until the user or kernel specifically
                 * accesses it, of course). */

                flush_tlb_page(mpnt, addr);
                if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
                        __flush_cache_page(mpnt, addr, page_to_phys(page));
                        if (old_addr)
                                printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
                                        old_addr, addr, mpnt->vm_file ?
                                        (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
                        old_addr = addr;
                }
        }
        flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);


/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size;

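        /* CR16 is the PA-RISC interval timer: time a full data-cache flush
         * against a range flush over the whole kernel image, then scale to
         * find the range size beyond which a full flush is cheaper. */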
        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        /* Racy, but if we see an intermediate value, it's ok too... */
        parisc_cache_flush_threshold = size * alltime / rangetime;

        parisc_cache_flush_threshold = (parisc_cache_flush_threshold +
                L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
        if (!parisc_cache_flush_threshold)
                parisc_cache_flush_threshold = FLUSH_THRESHOLD;

        if (parisc_cache_flush_threshold > cache_info.dc_size)
                parisc_cache_flush_threshold = cache_info.dc_size;

        printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
                parisc_cache_flush_threshold, num_online_cpus());
}


extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
        unsigned long flags;

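        /* Flush the kernel-mapping cache lines first, then purge the
         * matching kernel TLB entry so the CPU cannot speculatively pull
         * the page back into the cache (the TLB being the engine of
         * coherence, per the comment in flush_dcache_page() above). */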
        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb_kernel(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
        clear_page_asm(vto);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
        struct page *pg)
{
        /* Copy using kernel mapping. No coherency is needed (all in
           kmap/kunmap) on machines that don't support non-equivalent
           aliasing. However, the `from' page needs to be flushed before
           it can be accessed through the kernel mapping. */
        preempt_disable();
        flush_dcache_page_asm(__pa(vfrom), vaddr);
        preempt_enable();
        copy_page_asm(vto, vfrom);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
        if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
        unsigned long flags;

        /* Note: purge_tlb_entries can be called at startup with
           no context. */

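        /* Load the mm's space ID into sr1 so that the pdtlb/pitlb purges
         * below target that address space rather than the current one. */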
        purge_tlb_start(flags);
        mtsp(mm->context, 1);
        pdtlb(addr);
        pitlb(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(purge_tlb_entries);

void __flush_tlb_range(unsigned long sid, unsigned long start,
        unsigned long end)
{
        unsigned long npages;

        npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
                flush_tlb_all();
        else {
                unsigned long flags;

                mtsp(sid, 1);
                purge_tlb_start(flags);
                if (split_tlb) {
                        while (npages--) {
                                pdtlb(start);
                                pitlb(start);
                                start += PAGE_SIZE;
                        }
                } else {
                        while (npages--) {
                                pdtlb(start);
                                start += PAGE_SIZE;
                        }
                }
                purge_tlb_end(flags);
        }
}

static void cacheflush_h_tmp_function(void *dummy)
{
        flush_cache_all_local();
}

void flush_cache_all(void)
{
        on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                usize += vma->vm_end - vma->vm_start;
        return usize;
}

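/* Walk the page tables down to the PTE mapping 'addr'; returns NULL if
 * any intermediate level (pgd/pud/pmd) is empty. */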
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
        pte_t *ptep = NULL;

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                ptep = pte_offset_map(pmd, addr);
                }
        }
        return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc. So, avoid it if the mm isn't too big. */
        if (mm_total_size(mm) < parisc_cache_flush_threshold) {
                struct vm_area_struct *vma;

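                /* sr3 holds the space ID of the current user address
                 * space, so this tests whether mm is live on this CPU:
                 * if so, its user virtual ranges can be flushed directly;
                 * otherwise we walk the page tables and flush each mapped
                 * page via its physical address. */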
                if (mm->context == mfsp(3)) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                flush_user_dcache_range_asm(vma->vm_start,
                                        vma->vm_end);
                                if (vma->vm_flags & VM_EXEC)
                                        flush_user_icache_range_asm(
                                                vma->vm_start, vma->vm_end);
                        }
                } else {
                        pgd_t *pgd = mm->pgd;

                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                unsigned long addr;

                                for (addr = vma->vm_start; addr < vma->vm_end;
                                     addr += PAGE_SIZE) {
                                        pte_t *ptep = get_ptep(pgd, addr);
                                        if (ptep != NULL) {
                                                pte_t pte = *ptep;
                                                __flush_cache_page(vma, addr,
                                                        page_to_phys(pte_page(pte)));
                                        }
                                }
                        }
                }
                return;
        }

#ifdef CONFIG_SMP
        flush_cache_all();
#else
        flush_cache_all_local();
#endif
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_dcache_range_asm(start, end);
        else
                flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_icache_range_asm(start, end);
        else
                flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        BUG_ON(!vma->vm_mm->context);

        if ((end - start) < parisc_cache_flush_threshold) {
                if (vma->vm_mm->context == mfsp(3)) {
                        flush_user_dcache_range_asm(start, end);
                        if (vma->vm_flags & VM_EXEC)
                                flush_user_icache_range_asm(start, end);
                } else {
                        unsigned long addr;
                        pgd_t *pgd = vma->vm_mm->pgd;

                        for (addr = start & PAGE_MASK; addr < end;
                             addr += PAGE_SIZE) {
                                pte_t *ptep = get_ptep(pgd, addr);
                                if (ptep != NULL) {
                                        pte_t pte = *ptep;
                                        flush_cache_page(vma,
                                                addr, pte_pfn(pte));
                                }
                        }
                }
        } else {
#ifdef CONFIG_SMP
                flush_cache_all();
#else
                flush_cache_all_local();
#endif
        }
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        BUG_ON(!vma->vm_mm->context);

        flush_tlb_page(vma, vmaddr);
        __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}

#ifdef CONFIG_PARISC_TMPALIAS

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *vto;
        unsigned long flags;

        /* Clear using TMPALIAS region. The page doesn't need to
           be flushed but the kernel mapping needs to be purged. */

        vto = kmap_atomic(page);

        /* The PA-RISC 2.0 Architecture book states on page F-6:
           "Before a write-capable translation is enabled, *all*
           non-equivalently-aliased translations must be removed
           from the page table and purged from the TLB. (Note
           that the caches are not required to be flushed at this
           time.) Before any non-equivalent aliased translation
           is re-enabled, the virtual address range for the writeable
           page (the entire page) must be flushed from the cache,
           and the write-capable translation removed from the page
           table and purged from the TLB." */

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        purge_tlb_end(flags);
        preempt_disable();
        clear_user_page_asm(vto, vaddr);
        preempt_enable();

        pagefault_enable();             /* kunmap_atomic(addr, KM_USER0); */
}

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;
        unsigned long flags;

        /* Copy using TMPALIAS region. This has the advantage
           that the `from' page doesn't need to be flushed. However,
           the `to' page must be flushed in copy_user_page_asm since
           it can be used to bring in executable code. */

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        pdtlb_kernel(vfrom);
        purge_tlb_end(flags);
        preempt_disable();
        copy_user_page_asm(vto, vfrom, vaddr);
        flush_dcache_page_asm(__pa(vto), vaddr);
        preempt_enable();

        pagefault_enable();             /* kunmap_atomic(addr, KM_USER1); */
        pagefault_enable();             /* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */