/*
 * linux/arch/arm/mm/copypage-v6.c
 *
 * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

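/*
 * Two fixed 16kB windows at the top of the kernel address space serve
 * as scratch mappings, one PTE slot per cache colour (up to four 4kB
 * pages each); the SHMLBA check above guards that assumption.
 */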
#define from_address	(0xffff8000)
#define to_address	(0xffffc000)

static DEFINE_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	void *kto, *kfrom;

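	/*
	 * kmap_atomic() gives us a temporary kernel mapping even when
	 * the pages live in highmem; with a non-aliasing cache the
	 * kernel alias is coherent with any user mapping of the page.
	 */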
	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs the MCRR cache-range operation below to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
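	/*
	 * MCRR p15, 0, <end>, <start>, c6 is the ARMv6 "invalidate
	 * data cache range" operation: it drops any cached lines
	 * covering the page without writing them back, since the
	 * old contents are about to be overwritten anyway.
	 */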
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");
}

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

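	/*
	 * If the source page was dirtied through its kernel mapping,
	 * write those cache lines back first so the copy below reads
	 * up-to-date data through the colour-matched alias.
	 */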
	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
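	/*
	 * The scratch windows are shared by all CPUs, so the
	 * remap/flush/copy sequence must be serialised by v6_lock.
	 */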
	spin_lock(&v6_lock);

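	/* Map both pages at kernel addresses whose colour matches vaddr. */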
	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);

	kfrom = from_address + (offset << PAGE_SHIFT);
	kto = to_address + (offset << PAGE_SHIFT);

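	/* The windows may still hold TLB entries for the previous pages. */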
	flush_tlb_kernel_page(kfrom);
	flush_tlb_kernel_page(kto);

	copy_page((void *)kto, (void *)kfrom);

	spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	spin_lock(&v6_lock);

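	/*
	 * As in the copy path: map the page at a colour-matched
	 * address, zap any stale TLB entry, then clear through it.
	 */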
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}

struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};

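/*
 * The non-aliasing routines are installed by default; the initcall
 * below swaps in the aliasing-safe variants once the cache type is
 * known.  The core mm reaches these hooks through cpu_user, via the
 * copy_user_highpage()/clear_user_highpage() wrappers.
 */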
static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);