// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

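/*
 * CACHE_COLOUR() derives a page's cache colour from the low SHMLBA
 * bits of its user virtual address; with the usual ARMv6 SHMLBA of
 * 4 * PAGE_SIZE that gives at most four colours.  For example,
 * assuming CACHE_COLOUR(v) == ((v & (SHMLBA - 1)) >> PAGE_SHIFT):
 *
 *	CACHE_COLOUR(0x00001000) == 1
 *	CACHE_COLOUR(0x00005000) == 1	(same colour: aliases in the cache)
 *
 * The build-time check below guards the assumption that the colour
 * index fits within the fixed mapping windows the aliasing helpers use.
 */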
#if SHMLBA > 16384
#error FIX ME
#endif

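/*
 * The aliasing helpers below share one small set of colour-indexed
 * kernel mapping windows (COPYPAGE_V6_FROM/COPYPAGE_V6_TO), so their
 * use must be serialised across CPUs; this lock does that.
 */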
static DEFINE_RAW_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from);
	kto = kmap_atomic(to);
	copy_page(kto, kfrom);
	/* atomic kmaps are stack-like: release them in reverse order */
	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
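	/*
	 * mcrr p15, 0, <end>, <start>, c6 is the ARMv6 "invalidate
	 * D-cache range" operation: it discards any stale cache lines
	 * covering the destination page without writing them back.
	 */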
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - 1)
	   : "cc");
}

/*
 * Copy the page, taking account of the cache colour.  Both pages are
 * temporarily mapped at kernel addresses with the same cache colour
 * as the user address, so the copy populates the cache lines the
 * user mapping will actually hit.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

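	/*
	 * If the source page isn't already marked clean in the D-cache,
	 * flush its kernel-side mapping first so the copy below reads
	 * up-to-date data through the alias.
	 */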
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);

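	/*
	 * set_top_pte() (see mm.h) installs the window PTE and is
	 * assumed to flush the old TLB entry for that address, so each
	 * use of the window sees its current mapping.
	 */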
	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));

	copy_page((void *)kto, (void *)kfrom);

	raw_spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
	clear_page((void *)to);

	raw_spin_unlock(&v6_lock);
}

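/*
 * Default to the nonaliasing implementations.  The table itself is
 * only needed during boot (hence __initdata); it is assumed the CPU
 * setup code copies it into the live cpu_user methods, which the
 * initcall below can then override.
 */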
struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
};

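/*
 * A VIPT cache aliases when its way size exceeds PAGE_SIZE: one
 * physical page can then occupy several cache locations depending on
 * the virtual address used.  Only in that case do we need the
 * colour-aware helpers.
 */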
static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);