/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
	/*
	 * Clear 8 bytes at a time, then the remaining 0-7 bytes one byte at
	 * a time.  On a fault, the exception table entries divert to the
	 * fixup code, which leaves the number of bytes still to be cleared
	 * in %rcx so that it is returned in 'size'.
	 */
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"	.align 16\n"
		"0:	movq $0,(%[dst])\n"
		"	addq   $8,%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   $0,(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
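
/*
 * Illustrative usage sketch (not part of the original source; the function
 * and variable names below are hypothetical): a read() handler that cannot
 * produce data may zero the whole user buffer with clear_user().  The
 * return value is the number of bytes that could NOT be cleared, so any
 * non-zero result maps naturally onto -EFAULT.
 *
 *	static ssize_t demo_read(struct file *file, char __user *buf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		if (clear_user(buf, count))
 *			return -EFAULT;
 *		return count;
 *	}
 */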

/*
 * Try to copy last bytes and clear the rest if needed.
 * Since protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++) {
		char c;

		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}
	clac();
	return len;
}

/*
 * Similar to copy_user_handle_tail, probe for the write fault point,
 * but reuse __memcpy_mcsafe in case a new read error is encountered.
 * clac() is handled in _copy_to_iter_mcsafe().
 */
__visible unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++, from++) {
		/*
		 * Call the assembly routine back directly since
		 * memcpy_mcsafe() may silently fallback to memcpy.
		 */
		unsigned long rem = __memcpy_mcsafe(to, from, 1);

		if (rem)
			break;
	}
	return len;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
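
/*
 * Worked example of the rounding behaviour (illustrative values; assumes
 * 64-byte cache lines, i.e. boot_cpu_data.x86_clflush_size == 64):
 *
 *	clean_cache_range((void *)0x1038, 16);
 *
 * covers bytes 0x1038-0x1047, which straddle the line boundary at 0x1040.
 * The loop starts at 0x1038 & ~63 == 0x1000 and issues clwb on the lines
 * at 0x1000 and 0x1040, i.e. both lines touched by the range are written
 * back even though neither is fully covered.
 */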

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 * (A worked example follows this function.)
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
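
/*
 * Worked example for the flush logic above (illustrative values; assumes
 * 64-byte cache lines): __copy_user_flushcache(dst = 0x1003, src, 100).
 * The destination is not 8-byte aligned, so the head bytes were copied
 * with cached stores; clean_cache_range(dst, 1) writes back the line at
 * 0x1000.  'dest' is rounded up to 0x1040, so flushed == 61; the remaining
 * size - flushed == 39 bytes are not a multiple of 8, so the tail was also
 * partly cached, and clean_cache_range(dst + 99, 1) writes back the line
 * at 0x1040.  Everything in between was copied with non-temporal stores
 * and needs no flush.
 */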

void memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
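
/*
 * Illustrative usage sketch (hypothetical names, not part of the original
 * source): copying a record into a persistent-memory mapping.
 * memcpy_flushcache() ensures the stores are not left dirty in the CPU
 * cache; the caller remains responsible for ordering before publishing a
 * "record valid" flag.
 *
 *	struct demo_rec { u64 seq; u64 payload; };
 *
 *	static void demo_commit_record(void *pmem_dst,
 *				       const struct demo_rec *rec)
 *	{
 *		memcpy_flushcache(pmem_dst, rec, sizeof(*rec));
 *		wmb();
 *	}
 *
 * The wmb() orders the flushed copy before any later store that marks the
 * record valid.
 */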

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif