#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space pages are pinned,
 * so that we don't take a page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__put_user_size(*(u8 *)from, (u8 __user *)to,
					1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__put_user_size(*(u16 *)from, (u16 __user *)to,
					2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__put_user_size(*(u32 *)from, (u32 __user *)to,
					4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}
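
/*
 * Illustrative sketch, not part of the original header: a typical caller
 * validates the destination with access_ok() and disables page faults
 * around the atomic variant.  The helper below is hypothetical and assumes
 * the access_ok() signature of this kernel generation; the return value is
 * 0 on success, or 4 if the store faulted.
 *
 *	static unsigned long example_put_u32(u32 __user *uptr, u32 val)
 *	{
 *		unsigned long left;
 *
 *		if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *			return sizeof(*uptr);
 *		pagefault_disable();
 *		left = __copy_to_user_inatomic(uptr, &val, sizeof(val));
 *		pagefault_enable();
 *		return left;
 *	}
 */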

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
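
/*
 * Illustrative sketch, not part of the original header; the helper name is
 * hypothetical.  In process context the usual pattern is access_ok()
 * followed by __copy_to_user(), mapping any uncopied remainder to -EFAULT.
 *
 *	static int example_copy_out(void __user *ubuf, const void *kbuf,
 *				    unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, ubuf, len))
 *			return -EFAULT;
 *		if (__copy_to_user(ubuf, kbuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */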

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/*
	 * Avoid zeroing the tail if the copy fails.
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}
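
/*
 * Illustrative sketch, not part of the original header; names are
 * hypothetical.  Because this variant does not zero the uncopied tail, a
 * caller that faults part way through typically retries with the sleeping
 * __copy_from_user() rather than consuming the partially filled buffer.
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kbuf, ubuf, len);
 *	pagefault_enable();
 *	if (left)
 *		left = __copy_from_user(kbuf, ubuf, len);
 */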

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	check_object_size(to, n, false);
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
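
/*
 * Illustrative sketch, not part of the original header; the helper name is
 * hypothetical.  A typical read path checks the source range with
 * access_ok() and relies on the zero-padding guarantee above, so the
 * kernel buffer never holds stale data after a partial fault.
 *
 *	static int example_copy_in(void *kbuf, const void __user *ubuf,
 *				   unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_READ, ubuf, len))
 *			return -EFAULT;
 *		if (__copy_from_user(kbuf, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */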

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */