#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS 	((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

static inline long access_ok(int type, const void __user * addr,
		unsigned long size)
{
	return 1;
}

#define put_user __put_user
#define get_user __get_user
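
/*
 * Hedged usage sketch (variable names are hypothetical, not defined here):
 * because access_ok() always succeeds on parisc, callers rely entirely on
 * the return value of get_user()/put_user(), which is 0 on success and
 * -EFAULT if the access faulted.  With "int __user *uptr":
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */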

#if !defined(CONFIG_64BIT)
#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};
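
/*
 * Hedged sketch (the helper names below are illustrative only): with
 * relative entries, an absolute address is recovered by adding the stored
 * offset to the address of the field that holds it, roughly:
 *
 *	static inline unsigned long extable_insn(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->insn + x->insn;
 *	}
 *
 *	static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->fixup + x->fixup;
 *	}
 */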

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r8 for a read or write fault, and zero the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
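
/*
 * Hedged sketch of how the fault handler consumes that low bit (the
 * authoritative code lives in arch/parisc/mm/fault.c; "fix" and "regs" are
 * illustrative names here).  %r8 is the error register used by the
 * __get_user()/__put_user() macros below:
 *
 *	if (fix->fixup & 1) {
 *		regs->gr[8] = -EFAULT;
 *		... zero the load's target register on a read fault ...
 *	}
 *	regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
 *	regs->iaoq[0] &= ~3;
 */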

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_gp;
	unsigned long fault_space;
	unsigned long fault_addr;
};
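
/*
 * Roughly how the fault handler is expected to fill this in before branching
 * to the fixup routine (a hedged sketch, not copied from fault.c):
 *
 *	struct exception_data *d = this_cpu_ptr(&exception_data);
 *
 *	d->fault_ip = regs->iaoq[0];
 *	d->fault_gp = regs->gr[27];
 *	d->fault_space = regs->isr;
 *	d->fault_addr = regs->ior;
 */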

/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %%sr2 hard-coded to access the requested memory.
 */
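
/*
 * Instruction-level note (best-effort explanation): "or,=" ORs the
 * addr_limit value with zero and, because of the ",=" completer, nullifies
 * the following instruction when the result is zero.  So for KERNEL_DS (0)
 * the "mfsp %%sr3,%0" is skipped and %%sr2 is loaded with 0, while for
 * USER_DS the current %%sr3 value is copied into %%sr2.
 */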
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )

#define __get_user_internal(val, ptr)			\
({							\
	register long __gu_err __asm__ ("r8") = 0;	\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(val, "ldb", ptr); break;	\
	case 2: __get_user_asm(val, "ldh", ptr); break; \
	case 4: __get_user_asm(val, "ldw", ptr); break; \
	case 8: LDD_USER(val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __get_user(val, ptr)				\
({							\
	load_sr2();					\
	__get_user_internal(val, ptr);			\
})

#define __get_user_asm(val, ldx, ptr)			\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */


#define __put_user_internal(x, ptr)				\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	switch (sizeof(*(ptr))) {				\
	case 1: __put_user_asm("stb", __x, ptr); break;		\
	case 2: __put_user_asm("sth", __x, ptr); break;		\
	case 4: __put_user_asm("stw", __x, ptr); break;		\
	case 8: STD_USER(__x, ptr); break;			\
	default: BUILD_BUG();					\
	}							\
								\
	__pu_err;						\
})

#define __put_user(x, ptr)					\
({								\
	load_sr2();						\
	__put_user_internal(x, ptr);				\
})


/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * r8 is already listed as err.
 */

#define __put_user_asm(stx, x, ptr)                         \
	__asm__ __volatile__ (                              \
		"1: " stx " %2,0(%%sr2,%1)\n"		    \
		"9:\n"					    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(x), "0"(__pu_err))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {	    	    \
	__asm__ __volatile__ (				    \
		"1: stw %2,0(%%sr2,%1)\n"		    \
		"2: stw %R2,4(%%sr2,%1)\n"		    \
		"9:\n"					    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	    \
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(__val), "0"(__pu_err));	    \
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
					  unsigned long len);
unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
					  unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src,
			   unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);
	unsigned long ret = n;

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		ret = __copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	if (unlikely(ret))
		memset(to + (n - ret), 0, ret);

	return ret;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = __copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}
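
/*
 * Illustrative caller sketch (hypothetical names, not part of this header):
 * both routines return the number of bytes that could NOT be copied, so a
 * non-zero result is normally turned into -EFAULT by the caller:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */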

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */