/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

/* Sparc is not segmented; however, we need to be able to fool access_ok()
 * when doing system calls from kernel mode legitimately.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { 0 })
#define USER_DS     ((mm_segment_t) { -1 })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	((current->thread.current_ds) = (val))

#define segment_eq(a, b) ((a).seg == (b).seg)
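
/* Example (illustrative sketch, not part of the interface above): the usual
 * pattern for legitimately fooling access_ok() around a kernel-internal user
 * access; do_helper(), kbuf and len are hypothetical names.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_helper((void __user *) kbuf, len);
 *	set_fs(old_fs);
 */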

/* There is a nice unmapped page at PAGE_OFFSET - PAGE_SIZE, so this test
 * can be fairly lightweight.
 * Nobody can read or write kernel space from userland by passing a large
 * size with an address close to PAGE_OFFSET - a fault will defeat the attempt.
 */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
#define access_ok(type, addr, size) \
	({ (void)(type); __access_ok((unsigned long)(addr), size); })

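/* Example (illustrative sketch): access_ok() only validates the (addr, size)
 * range against the current limit, it does not fault any pages in; a typical
 * caller checks once and then uses the __-prefixed accessors further below.
 * "uptr" and "count" are hypothetical names.
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, count))
 *		return -EFAULT;
 */
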
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is a special way to cover a range of potentially faulting
 * insns (like twenty ldd/std's with no intervening other instructions):
 * put the address of the first insn in 'insn' and 0 in 'fixup', and in the
 * next exception_table_entry put the last potentially faulting insn + 1
 * in 'insn' and the routine which should handle the fault in 'fixup'.
 * That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (i.e. the index of the faulting instruction in the range).
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
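
/* Example (illustrative sketch) of the two-entry range convention described
 * above, as it could look in hand-written assembly; the labels and the
 * range_fixup routine are hypothetical.
 *
 *	1:	ldd	[%o0 + 0x00], %g2
 *		...
 *		ldd	[%o0 + 0x98], %l0
 *	2:
 *		.section __ex_table,#alloc
 *		.align	4
 *		.word	1b, 0			! first insn, fixup == 0
 *		.word	2b, range_fixup		! last insn + 1, fault handler
 *		.previous
 */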

void __ret_efault(void);

/* These should become the main single-value transfer routines.
 * They automatically use the right size if we just have the right
 * pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to pass pointers around, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})

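/* Example (illustrative sketch): both macros evaluate to 0 or -EFAULT, and
 * get_user() additionally stores the fetched value in its first argument;
 * "uptr" is a hypothetical user pointer.
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *) uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *) uptr))
 *		return -EFAULT;
 */
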
/*
 * The "__xxx" versions do not do address space checking; they are useful
 * when doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()").
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))

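/* Example (illustrative sketch): one access_ok() check followed by several
 * unchecked accesses; "uarr", "tmp" and "n" are hypothetical names.
 *
 *	if (!access_ok(VERIFY_READ, uarr, n * sizeof(int)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(tmp[i], &uarr[i]))
 *			return -EFAULT;
 */
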
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x, addr, size) ({ \
	register int __pu_ret; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__put_user_asm(x, b, addr, __pu_ret); \
			break; \
		case 2: \
			__put_user_asm(x, h, addr, __pu_ret); \
			break; \
		case 4: \
			__put_user_asm(x, , addr, __pu_ret); \
			break; \
		case 8: \
			__put_user_asm(x, d, addr, __pu_ret); \
			break; \
		default: \
			__pu_ret = __put_user_bad(); \
			break; \
		} \
	} else { \
		__pu_ret = -EFAULT; \
	} \
	__pu_ret; \
})

#define __put_user_nocheck(x, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(x, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(x, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(x, , addr, __pu_ret); break;	\
	case 8: __put_user_asm(x, d, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	} \
	__pu_ret; \
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size " %1, %2\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"b	2b\n\t"						\
		" mov	%3, %0\n\t"					\
		".previous\n\n\t"					\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),		\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_check(x, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__get_user_asm(__gu_val, ub, addr, __gu_ret); \
			break; \
		case 2: \
			__get_user_asm(__gu_val, uh, addr, __gu_ret); \
			break; \
		case 4: \
			__get_user_asm(__gu_val, , addr, __gu_ret); \
			break; \
		case 8: \
			__get_user_asm(__gu_val, d, addr, __gu_ret); \
			break; \
		default: \
			__gu_val = 0; \
			__gu_ret = __get_user_bad(); \
			break; \
		} \
	} else { \
		__gu_val = 0; \
		__gu_ret = -EFAULT; \
	} \
	x = (__force type) __gu_val; \
	__gu_ret; \
})

#define __get_user_check_ret(x, addr, size, type, retval) ({ \
	register unsigned long __gu_val __asm__ ("l1"); \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__get_user_asm_ret(__gu_val, ub, addr, retval); \
			break; \
		case 2: \
			__get_user_asm_ret(__gu_val, uh, addr, retval); \
			break; \
		case 4: \
			__get_user_asm_ret(__gu_val, , addr, retval); \
			break; \
		case 8: \
			__get_user_asm_ret(__gu_val, d, addr, retval); \
			break; \
		default: \
			if (__get_user_bad()) \
				return retval; \
		} \
		x = (__force type) __gu_val; \
	} else \
		return retval; \
})

#define __get_user_nocheck(x, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	x = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_nocheck_ret(x, addr, size, type, retval) ({		\
	register unsigned long __gu_val __asm__ ("l1");			\
	switch (size) {							\
	case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break;	\
	case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break;	\
	case 4: __get_user_asm_ret(__gu_val, , addr, retval); break;	\
	case 8: __get_user_asm_ret(__gu_val, d, addr, retval); break;	\
	default:							\
		if (__get_user_bad())					\
			return retval;					\
	}								\
	x = (__force type) __gu_val;					\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size " %2, %1\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"clr	%1\n\t"						\
		"b	2b\n\t"						\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),		\
		 "i" (-EFAULT))

#define __get_user_asm_ret(x, size, addr, retval)			\
if (__builtin_constant_p(retval) && retval == -EFAULT)			\
	__asm__ __volatile__(						\
			"/* Get user asm ret, inline. */\n"		\
		"1:\t"	"ld"#size " %1, %0\n\n\t"			\
			".section __ex_table,#alloc\n\t"		\
			".align	4\n\t"					\
			".word	1b,__ret_efault\n\n\t"			\
			".previous\n\t"					\
		       : "=&r" (x) : "m" (*__m(addr)));			\
else									\
	__asm__ __volatile__(						\
			"/* Get user asm ret, inline. */\n"		\
		"1:\t"	"ld"#size " %1, %0\n\n\t"			\
			".section .fixup,#alloc,#execinstr\n\t"		\
			".align	4\n"					\
		"3:\n\t"						\
			"ret\n\t"					\
			" restore %%g0, %2, %%o0\n\n\t"			\
			".previous\n\t"					\
			".section __ex_table,#alloc\n\t"		\
			".align	4\n\t"					\
			".word	1b, 3b\n\n\t"				\
			".previous\n\t"					\
		       : "=&r" (x) : "m" (*__m(addr)), "i" (retval))

int __get_user_bad(void);

unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) to, n))
		return __copy_user(to, (__force void __user *) from, n);
	else
		return n;
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) from, n))
		return __copy_user((__force void __user *) to, from, n);
	else
		return n;
}

static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user((__force void __user *) to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

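/* Example (illustrative sketch): the copy routines return the number of
 * bytes that could not be copied, so a non-zero result means failure.
 * "struct foo", "karg" and "uarg" are hypothetical names.
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */
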
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,3\n\t"
		".previous\n\t"
		"mov %2, %%o1\n"
		"1:\n\t"
		"call __bzero\n\t"
		" mov %1, %%o0\n\t"
		"mov %%o0, %0\n"
		: "=r" (ret) : "r" (addr), "r" (size) :
		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
		"g1", "g2", "g3", "g4", "g5", "g7", "cc");

	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
	if (n && __access_ok((unsigned long) addr, n))
		return __clear_user(addr, n);
	else
		return n;
}
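
/* Example (illustrative sketch): clear_user() likewise returns the number
 * of bytes left unzeroed; "ubuf" and "len" are hypothetical names.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */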

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */
