1/*
2 * BK Id: SCCS/s.uaccess.h 1.10 05/21/02 21:44:32 paulus
3 */
4#ifdef __KERNEL__
5#ifndef _PPC_UACCESS_H
6#define _PPC_UACCESS_H
7
8#ifndef __ASSEMBLY__
9#include <linux/sched.h>
10#include <linux/errno.h>
11#include <asm/processor.h>
12
/* access_ok() `type' values; the address-range check is the same for both. */
#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0 })
#define USER_DS		((mm_segment_t) { 1 })

/* The current segment limit lives in the per-task thread struct. */
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a,b)	((a).seg == (b).seg)

/*
 * __user_ok() checks that [addr, addr+size) lies entirely below
 * TASK_SIZE; the first term also guards the `TASK_SIZE-(size)'
 * subtraction against underflow for huge sizes.  With KERNEL_DS in
 * force all checking is short-circuited.  Note that `type' is ignored:
 * read and write permission are not distinguished here.
 */
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
37
38extern inline int verify_area(int type, const void * addr, unsigned long size)
39{
40	return access_ok(type,addr,size) ? 0 : -EFAULT;
41}
42
43
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;	/* faulting insn address, fixup code address */
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
extern void sort_exception_table(void);
65
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the uglyness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
/* Checked accessors: the pointer is validated with access_ok() first. */
#define get_user(x,ptr) \
  __get_user_check((x),(ptr),sizeof(*(ptr)))
#define put_user(x,ptr) \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

/* Unchecked accessors: the caller must have done access_ok() already. */
#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

/* No definition is provided, so referencing this for an unsupported
 * access size presumably surfaces as a link-time error (standard kernel
 * idiom). */
extern long __put_user_bad(void);
95
/* Store `x' at user address `ptr' without checking the address;
 * the statement expression yields 0 on success or -EFAULT on fault. */
#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})

/* As above, but verify the destination range first; yields -EFAULT
 * without touching memory when the range check fails. */
#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (access_ok(VERIFY_WRITE,__pu_addr,size))		\
		__put_user_size((x),__pu_addr,(size),__pu_err);	\
	__pu_err;						\
})

/* Size dispatch for user-space stores.  `retval' is cleared first and
 * set to -EFAULT by the asm fixup on fault.  The default case references
 * the undefined __put_user_bad() so a bad size is caught at link time. */
#define __put_user_size(x,ptr,size,retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	  case 1: __put_user_asm(x,ptr,retval,"stb"); break;	\
	  case 2: __put_user_asm(x,ptr,retval,"sth"); break;	\
	  case 4: __put_user_asm(x,ptr,retval,"stw"); break;	\
	  case 8: __put_user_asm2(x,ptr,retval); break;		\
	  default: __put_user_bad();				\
	}							\
} while (0)
123
/* Dummy type big enough to make *(struct __large_struct *)(x) an lvalue
 * covering a sizeable chunk of memory. */
struct __large_struct { unsigned long buf[100]; };
/* NOTE(review): __m() is not referenced by any of the visible macros in
 * this file — presumably kept for parity with other ports; confirm
 * before removing. */
#define __m(x) (*(struct __large_struct *)(x))

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
/*
 * Single store with fault fixup.  Label 1 is the store that may fault;
 * the __ex_table pair (1b,3b) routes a fault to label 3, which loads
 * -EFAULT into the error register and branches back past the store.
 * The "b" constraint is PowerPC's base-register class (any GPR except
 * r0, since r0 reads as literal 0 in address computation).
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	"op" %1,0(%2)\n"			\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align 2\n"				\
		"	.long 1b,3b\n"				\
		".previous"					\
		: "=r"(err)					\
		: "r"(x), "b"(addr), "i"(-EFAULT), "0"(err))

/*
 * 64-bit store as two stw's, either of which may fault (both have
 * __ex_table entries pointing at the same fixup).  "%1+1" names the
 * second register of the pair holding the 64-bit operand — an old gcc
 * register-pair idiom.  NOTE(review): this assumes `x' occupies an
 * adjacent register pair, i.e. a 64-bit operand; confirm callers only
 * reach this with long long-sized values.
 */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"				\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align 2\n"				\
		"	.long 1b,4b\n"				\
		"	.long 2b,4b\n"				\
		".previous"					\
		: "=r"(err)					\
		: "r"(x), "b"(addr), "i"(-EFAULT), "0"(err))
163
/* Load from user address `ptr' without checking; the statement
 * expression yields 0 on success or -EFAULT on fault.
 * NOTE(review): __gu_val is a plain `long' (32 bits on ppc32), yet
 * size 8 dispatches to a register-pair load — looks like 8-byte
 * accesses need a 64-bit temporary here; confirm against callers. */
#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

/* Checked variant: when the range check fails, `x' is set from the
 * pre-zeroed __gu_val so no stale data reaches the caller. */
#define __get_user_check(x,ptr,size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(VERIFY_READ,__gu_addr,size))			\
		__get_user_size(__gu_val,__gu_addr,(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

/* Undefined on purpose: an unsupported size becomes a link error. */
extern long __get_user_bad(void);
183
/*
 * Size dispatch for user-space loads: pick the right load instruction
 * for 1/2/4 bytes, or the two-instruction pair sequence for 8 bytes.
 * `retval' is cleared first and set to -EFAULT by the asm fixup on a
 * fault; the undefined __get_user_bad() turns an unsupported size into
 * a link-time error.
 *
 * Fix: `case 8' was missing its `break' and fell through into
 * `default', which clobbered the just-loaded value with a call to
 * __get_user_bad() — making every valid 8-byte get_user() fail to link.
 */
#define __get_user_size(x,ptr,size,retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	  case 1: __get_user_asm(x,ptr,retval,"lbz"); break;	\
	  case 2: __get_user_asm(x,ptr,retval,"lhz"); break;	\
	  case 4: __get_user_asm(x,ptr,retval,"lwz"); break;	\
	  case 8: __get_user_asm2(x, ptr, retval); break;	\
	  default: (x) = __get_user_bad();			\
	}							\
} while (0)
195
/*
 * Single load with fault fixup: on a fault, label 3 sets the error
 * register to -EFAULT and zeroes the destination register so no stale
 * kernel data is handed back, then branches past the load.
 */
#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)\n"		\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.align 2\n"			\
		"	.long 1b,3b\n"			\
		".previous"				\
		: "=r"(err), "=r"(x)			\
		: "b"(addr), "i"(-EFAULT), "0"(err))

/*
 * 64-bit load as two lwz's; either may fault, and the fixup zeroes both
 * halves of the destination register pair ("%1+1" is the second register
 * of the pair — old gcc register-pair idiom).  "=&r" marks the pair
 * early-clobber so it cannot alias the address register.
 */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.align 2\n"			\
		"	.long 1b,4b\n"			\
		"	.long 2b,4b\n"			\
		".previous"				\
		: "=r"(err), "=&r"(x)			\
		: "b"(addr), "i"(-EFAULT), "0"(err))
230
/* more complex routines */

/* Assembly copy loop; returns the number of bytes left uncopied. */
extern int __copy_tofrom_user(void *to, const void *from, unsigned long size);
234
235extern inline unsigned long
236copy_from_user(void *to, const void *from, unsigned long n)
237{
238	unsigned long over;
239
240	if (access_ok(VERIFY_READ, from, n))
241		return __copy_tofrom_user(to, from, n);
242	if ((unsigned long)from < TASK_SIZE) {
243		over = (unsigned long)from + n - TASK_SIZE;
244		return __copy_tofrom_user(to, from, n - over) + over;
245	}
246	return n;
247}
248
249extern inline unsigned long
250copy_to_user(void *to, const void *from, unsigned long n)
251{
252	unsigned long over;
253
254	if (access_ok(VERIFY_WRITE, to, n))
255		return __copy_tofrom_user(to, from, n);
256	if ((unsigned long)to < TASK_SIZE) {
257		over = (unsigned long)to + n - TASK_SIZE;
258		return __copy_tofrom_user(to, from, n - over) + over;
259	}
260	return n;
261}
262
/* Unchecked variants: the caller has already done access_ok(). */
#define __copy_from_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))
#define __copy_to_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

/* Zero user memory; returns the number of bytes left unzeroed. */
extern unsigned long __clear_user(void *addr, unsigned long size);
269
270extern inline unsigned long
271clear_user(void *addr, unsigned long size)
272{
273	if (access_ok(VERIFY_WRITE, addr, size))
274		return __clear_user(addr, size);
275	if ((unsigned long)addr < TASK_SIZE) {
276		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
277		return __clear_user(addr, size - over) + over;
278	}
279	return size;
280}
281
282extern int __strncpy_from_user(char *dst, const char *src, long count);
283
284extern inline long
285strncpy_from_user(char *dst, const char *src, long count)
286{
287	if (access_ok(VERIFY_READ, src, 1))
288		return __strncpy_from_user(dst, src, count);
289	return -EFAULT;
290}
291
/*
 * Return the size of a string (including the terminating 0),
 * or 0 for error.  `top' is the highest address that may be examined,
 * which keeps the scan from running past the user/kernel boundary.
 */

extern int __strnlen_user(const char *str, long len, unsigned long top);
299
300/*
301 * Returns the length of the string at str (including the null byte),
302 * or 0 if we hit a page we can't access,
303 * or something > len if we didn't find a null byte.
304 *
305 * The `top' parameter to __strnlen_user is to make sure that
306 * we can never overflow from the user area into kernel space.
307 */
308extern __inline__ int strnlen_user(const char *str, long len)
309{
310	unsigned long top = __kernel_ok? ~0UL: TASK_SIZE - 1;
311
312	if ((unsigned long)str > top)
313		return 0;
314	return __strnlen_user(str, len, top);
315}
316
/* Effectively unbounded scan: limit is the largest positive int - 1. */
#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)
318
319#endif  /* __ASSEMBLY__ */
320
321#endif	/* _PPC_UACCESS_H */
322#endif /* __KERNEL__ */
323