#ifndef _ASM_IA64_UACCESS_H
#define _ASM_IA64_UACCESS_H

/*
 * This file defines various macros to transfer memory areas across
 * the user/kernel boundary.  This needs to be done carefully because
 * this code is executed in kernel mode and uses user-specified
 * addresses.  Thus, we need to be careful not to let the user trick
 * us into accessing kernel memory that would normally be
 * inaccessible.  This code is also fairly performance sensitive,
 * so we want to spend as little time doing safety checks as
 * possible.
 *
 * To make matters a bit more interesting, these macros are sometimes
 * also called from within the kernel itself, in which case the address
 * validity check must be skipped.  The get_fs() macro tells us what
 * to do: if get_fs()==USER_DS, checking is performed; if
 * get_fs()==KERNEL_DS, checking is bypassed.
 *
 * Note that even if the memory area specified by the user is in a
 * valid address range, it is still possible that we'll get a page
 * fault while accessing it.  This is handled by filling out an
 * exception handler fixup entry for each instruction that has the
 * potential to fault.  When such a fault occurs, the page fault
 * handler checks whether the faulting instruction has an associated
 * fixup entry and, if so, sets r8 to -EFAULT, clears r9, and
 * resumes execution at the continuation point.
 *
 * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
 * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/errno.h>
#include <linux/sched.h>

#include <asm/pgtable.h>

/*
 * For historical reasons, the following macros are grossly misnamed:
 */
#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()  (KERNEL_DS)
#define get_fs()  (current->addr_limit)
#define set_fs(x) (current->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
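
/*
 * Example (illustrative only, not part of this interface): the usual
 * pattern for applying a user-memory accessor to a kernel buffer is
 * to temporarily widen the address limit and restore it afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... call copy_from_user()/copy_to_user() on kernel addresses ...
 *	set_fs(old_fs);
 */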

/*
 * When accessing user memory, we need to make sure the entire area really is in
 * user-level space.  In order to do this efficiently, we make sure that the page at
 * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
 * point inside the virtually mapped linear page table.
 */
#define __access_ok(addr,size,segment)	(((unsigned long) (addr)) <= (segment).seg		\
	 && ((segment).seg == KERNEL_DS.seg || rgn_offset((unsigned long) (addr)) < RGN_MAP_LIMIT))
#define access_ok(type,addr,size)	__access_ok((addr),(size),get_fs())

static inline int
verify_area (int type, const void *addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
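
/*
 * Example (illustrative only; ubuf and len are caller-supplied):
 * validating a user pointer up front and propagating -EFAULT:
 *
 *	if (verify_area(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 */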

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
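
/*
 * Example (illustrative only): both macros evaluate to 0 on success
 * and -EFAULT on failure, so a typical caller looks like this:
 *
 *	int val;
 *
 *	if (get_user(val, (int *) ubuf))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int *) ubuf))
 *		return -EFAULT;
 */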

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr)	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr)	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
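
/*
 * Example (illustrative only; uptr, a, b, and err are caller-supplied):
 * one access_ok() check covering several unchecked accesses to the
 * same user array:
 *
 *	if (!access_ok(VERIFY_READ, uptr, 2*sizeof(int)))
 *		return -EFAULT;
 *	err  = __get_user(a, &uptr[0]);
 *	err |= __get_user(b, &uptr[1]);
 */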

extern void __get_user_unknown (void);

#define __get_user_nocheck(x,ptr,size)		\
({						\
	register long __gu_err asm ("r8") = 0;	\
	register long __gu_val asm ("r9") = 0;	\
	switch (size) {				\
	  case 1: __get_user_8(ptr); break;	\
	  case 2: __get_user_16(ptr); break;	\
	  case 4: __get_user_32(ptr); break;	\
	  case 8: __get_user_64(ptr); break;	\
	  default: __get_user_unknown(); break;	\
	}					\
	(x) = (__typeof__(*(ptr))) __gu_val;	\
	__gu_err;				\
})

#define __get_user_check(x,ptr,size,segment)			\
({								\
	register long __gu_err asm ("r8") = -EFAULT;		\
	register long __gu_val asm ("r9") = 0;			\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);		\
	if (__access_ok((long)__gu_addr,size,segment)) {	\
		__gu_err = 0;					\
		switch (size) {					\
		  case 1: __get_user_8(__gu_addr); break;	\
		  case 2: __get_user_16(__gu_addr); break;	\
		  case 4: __get_user_32(__gu_addr); break;	\
		  case 8: __get_user_64(__gu_addr); break;	\
		  default: __get_user_unknown(); break;		\
		}						\
	}							\
	(x) = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata.  */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

#if __GNUC__ >= 3
#  define GAS_HAS_LOCAL_TAGS	/* define if gas supports local tags a la [1:] */
#endif

#ifdef GAS_HAS_LOCAL_TAGS
# define _LL	"[1:]"
#else
# define _LL	"1:"
#endif

#define __get_user_64(addr)									\
	asm ("\n"_LL"\tld8 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)+4\n"				\
	     _LL										\
	     : "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));

#define __get_user_32(addr)									\
	asm ("\n"_LL"\tld4 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)+4\n"				\
	     _LL										\
	     : "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));

#define __get_user_16(addr)									\
	asm ("\n"_LL"\tld2 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)+4\n"				\
	     _LL										\
	     : "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));

#define __get_user_8(addr)									\
	asm ("\n"_LL"\tld1 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)+4\n"				\
	     _LL										\
	     : "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));

extern void __put_user_unknown (void);

#define __put_user_nocheck(x,ptr,size)		\
({						\
	register long __pu_err asm ("r8") = 0;	\
	switch (size) {				\
	  case 1: __put_user_8(x,ptr); break;	\
	  case 2: __put_user_16(x,ptr); break;	\
	  case 4: __put_user_32(x,ptr); break;	\
	  case 8: __put_user_64(x,ptr); break;	\
	  default: __put_user_unknown(); break;	\
	}					\
	__pu_err;				\
})

#define __put_user_check(x,ptr,size,segment)			\
({								\
	register long __pu_err asm ("r8") = -EFAULT;		\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (__access_ok((long)__pu_addr,size,segment)) {	\
		__pu_err = 0;					\
		switch (size) {					\
		  case 1: __put_user_8(x,__pu_addr); break;	\
		  case 2: __put_user_16(x,__pu_addr); break;	\
		  case 4: __put_user_32(x,__pu_addr); break;	\
		  case 8: __put_user_64(x,__pu_addr); break;	\
		  default: __put_user_unknown(); break;		\
		}						\
	}							\
	__pu_err;						\
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
#define __put_user_64(x,addr)								\
	asm volatile (									\
		"\n"_LL"\tst8 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		"\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)\n"			\
		_LL									\
		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))

#define __put_user_32(x,addr)								\
	asm volatile (									\
		"\n"_LL"\tst4 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		"\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)\n"			\
		_LL									\
		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))

#define __put_user_16(x,addr)								\
	asm volatile (									\
		"\n"_LL"\tst2 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		"\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)\n"			\
		_LL									\
		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))

#define __put_user_8(x,addr)								\
	asm volatile (									\
		"\n"_LL"\tst1 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		"\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)\n"			\
		_LL									\
		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))

/*
 * Complex access routines
 */
extern unsigned long __copy_user (void *to, const void *from, unsigned long count);

#define __copy_to_user(to,from,n)	__copy_user((to), (from), (n))
#define __copy_from_user(to,from,n)	__copy_user((to), (from), (n))

#define copy_to_user(to,from,n)   __copy_tofrom_user((to), (from), (n), 1)
#define copy_from_user(to,from,n) __copy_tofrom_user((to), (from), (n), 0)

#define __copy_tofrom_user(to,from,n,check_to)							\
({												\
	void *__cu_to = (to);									\
	const void *__cu_from = (from);								\
	long __cu_len = (n);									\
												\
	if (__access_ok((long) ((check_to) ? __cu_to : __cu_from), __cu_len, get_fs())) {	\
		__cu_len = __copy_user(__cu_to, __cu_from, __cu_len);				\
	}											\
	__cu_len;										\
})
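
/*
 * Example (illustrative only; kbuf, ubuf, and len are caller-supplied):
 * like __copy_user(), the copy_*_user() macros return the number of
 * bytes that could not be copied, so a non-zero result indicates a
 * fault (or a failed access check):
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */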

extern unsigned long __do_clear_user (void *, unsigned long);

#define __clear_user(to,n)			\
({						\
	__do_clear_user(to,n);			\
})

#define clear_user(to,n)					\
({								\
	unsigned long __cu_len = (n);				\
	if (__access_ok((long) to, __cu_len, get_fs())) {	\
		__cu_len = __do_clear_user(to, __cu_len);	\
	}							\
	__cu_len;						\
})
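
/*
 * Example (illustrative only): zeroing the tail of a user buffer; as
 * with the copy routines, a non-zero return is the number of bytes
 * left untouched:
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */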

/* Returns: -EFAULT if exception before terminator, N if the entire
   buffer filled, else strlen.  */

extern long __strncpy_from_user (char *to, const char *from, long to_len);

#define strncpy_from_user(to,from,n)					\
({									\
	const char * __sfu_from = (from);				\
	long __sfu_ret = -EFAULT;					\
	if (__access_ok((long) __sfu_from, 0, get_fs()))		\
		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
	__sfu_ret;							\
})
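
/*
 * Example (illustrative only; kname and uname are caller-supplied):
 * per the return convention above, a negative result is a fault and
 * a result equal to the buffer size means no terminating NUL was
 * found within it:
 *
 *	long len = strncpy_from_user(kname, uname, sizeof(kname));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(kname))
 *		return -ENAMETOOLONG;
 */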

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern unsigned long __strlen_user (const char *);

#define strlen_user(str)				\
({							\
	const char *__su_str = (str);			\
	unsigned long __su_ret = 0;			\
	if (__access_ok((long) __su_str, 0, get_fs()))	\
		__su_ret = __strlen_user(__su_str);	\
	__su_ret;					\
})
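
/*
 * Example (illustrative only): strlen_user() counts the terminating
 * NUL, so the result doubles as the memory size needed for a copy
 * and 0 signals an error:
 *
 *	unsigned long size = strlen_user(ustr);
 *
 *	if (!size)
 *		return -EFAULT;
 */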

/*
 * Returns: 0 if exception before NUL or reaching the supplied limit
 * (N), a value greater than N if the limit would be exceeded, else
 * strlen.
 */
extern unsigned long __strnlen_user (const char *, long);

#define strnlen_user(str, len)					\
({								\
	const char *__su_str = (str);				\
	unsigned long __su_ret = 0;				\
	if (__access_ok((long) __su_str, 0, get_fs()))		\
		__su_ret = __strnlen_user(__su_str, len);	\
	__su_ret;						\
})
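
/*
 * Example (illustrative only): bounding the scan; per the return
 * convention above, 0 means a fault and a value greater than the
 * limit means no NUL was found within it:
 *
 *	unsigned long size = strnlen_user(ustr, PAGE_SIZE);
 *
 *	if (!size || size > PAGE_SIZE)
 *		return -EFAULT;
 */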

struct exception_table_entry {
	int addr;	/* gp-relative address of insn this fixup is for */
	int cont;	/* gp-relative continuation address; if bit 2 is set, r9 is set to 0 */
};

struct exception_fixup {
	unsigned long cont;	/* continuation point (bit 2: clear r9 if set) */
};

extern struct exception_fixup search_exception_table (unsigned long addr);
extern void handle_exception (struct pt_regs *regs, struct exception_fixup fixup);

#ifdef GAS_HAS_LOCAL_TAGS
#define SEARCH_EXCEPTION_TABLE(regs) search_exception_table(regs->cr_iip + ia64_psr(regs)->ri);
#else
#define SEARCH_EXCEPTION_TABLE(regs) search_exception_table(regs->cr_iip);
#endif
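
/*
 * Called from the page fault handler: if the faulting instruction has
 * a fixup entry, handle_exception() redirects execution to the
 * continuation point (setting r8/r9 as described at the top of this
 * file) and the fault is considered handled.
 */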
static inline int
done_with_exception (struct pt_regs *regs)
{
	struct exception_fixup fix;
	fix = SEARCH_EXCEPTION_TABLE(regs);
	if (fix.cont) {
		handle_exception(regs, fix);
		return 1;
	}
	return 0;
}

#endif /* _ASM_IA64_UACCESS_H */