1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#ifndef _ASM_UACCESS_H
10#define _ASM_UACCESS_H
11
12#include <linux/errno.h>
13#include <linux/sched.h>
14
15#define STR(x)  __STR(x)
16#define __STR(x)  #x
17
18/*
19 * The fs value determines whether argument validity checking should be
20 * performed or not.  If get_fs() == USER_DS, checking is performed, with
21 * get_fs() == KERNEL_DS, checking is bypassed.
22 *
23 * For historical reasons, these macros are grossly misnamed.
24 */
25#define KERNEL_DS	((mm_segment_t) { 0UL })
26#define USER_DS		((mm_segment_t) { -TASK_SIZE })
27
28#define VERIFY_READ    0
29#define VERIFY_WRITE   1
30
31#define get_fs()        (current->thread.current_ds)
32#define get_ds()	(KERNEL_DS)
33#define set_fs(x)       (current->thread.current_ds=(x))
34
35#define segment_eq(a,b)	((a).seg == (b).seg)
36
37
38/*
39 * Is a address valid? This does a straighforward calculation rather
40 * than tests.
41 *
42 * Address valid if:
43 *  - "addr" doesn't have any high-bits set
44 *  - AND "size" doesn't have any high-bits set
45 *  - AND "addr+size" doesn't have any high-bits set
46 *  - OR we are in kernel mode.
47 */
48#define __ua_size(size)							\
49	((__builtin_constant_p(size) && (size)) > 0 ? 0 : (size))
50
51#define __access_ok(addr, size, mask)					\
52	(((mask) & ((addr) | ((addr) + (size)) | __ua_size(size))) == 0)
53
54#define __access_mask get_fs().seg
55
56#define access_ok(type, addr, size)					\
57	__access_ok((unsigned long)(addr), (size), __access_mask)
58
59static inline int verify_area(int type, const void * addr, unsigned long size)
60{
61	return access_ok(type, addr, size) ? 0 : -EFAULT;
62}
63
64/*
65 * Uh, these should become the main single-value transfer routines ...
66 * They automatically use the right size if we just have the right
67 * pointer type ...
68 *
69 * As MIPS uses the same address space for kernel and user data, we
70 * can just do these as direct assignments.
71 *
72 * Careful to not
73 * (a) re-use the arguments for side effects (sizeof is ok)
74 * (b) require any knowledge of processes at this stage
75 */
76#define put_user(x,ptr)	\
77	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
78#define get_user(x,ptr) \
79	__get_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
80
81/*
82 * The "__xxx" versions do not do address space checking, useful when
83 * doing multiple accesses to the same area (the user has to do the
84 * checks by hand with "access_ok()")
85 */
86#define __put_user(x,ptr) \
87	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
88#define __get_user(x,ptr) \
89	__get_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
90
91struct __large_struct { unsigned long buf[100]; };
92#define __m(x) (*(struct __large_struct *)(x))
93
94#define __get_user_nocheck(x,ptr,size)				\
95({								\
96	long __gu_err;						\
97	__typeof(*(ptr)) __gu_val;				\
98	long __gu_addr;						\
99	__asm__("":"=r" (__gu_val));				\
100	__gu_addr = (long) (ptr);				\
101	__asm__("":"=r" (__gu_err));				\
102	switch (size) {						\
103	case 1: __get_user_asm("lb"); break;			\
104	case 2: __get_user_asm("lh"); break;			\
105	case 4: __get_user_asm("lw"); break;			\
106	case 8: __get_user_asm("ld"); break;			\
107	default: __get_user_unknown(); break;			\
108	} x = (__typeof__(*(ptr))) __gu_val; __gu_err;		\
109})
110
111#define __get_user_check(x,ptr,size)				\
112({								\
113	long __gu_err;						\
114	__typeof__(*(ptr)) __gu_val;				\
115	long __gu_addr;						\
116	__asm__("":"=r" (__gu_val));				\
117	__gu_addr = (long) (ptr);				\
118	__asm__("":"=r" (__gu_err));				\
119	if (__access_ok(__gu_addr,size,__access_mask)) {	\
120		switch (size) {					\
121		case 1: __get_user_asm("lb"); break;		\
122		case 2: __get_user_asm("lh"); break;		\
123		case 4: __get_user_asm("lw"); break;		\
124		case 8: __get_user_asm("ld"); break;		\
125		default: __get_user_unknown(); break;		\
126		}						\
127	} x = (__typeof__(*(ptr))) __gu_val; __gu_err;		\
128})
129
130#define __get_user_asm(insn)					\
131({								\
132	__asm__ __volatile__(					\
133	"1:\t" insn "\t%1,%2\n\t"				\
134	"move\t%0,$0\n"						\
135	"2:\n\t"						\
136	".section\t.fixup,\"ax\"\n"				\
137	"3:\tli\t%0,%3\n\t"					\
138	"move\t%1,$0\n\t"					\
139	"j\t2b\n\t"						\
140	".previous\n\t"						\
141	".section\t__ex_table,\"a\"\n\t"			\
142	".dword\t1b,3b\n\t"					\
143	".previous"						\
144	:"=r" (__gu_err), "=r" (__gu_val)			\
145	:"o" (__m(__gu_addr)), "i" (-EFAULT));			\
146})
147
148extern void __get_user_unknown(void);
149
150#define __put_user_nocheck(x,ptr,size)				\
151({								\
152	long __pu_err;						\
153	__typeof__(*(ptr)) __pu_val;				\
154	long __pu_addr;						\
155	__pu_val = (x);						\
156	__pu_addr = (long) (ptr);				\
157	__asm__("":"=r" (__pu_err));				\
158	switch (size) {						\
159	case 1: __put_user_asm("sb"); break;			\
160	case 2: __put_user_asm("sh"); break;			\
161	case 4: __put_user_asm("sw"); break;			\
162	case 8: __put_user_asm("sd"); break;			\
163	default: __put_user_unknown(); break;			\
164	} __pu_err;						\
165})
166
167#define __put_user_check(x,ptr,size)				\
168({								\
169	long __pu_err;						\
170	__typeof__(*(ptr)) __pu_val;				\
171	long __pu_addr;						\
172	__pu_val = (x);						\
173	__pu_addr = (long) (ptr);				\
174	__asm__("":"=r" (__pu_err));				\
175	if (__access_ok(__pu_addr,size,__access_mask)) { 	\
176		switch (size) {					\
177		case 1: __put_user_asm("sb"); break;		\
178		case 2: __put_user_asm("sh"); break;		\
179		case 4: __put_user_asm("sw"); break;		\
180		case 8: __put_user_asm("sd"); break;		\
181		default: __put_user_unknown(); break;		\
182		}						\
183	} __pu_err;						\
184})
185
186#define __put_user_asm(insn)					\
187({								\
188	__asm__ __volatile__(					\
189	"1:\t" insn "\t%z1, %2\t\t\t# __put_user_asm\n\t"	\
190	"move\t%0,$0\n"						\
191	"2:\n\t"						\
192	".section\t.fixup,\"ax\"\n"				\
193	"3:\tli\t%0,%3\n\t"					\
194	"j\t2b\n\t"						\
195	".previous\n\t"						\
196	".section\t__ex_table,\"a\"\n\t"			\
197	".dword\t1b,3b\n\t"					\
198	".previous"						\
199	:"=r" (__pu_err)					\
200	:"Jr" (__pu_val), "o" (__m(__pu_addr)), "i" (-EFAULT));	\
201})
202
203extern void __put_user_unknown(void);
204
205/*
206 * We're generating jump to subroutines which will be outside the range of
207 * jump instructions
208 */
209#ifdef MODULE
210#define __MODULE_JAL(destination)	\
211	".set\tnoat\n\t"		\
212	"dla\t$1, " #destination "\n\t" \
213	"jalr\t$1\n\t"			\
214	".set\tat\n\t"
215#else
216#define __MODULE_JAL(destination)	\
217	"jal\t" #destination "\n\t"
218#endif
219
220extern size_t __copy_user(void *__to, const void *__from, size_t __n);
221
222#define __invoke_copy_to_user(to,from,n)				\
223({									\
224	register void *__cu_to_r __asm__ ("$4");			\
225	register const void *__cu_from_r __asm__ ("$5");		\
226	register long __cu_len_r __asm__ ("$6");			\
227									\
228	__cu_to_r = (to);						\
229	__cu_from_r = (from);						\
230	__cu_len_r = (n);						\
231	__asm__ __volatile__(						\
232	__MODULE_JAL(__copy_user)					\
233	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
234	:								\
235	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
236	  "memory");							\
237	__cu_len_r;							\
238})
239
240#define __copy_to_user(to,from,n)					\
241({									\
242	void *__cu_to;							\
243	const void *__cu_from;						\
244	long __cu_len;							\
245									\
246	__cu_to = (to);							\
247	__cu_from = (from);						\
248	__cu_len = (n);							\
249	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
250	__cu_len;							\
251})
252
253#define copy_to_user(to,from,n)						\
254({									\
255	void *__cu_to;							\
256	const void *__cu_from;						\
257	long __cu_len;							\
258									\
259	__cu_to = (to);							\
260	__cu_from = (from);						\
261	__cu_len = (n);							\
262	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len))			\
263		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
264		                                __cu_len);		\
265	__cu_len;							\
266})
267
268#define __invoke_copy_from_user(to,from,n)				\
269({									\
270	register void *__cu_to_r __asm__ ("$4");			\
271	register const void *__cu_from_r __asm__ ("$5");		\
272	register long __cu_len_r __asm__ ("$6");			\
273									\
274	__cu_to_r = (to);						\
275	__cu_from_r = (from);						\
276	__cu_len_r = (n);						\
277	__asm__ __volatile__(						\
278	".set\tnoreorder\n\t"						\
279	__MODULE_JAL(__copy_user)					\
280	".set\tnoat\n\t"						\
281	"daddu\t$1, %1, %2\n\t"						\
282	".set\tat\n\t"							\
283	".set\treorder\n\t"						\
284	"move\t%0, $6"							\
285	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
286	:								\
287	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
288	  "memory");							\
289	__cu_len_r;							\
290})
291
292#define __copy_from_user(to,from,n)					\
293({									\
294	void *__cu_to;							\
295	const void *__cu_from;						\
296	long __cu_len;							\
297									\
298	__cu_to = (to);							\
299	__cu_from = (from);						\
300	__cu_len = (n);							\
301	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
302	                                   __cu_len);			\
303	__cu_len;							\
304})
305
306#define copy_from_user(to,from,n)					\
307({									\
308	void *__cu_to;							\
309	const void *__cu_from;						\
310	long __cu_len;							\
311									\
312	__cu_to = (to);							\
313	__cu_from = (from);						\
314	__cu_len = (n);							\
315	if (access_ok(VERIFY_READ, __cu_from, __cu_len))		\
316		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
317		                                   __cu_len);		\
318	__cu_len;							\
319})
320
321static inline __kernel_size_t
322__clear_user(void *addr, __kernel_size_t size)
323{
324	__kernel_size_t res;
325
326	__asm__ __volatile__(
327		"move\t$4, %1\n\t"
328		"move\t$5, $0\n\t"
329		"move\t$6, %2\n\t"
330		__MODULE_JAL(__bzero)
331		"move\t%0, $6"
332		: "=r" (res)
333		: "r" (addr), "r" (size)
334		: "$4", "$5", "$6", "$8", "$9", "$31");
335
336	return res;
337}
338
339#define clear_user(addr,n)					\
340({								\
341	void * __cl_addr = (addr);				\
342	unsigned long __cl_size = (n);				\
343	if (__cl_size && access_ok(VERIFY_WRITE,		\
344		((unsigned long)(__cl_addr)), __cl_size))	\
345		__cl_size = __clear_user(__cl_addr, __cl_size);	\
346	__cl_size;						\
347})
348
349/*
350 * Returns: -EFAULT if exception before terminator, N if the entire
351 * buffer filled, else strlen.
352 */
353static inline long
354__strncpy_from_user(char *__to, const char *__from, long __len)
355{
356	long res;
357
358	__asm__ __volatile__(
359		"move\t$4, %1\n\t"
360		"move\t$5, %2\n\t"
361		"move\t$6, %3\n\t"
362		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
363		"move\t%0, $2"
364		: "=r" (res)
365		: "r" (__to), "r" (__from), "r" (__len)
366		: "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
367
368	return res;
369}
370
371static inline long
372strncpy_from_user(char *__to, const char *__from, long __len)
373{
374	long res;
375
376	__asm__ __volatile__(
377		"move\t$4, %1\n\t"
378		"move\t$5, %2\n\t"
379		"move\t$6, %3\n\t"
380		__MODULE_JAL(__strncpy_from_user_asm)
381		"move\t%0, $2"
382		: "=r" (res)
383		: "r" (__to), "r" (__from), "r" (__len)
384		: "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
385
386	return res;
387}
388
389/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
390static inline long __strlen_user(const char *s)
391{
392	long res;
393
394	__asm__ __volatile__(
395		"move\t$4, %1\n\t"
396		__MODULE_JAL(__strlen_user_nocheck_asm)
397		"move\t%0, $2"
398		: "=r" (res)
399		: "r" (s)
400		: "$2", "$4", "$8", "$31");
401
402	return res;
403}
404
405static inline long strlen_user(const char *s)
406{
407	long res;
408
409	__asm__ __volatile__(
410		"move\t$4, %1\n\t"
411		__MODULE_JAL(__strlen_user_asm)
412		"move\t%0, $2"
413		: "=r" (res)
414		: "r" (s)
415		: "$2", "$4", "$8", "$31");
416
417	return res;
418}
419
420/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
421static inline long __strnlen_user(const char *s, long n)
422{
423	long res;
424
425	__asm__ __volatile__(
426		"move\t$4, %1\n\t"
427		"move\t$5, %2\n\t"
428		__MODULE_JAL(__strnlen_user_nocheck_asm)
429		"move\t%0, $2"
430		: "=r" (res)
431		: "r" (s), "r" (n)
432		: "$2", "$4", "$5", "$8", "$31");
433
434	return res;
435}
436
437static inline long strnlen_user(const char *s, long n)
438{
439	long res;
440
441	__asm__ __volatile__(
442		"move\t$4, %1\n\t"
443		"move\t$5, %2\n\t"
444		__MODULE_JAL(__strnlen_user_asm)
445		"move\t%0, $2"
446		: "=r" (res)
447		: "r" (s), "r" (n)
448		: "$2", "$4", "$5", "$8", "$31");
449
450	return res;
451}
452
453struct exception_table_entry
454{
455	unsigned long insn;
456	unsigned long nextinsn;
457};
458
459/* Returns 0 if exception not found and fixup.unit otherwise.  */
460extern unsigned long search_exception_table(unsigned long addr);
461
462/* Returns the new pc */
463#define fixup_exception(map_reg, fixup_unit, pc)                \
464({                                                              \
465	fixup_unit;                                             \
466})
467
468#endif /* _ASM_UACCESS_H */
469