/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define STR(x)  __STR(x)
#define __STR(x)  #x

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define KERNEL_DS	((mm_segment_t) { (unsigned long) 0L })
#define USER_DS		((mm_segment_t) { (unsigned long) -1L })

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_fs()        (current->thread.current_ds)
#define get_ds()	(KERNEL_DS)
#define set_fs(x)       (current->thread.current_ds=(x))

#define segment_eq(a,b)	((a).seg == (b).seg)
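
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * the classic pattern for temporarily lifting the user-address check so
 * the user-access routines below may be pointed at kernel memory.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	...			// user-access routines now accept kernel pointers
 *	set_fs(old_fs);
 */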

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __ua_size(size)							\
	(__builtin_constant_p(size) && (signed long) (size) > 0 ? 0 : (size))

#define __access_ok(addr,size,mask)					\
	(((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) >= 0)

#define __access_mask ((long)(get_fs().seg))

#define access_ok(type,addr,size) \
	__access_ok(((unsigned long)(addr)),(size),__access_mask)
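
/*
 * Worked example (illustrative): on 32-bit MIPS user space lies below
 * 0x80000000.  With USER_DS the mask is -1L, so the test reduces to
 * "neither addr, addr+size nor size has the sign bit set", i.e. the whole
 * range sits below the kernel segments.  With KERNEL_DS the mask is 0,
 * the masked value is 0 and the check always passes.
 *
 *	access_ok(VERIFY_READ, 0x7ffff000, 0x100);	// 1: ends at 0x7ffff100
 *	access_ok(VERIFY_READ, 0x7fffff00, 0x200);	// 0: crosses 0x80000000
 */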

static inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As MIPS uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	\
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)))
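
/*
 * Usage sketch (hypothetical ioctl handler, not part of this header):
 *
 *	int val;
 *
 *	if (get_user(val, (int *) uarg))	// uarg: pointer from user space
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, (int *) uarg))
 *		return -EFAULT;
 *
 * Both return 0 on success and -EFAULT on a bad user address.
 */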

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
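
/*
 * Sketch of the intended pattern (hypothetical code): one access_ok()
 * check amortised over several unchecked accesses to the same area.
 *
 *	if (!access_ok(VERIFY_READ, uvec, 2 * sizeof(int)))
 *		return -EFAULT;
 *	err  = __get_user(lo, &uvec[0]);
 *	err |= __get_user(hi, &uvec[1]);
 *	if (err)
 *		return -EFAULT;
 */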

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
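
/*
 * The cast through __large_struct exists only to hand GCC an "o"
 * (offsettable memory) operand at the user address: the struct is large
 * enough to cover any access the asm below performs, and no object of
 * this type is ever actually read or written as a whole.
 */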

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef __mips64
#define __GET_USER_DW __get_user_asm("ld")
#else
#define __GET_USER_DW __get_user_asm_ll32
#endif

#define __get_user_nocheck(x,ptr,size) ({ \
	long __gu_err; \
	__typeof__(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__gu_addr = (long) (ptr); \
	__asm__("":"=r" (__gu_err)); \
	switch (size) { \
	case 1: __get_user_asm("lb"); break; \
	case 2: __get_user_asm("lh"); break; \
	case 4: __get_user_asm("lw"); break; \
	case 8: __GET_USER_DW; break; \
	default: __get_user_unknown(); break; \
	} \
	x = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})

#define __get_user_check(x,ptr,size) ({ \
	long __gu_err = -EFAULT; \
	__typeof__(*(ptr)) __gu_val = 0; \
	long __gu_addr = (long) (ptr); \
	if (__access_ok(__gu_addr,size,__access_mask)) { \
		switch (size) { \
		case 1: __get_user_asm("lb"); break; \
		case 2: __get_user_asm("lh"); break; \
		case 4: __get_user_asm("lw"); break; \
		case 8: __GET_USER_DW; break; \
		default: __get_user_unknown(); break; \
		} \
	} \
	x = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})

#define __get_user_asm(insn) \
({ \
__asm__ __volatile__( \
	"1:\t" insn "\t%1,%2\n\t" \
	"move\t%0,$0\n" \
	"2:\n\t" \
	".section\t.fixup,\"ax\"\n" \
	"3:\tli\t%0,%3\n\t" \
	"move\t%1,$0\n\t" \
	"j\t2b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,3b\n\t" \
	".previous" \
	:"=r" (__gu_err), "=r" (__gu_val) \
	:"o" (__m(__gu_addr)), "i" (-EFAULT)); })
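
/*
 * How the fixup works, in brief: the load at label 1: is the potentially
 * faulting instruction.  The __ex_table section records the pair (1b,3b);
 * if the fault handler finds the faulting pc in the table, execution
 * resumes at 3: in .fixup, which sets the error code to -EFAULT, zeroes
 * the destination and jumps back to 2:, just past the load.
 */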

/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_user_asm_ll32 \
({ \
__asm__ __volatile__( \
	"1:\tlw\t%1,%2\n" \
	"2:\tlw\t%D1,%3\n\t" \
	"move\t%0,$0\n" \
	"3:\t.section\t.fixup,\"ax\"\n" \
	"4:\tli\t%0,%4\n\t" \
	"move\t%1,$0\n\t" \
	"move\t%D1,$0\n\t" \
	"j\t3b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,4b\n\t" \
	".word\t2b,4b\n\t" \
	".previous" \
	:"=r" (__gu_err), "=&r" (__gu_val) \
	:"o" (__m(__gu_addr)), "o" (__m(__gu_addr + 4)), \
	 "i" (-EFAULT)); })
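
/*
 * Note on the constraints above: "%D1" names the second register of the
 * 64-bit pair holding __gu_val, and "=&r" marks the pair early-clobbered,
 * since the first lw writes half of it before the second memory operand
 * has been consumed.
 */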

extern void __get_user_unknown(void);

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef __mips64
#define __PUT_USER_DW __put_user_asm("sd")
#else
#define __PUT_USER_DW __put_user_asm_ll32
#endif

#define __put_user_nocheck(x,ptr,size) ({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	__asm__("":"=r" (__pu_err)); \
	switch (size) { \
	case 1: __put_user_asm("sb"); break; \
	case 2: __put_user_asm("sh"); break; \
	case 4: __put_user_asm("sw"); break; \
	case 8: __PUT_USER_DW; break; \
	default: __put_user_unknown(); break; \
	} \
	__pu_err; \
})

#define __put_user_check(x,ptr,size) ({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __pu_val = (x); \
	long __pu_addr = (long) (ptr); \
	if (__access_ok(__pu_addr,size,__access_mask)) { \
		switch (size) { \
		case 1: __put_user_asm("sb"); break; \
		case 2: __put_user_asm("sh"); break; \
		case 4: __put_user_asm("sw"); break; \
		case 8: __PUT_USER_DW; break; \
		default: __put_user_unknown(); break; \
		} \
	} \
	__pu_err; \
})

#define __put_user_asm(insn) \
({ \
__asm__ __volatile__( \
	"1:\t" insn "\t%z1, %2\t\t\t# __put_user_asm\n\t" \
	"move\t%0, $0\n" \
	"2:\n\t" \
	".section\t.fixup,\"ax\"\n" \
	"3:\tli\t%0,%3\n\t" \
	"j\t2b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,3b\n\t" \
	".previous" \
	:"=r" (__pu_err) \
	:"Jr" (__pu_val), "o" (__m(__pu_addr)), "i" (-EFAULT)); })
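
/*
 * "Jr" lets GCC satisfy the value operand either with a register or with
 * the constant 0 (MIPS constraint "J" is integer zero); the "%z1" modifier
 * then prints $0 for a zero constant, so storing 0 costs no extra register.
 */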

#define __put_user_asm_ll32 \
({ \
__asm__ __volatile__( \
	"1:\tsw\t%1, %2\t\t\t# __put_user_asm_ll32\n\t" \
	"2:\tsw\t%D1, %3\n" \
	"move\t%0, $0\n" \
	"3:\n\t" \
	".section\t.fixup,\"ax\"\n" \
	"4:\tli\t%0,%4\n\t" \
	"j\t3b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,4b\n\t" \
	".word\t2b,4b\n\t" \
	".previous" \
	:"=r" (__pu_err) \
	:"r" (__pu_val), "o" (__m(__pu_addr)), "o" (__m(__pu_addr + 4)), \
	 "i" (-EFAULT)); })

extern void __put_user_unknown(void);

/*
 * We're generating jumps to subroutines which may lie outside the 256 MB
 * region reachable by the jal instruction.  Modules are loaded that far
 * from the kernel text, so they load the address with la and use jalr.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
	".set\tnoat\n\t" \
	"la\t$1, " #destination "\n\t" \
	"jalr\t$1\n\t" \
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
	"jal\t" #destination "\n\t"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to,from,n) ({				\
	register void *__cu_to_r __asm__ ("$4");			\
	register const void *__cu_from_r __asm__ ("$5");		\
	register long __cu_len_r __asm__ ("$6");			\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
		__MODULE_JAL(__copy_user)				\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  "memory");							\
	__cu_len_r;							\
})
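
/*
 * Calling convention used above: $4/$5/$6 are the MIPS argument registers
 * a0/a1/a2, so __copy_user is entered as if called normally; on return it
 * is expected to leave the number of bytes it could NOT copy in $6, which
 * is the value these macros evaluate to (0 meaning complete success).
 */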

#define __copy_to_user(to,from,n) ({					\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})

#define copy_to_user(to,from,n) ({					\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len))			\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
		                                 __cu_len);		\
	__cu_len;							\
})

#define __invoke_copy_from_user(to,from,n) ({				\
	register void *__cu_to_r __asm__ ("$4");			\
	register const void *__cu_from_r __asm__ ("$5");		\
	register long __cu_len_r __asm__ ("$6");			\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
		".set\tnoreorder\n\t"					\
		__MODULE_JAL(__copy_user)				\
		".set\tnoat\n\t"					\
		"addu\t$1, %1, %2\n\t"					\
		".set\tat\n\t"						\
		".set\treorder\n\t"					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  "memory");							\
	__cu_len_r;							\
})

#define __copy_from_user(to,from,n) ({					\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
	                                   __cu_len);			\
	__cu_len;							\
})

#define copy_from_user(to,from,n) ({					\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_READ, __cu_from, __cu_len))		\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
		                                   __cu_len);		\
	__cu_len;							\
})
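
/*
 * Usage sketch (hypothetical read(2) implementation, not part of this
 * header).  Both copy routines return the number of bytes left uncopied,
 * so 0 means success:
 *
 *	if (copy_to_user(ubuf, kbuf, count))
 *		return -EFAULT;
 *	return count;
 */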

static inline __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", "$8", "$9", "$31");

	return res;
}

#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && access_ok(VERIFY_WRITE, __cl_addr, __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })

/*
 * Returns: -EFAULT if exception before terminator, N if the entire
 * buffer filled, else strlen.
 */
static inline long
__strncpy_from_user(char *__to, const char *__from, long __len)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");

	return res;
}

static inline long
strncpy_from_user(char *__to, const char *__from, long __len)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");

	return res;
}
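
/*
 * Usage sketch (hypothetical caller, not part of this header), showing
 * the three-way return convention described above:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return -EFAULT;		// faulted in user space
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	// no terminator within the buffer
 *	// name[] now holds a NUL-terminated string of length len
 */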

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char *s)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", "$8", "$31");

	return res;
}

static inline long strlen_user(const char *s)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", "$8", "$31");

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char *s, long n)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", "$8", "$31");

	return res;
}

static inline long strnlen_user(const char *s, long n)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", "$8", "$31");

	return res;
}

struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

/* Returns 0 if no entry matches and the fixup address (nextinsn) otherwise. */
extern unsigned long search_exception_table(unsigned long addr);

/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc)                \
({                                                              \
	fixup_unit;                                             \
})
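
/*
 * How these fit together, roughly: on a fault in kernel mode the trap
 * handler looks up the faulting pc with search_exception_table(); a
 * non-zero result is the nextinsn recorded by the ".word 1b,3b" pairs
 * above, and fixup_exception() yields it as the pc to resume at, landing
 * in the .fixup stub for that access.
 */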

#endif /* _ASM_UACCESS_H */