/* $Id: uaccess.h,v 1.1.1.1 2008/10/15 03:29:17 james26_jang Exp $
 *
 * User space memory access functions
 *
 * Copyright (C) 1999  Niibe Yutaka
 *
 *  Based on:
 *     MIPS implementation version 1.15 by
 *              Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 */
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ    0
#define VERIFY_WRITE   1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * if get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0x80000000)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit=(x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))

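/*
 * Typical usage sketch (do_ioctl_internally() is hypothetical): code
 * that must pass kernel pointers through the user access routines
 * widens the limit temporarily and always restores it afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_ioctl_internally(kbuf);
 *	set_fs(old_fs);
 */
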
/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * sum := addr + size;  carry? --> flag = true;
 * if (sum > addr_limit) flag = true;
 */
#define __range_ok(addr,size) ({					      \
	unsigned long flag,sum;						      \
	__asm__("clrt; addc %3, %1; movt %0; cmp/hi %4, %1; rotcl %0"	      \
		:"=&r" (flag), "=r" (sum)				      \
		:"1" (addr), "r" ((int)(size)), "r" (current->addr_limit.seg) \
		:"t");							      \
	flag; })

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
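
/*
 * Rough C equivalent of __range_ok() (sketch only; the asm keeps the
 * carry out of the 32-bit sum in the T flag instead of widening):
 *
 *	sum = addr + size;
 *	flag = (sum < addr)			<-- carry: wrapped past 2^32
 *	     | (sum > current->addr_limit.seg);	<-- ends beyond the limit
 */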

static inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
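
/*
 * Usage sketch (hypothetical ioctl handler; arg is a user pointer):
 * both macros evaluate to 0 on success and -EFAULT on a bad address:
 *
 *	int val;
 *
 *	if (get_user(val, (int *) arg))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int *) arg))
 *		return -EFAULT;
 */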

/*
 * The "__xxx" versions do not do address space checking; they are
 * useful when doing multiple accesses to the same area (the caller has
 * to do the checks by hand with "access_ok()").
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
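
/*
 * Usage sketch (struct pair and arg are hypothetical): check the whole
 * area once, then use the unchecked variants for the individual fields:
 *
 *	struct pair { int a, b; } *up = (struct pair *) arg;
 *	int a, b;
 *
 *	if (!access_ok(VERIFY_READ, up, sizeof(*up)))
 *		return -EFAULT;
 *	if (__get_user(a, &up->a) || __get_user(b, &up->b))
 *		return -EFAULT;
 */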

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
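
/*
 * __m() exists only to hand the "m" constraints below an lvalue of a
 * large type, so GCC assumes the whole region around the pointer may be
 * read or written, not just a single word.
 */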

#define __get_user_nocheck(x,ptr,size) ({ \
long __gu_err; \
__typeof__(*(ptr)) __gu_val; \
long __gu_addr; \
__asm__("":"=r" (__gu_val)); \
__gu_addr = (long) (ptr); \
__asm__("":"=r" (__gu_err)); \
switch (size) { \
case 1: __get_user_asm("b"); break; \
case 2: __get_user_asm("w"); break; \
case 4: __get_user_asm("l"); break; \
default: __get_user_unknown(); break; \
} x = (__typeof__(*(ptr))) __gu_val; __gu_err; })

#define __get_user_check(x,ptr,size) ({ \
long __gu_err = -EFAULT; \
__typeof__(*(ptr)) __gu_val = 0; \
long __gu_addr; \
__gu_addr = (long) (ptr); \
if (__access_ok(__gu_addr,size)) { \
switch (size) { \
case 1: __get_user_asm("b"); break; \
case 2: __get_user_asm("w"); break; \
case 4: __get_user_asm("l"); break; \
default: __get_user_unknown(); break; \
} } x = (__typeof__(*(ptr))) __gu_val; __gu_err; })

/*
 * '1:' marks the access that may fault.  The __ex_table entry maps a
 * fault there to the '3:' fixup code, which zeroes the result, loads
 * -EFAULT into the error operand (in the jmp delay slot) and resumes
 * at '2:'.
 */
#define __get_user_asm(insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%2, %1\n\t" \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov	#0, %1\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (__gu_err), "=&r" (__gu_val) \
	:"m" (__m(__gu_addr)), "i" (-EFAULT)); })

extern void __get_user_unknown(void);

#define __put_user_nocheck(x,ptr,size) ({ \
long __pu_err; \
__typeof__(*(ptr)) __pu_val; \
long __pu_addr; \
__pu_val = (x); \
__pu_addr = (long) (ptr); \
__asm__("":"=r" (__pu_err)); \
switch (size) { \
case 1: __put_user_asm("b"); break; \
case 2: __put_user_asm("w"); break; \
case 4: __put_user_asm("l"); break; \
case 8: __put_user_u64(__pu_val,__pu_addr,__pu_err); break; \
default: __put_user_unknown(); break; \
} __pu_err; })

#define __put_user_check(x,ptr,size) ({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __pu_val; \
long __pu_addr; \
__pu_val = (x); \
__pu_addr = (long) (ptr); \
if (__access_ok(__pu_addr,size)) { \
switch (size) { \
case 1: __put_user_asm("b"); break; \
case 2: __put_user_asm("w"); break; \
case 4: __put_user_asm("l"); break; \
case 8: __put_user_u64(__pu_val,__pu_addr,__pu_err); break; \
default: __put_user_unknown(); break; \
} } __pu_err; })

/*
 * Same fixup scheme as __get_user_asm(); a store has no destination
 * register to zero on a fault.
 */
#define __put_user_asm(insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%1, %2\n\t" \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (__pu_err) \
	:"r" (__pu_val), "m" (__m(__pu_addr)), "i" (-EFAULT) \
	:"memory"); })

/*
 * %R and %S pick the least/most significant 32-bit halves of the
 * register pair holding the 64-bit value, and %T names the following
 * word of the memory operand, so the two stores hit addr and addr+4 in
 * the order each endianness requires.
 */
#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%R1,%2\n\t" \
	"mov.l	%S1,%T2\n\t" \
	"mov	#0,%0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f,%0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3,%0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#else
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%S1,%2\n\t" \
	"mov.l	%R1,%T2\n\t" \
	"mov	#0,%0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f,%0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3,%0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#endif

extern void __put_user_unknown(void);

/*
 * Generic arbitrary sized copy.  Returns the number of bytes NOT
 * copied; on a faulting store the fixup code re-adds the byte that was
 * in flight so the count stays accurate.
 */
static __inline__ __kernel_size_t
__copy_user(void *__to, const void *__from, __kernel_size_t __n)
{
	unsigned long __dummy, _f, _t;
	__kernel_size_t res;

	if ((res = __n))
	__asm__ __volatile__(
		"9:\n\t"
		"mov.b	@%2+, %1\n\t"
		"dt	%0\n"
		"1:\n\t"
		"mov.b	%1, @%3\n\t"
		"bf/s	9b\n\t"
		" add	#1, %3\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l	5f, %1\n\t"
		"jmp	@%1\n\t"
		" add	#1, %0\n\t"
		".balign 4\n"
		"5:	.long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 9b,2b\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (res), "=&z" (__dummy), "=r" (_f), "=r" (_t)
		: "2" (__from), "3" (__to), "0" (res)
		: "memory", "t");

	return res;
}

#define copy_to_user(to,from,n) ({ \
void *__copy_to = (void *) (to); \
__kernel_size_t __copy_size = (__kernel_size_t) (n); \
__kernel_size_t __copy_res; \
if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
} else __copy_res = __copy_size; \
__copy_res; })

#define __copy_to_user(to,from,n)		\
	__copy_user((void *)(to),		\
		    (void *)(from), n)

#define copy_from_user(to,from,n) ({ \
void *__copy_to = (void *) (to); \
void *__copy_from = (void *) (from); \
__kernel_size_t __copy_size = (__kernel_size_t) (n); \
__kernel_size_t __copy_res; \
if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
} else __copy_res = __copy_size; \
__copy_res; })

#define __copy_from_user(to,from,n)		\
	__copy_user((void *)(to),		\
		    (void *)(from), n)
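
/*
 * Usage sketch (hypothetical read() method; kbuf is a kernel buffer):
 * the copy routines return the number of bytes NOT copied, so zero
 * means success:
 *
 *	if (copy_to_user(buf, kbuf, count))
 *		return -EFAULT;
 *	return count;
 */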

static __inline__ __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
{
	unsigned long __a;

	__asm__ __volatile__(
		"9:\n\t"
		"dt	%0\n"
		"1:\n\t"
		"mov.b	%4, @%1\n\t"
		"bf/s	9b\n\t"
		" add	#1, %1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l	4f, %1\n\t"
		"jmp	@%1\n\t"
		" nop\n"
		".balign 4\n"
		"4:	.long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (size), "=r" (__a)
		: "0" (size), "1" (addr), "r" (0)
		: "memory", "t");

	return size;
}

#define clear_user(addr,n) ({ \
void * __cl_addr = (addr); \
unsigned long __cl_size = (n); \
if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
__cl_size = __clear_user(__cl_addr, __cl_size); \
__cl_size; })
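
/*
 * Usage sketch (buf and copied are hypothetical): zero-pad the tail of
 * a partially filled user buffer.  A non-zero return means that many
 * bytes were left untouched:
 *
 *	if (clear_user(buf + copied, count - copied))
 *		return -EFAULT;
 */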

static __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __src, int __count)
{
	__kernel_size_t res;
	unsigned long __dummy, _d, _s;

	__asm__ __volatile__(
		"9:\n"
		"mov.b	@%2+, %1\n\t"
		"cmp/eq	#0, %1\n\t"
		"bt/s	2f\n"
		"1:\n"
		"mov.b	%1, @%3\n\t"
		"dt	%7\n\t"
		"bf/s	9b\n\t"
		" add	#1, %3\n\t"
		"2:\n\t"
		"sub	%7, %0\n"
		"3:\n"
		".section .fixup,\"ax\"\n"
		"4:\n\t"
		"mov.l	5f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%8, %0\n\t"
		".balign 4\n"
		"5:	.long 3b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 9b,4b\n"
		".previous"
		: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
		: "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
		  "i" (-EFAULT)
		: "memory", "t");

	return res;
}

#define strncpy_from_user(dest,src,count) ({ \
unsigned long __sfu_src = (unsigned long) (src); \
int __sfu_count = (int) (count); \
long __sfu_res = -EFAULT; \
if (__access_ok(__sfu_src, __sfu_count)) { \
__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
} __sfu_res; })
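
/*
 * Usage sketch (user_ptr is hypothetical): copy a NUL-terminated user
 * string into a bounded kernel buffer:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, user_ptr, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return -EFAULT;
 *	name[len] = '\0';
 */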

#define strlen_user(str) strnlen_user(str, ~0UL >> 1)

/*
 * Return the size of a string (including the terminating NUL!)
 */
static __inline__ long __strnlen_user(const char *__s, long __n)
{
	unsigned long res;
	unsigned long __dummy;

	__asm__ __volatile__(
		"9:\n"
		"cmp/eq	%4, %0\n\t"
		"bt	2f\n"
		"1:\t"
		"mov.b	@(%0,%3), %1\n\t"
		"tst	%1, %1\n\t"
		"bf/s	9b\n\t"
		" add	#1, %0\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l	4f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%5, %0\n"
		".balign 4\n"
		"4:	.long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous"
		: "=z" (res), "=&r" (__dummy)
		: "0" (0), "r" (__s), "r" (__n), "i" (-EFAULT)
		: "t");
	return res;
}

static __inline__ long strnlen_user(const char *s, long n)
{
	if (!__addr_ok(s))
		return 0;
	else
		return __strnlen_user(s, n);
}
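
/*
 * Usage sketch: size a user string up front; strnlen_user() counts the
 * terminating NUL and returns 0 on a bad address (PATH_MAX is just an
 * example bound):
 *
 *	long size = strnlen_user(user_str, PATH_MAX);
 *
 *	if (!size || size >= PATH_MAX)
 *		return -EFAULT;
 */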

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if no matching entry is found, and the fixup address otherwise.  */
extern unsigned long search_exception_table(unsigned long addr);
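
/*
 * Sketch of how the fault handler is expected to use these (the real
 * logic lives in the page fault code, not in this header):
 *
 *	fixup = search_exception_table(regs->pc);
 *	if (fixup) {
 *		regs->pc = fixup_exception(0, fixup, regs->pc);
 *		return;
 *	}
 */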

/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc)                \
({                                                              \
	fixup_unit;                                             \
})

#endif /* __ASM_SH_UACCESS_H */