#ifndef __ASM_SH64_UACCESS_H
#define __ASM_SH64_UACCESS_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/uaccess.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * User space memory access functions
 *
 * Copyright (C) 1999  Niibe Yutaka
 *
 *  Based on:
 *     MIPS implementation version 1.15 by
 *              Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ    0
#define VERIFY_WRITE   1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0x80000000)

#define get_ds()	(KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit=(x))
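
/*
 * Illustrative sketch only (the surrounding function, err, val and
 * kernel_ptr are hypothetical): the usual pattern for temporarily
 * widening the address limit so that the checked accessors accept
 * kernel pointers, restoring the old limit afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = put_user(val, kernel_ptr);
 *	set_fs(old_fs);
 */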

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * sum := addr + size;  carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 */
#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1)

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
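
/*
 * Worked example with illustrative values, assuming addr_limit.seg ==
 * USER_DS (0x80000000): __range_ok(0x7FFFFF00, 0x20) computes
 * 0x7FFFFF00 + 0x20 = 0x7FFFFF20 < 0x80000000, so access_ok() is 1;
 * __range_ok(0x7FFFFFF0, 0x20) computes 0x80000010, which is not below
 * the limit, so access_ok() is 0 and the caller should fail with -EFAULT.
 */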

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As kernel and user data live in the same address space, we can just
 * do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
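
/*
 * Illustrative use only (uptr, arg and the surrounding handler are
 * hypothetical): the checked forms validate the pointer and return
 * non-zero on a fault, which callers typically translate into -EFAULT:
 *
 *	int *uptr = (int *) arg;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * The __get_user()/__put_user() forms skip the access_ok() check and are
 * only safe after the caller has verified the range itself.
 */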

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens. These macros can be optimized for
 * the common case of just returning from the function in which xxx_ret
 * is used.
 */

#define put_user_ret(x,ptr,ret) ({ \
	if (put_user(x,ptr)) \
		return ret; \
})

#define get_user_ret(x,ptr,ret) ({ \
	if (get_user(x,ptr)) \
		return ret; \
})

#define __put_user_ret(x,ptr,ret) ({ \
	if (__put_user(x,ptr)) \
		return ret; \
})

#define __get_user_ret(x,ptr,ret) ({ \
	if (__get_user(x,ptr)) \
		return ret; \
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_size(x,ptr,size,retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1:							\
		retval = __get_user_asm_b(x, ptr);		\
		break;						\
	case 2:							\
		retval = __get_user_asm_w(x, ptr);		\
		break;						\
	case 4:							\
		retval = __get_user_asm_l(x, ptr);		\
		break;						\
	case 8:							\
		retval = __get_user_asm_q(x, ptr);		\
		break;						\
	default:						\
		__get_user_unknown();				\
		break;						\
	}							\
} while (0)

#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size((void *)&__gu_val, (long)(ptr),		\
			(size), __gu_err);			\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x,ptr,size)				\
({								\
	long __gu_addr = (long)(ptr);				\
	long __gu_err = -EFAULT, __gu_val;			\
	if (__access_ok(__gu_addr, (size)))			\
		__get_user_size((void *)&__gu_val, __gu_addr,	\
				(size), __gu_err);		\
	(x) = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})

extern long __get_user_asm_b(void *, long);
extern long __get_user_asm_w(void *, long);
extern long __get_user_asm_l(void *, long);
extern long __get_user_asm_q(void *, long);
extern void __get_user_unknown(void);

#define __put_user_size(x,ptr,size,retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1:							\
		retval = __put_user_asm_b(x, ptr);		\
		break;						\
	case 2:							\
		retval = __put_user_asm_w(x, ptr);		\
		break;						\
	case 4:							\
		retval = __put_user_asm_l(x, ptr);		\
		break;						\
	case 8:							\
		retval = __put_user_asm_q(x, ptr);		\
		break;						\
	default:						\
		__put_user_unknown();				\
	}							\
} while (0)

#define __put_user_nocheck(x,ptr,size)				\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \
	__pu_err;						\
})

#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	long __pu_addr = (long)(ptr);				\
	__typeof__(*(ptr)) __pu_val = (x);			\
								\
	if (__access_ok(__pu_addr, (size)))			\
		__put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err);\
	__pu_err;						\
})

extern long __put_user_asm_b(void *, long);
extern long __put_user_asm_w(void *, long);
extern long __put_user_asm_l(void *, long);
extern long __put_user_asm_q(void *, long);
extern void __put_user_unknown(void);

/* Generic arbitrary sized copy.  */
/* Return the number of bytes NOT copied */
extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);

#define copy_to_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else \
		__copy_res = __copy_size; \
	__copy_res; })

#define copy_to_user_ret(to,from,n,retval) ({ \
	if (copy_to_user(to,from,n)) \
		return retval; \
})

#define __copy_to_user(to,from,n)		\
	__copy_user((void *)(to),		\
		    (void *)(from), n)

#define __copy_to_user_ret(to,from,n,retval) ({ \
	if (__copy_to_user(to,from,n)) \
		return retval; \
})

#define copy_from_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	void *__copy_from = (void *) (from); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else \
		__copy_res = __copy_size; \
	__copy_res; })

#define copy_from_user_ret(to,from,n,retval) ({ \
	if (copy_from_user(to,from,n)) \
		return retval; \
})

#define __copy_from_user(to,from,n)		\
	__copy_user((void *)(to),		\
		    (void *)(from), n)

#define __copy_from_user_ret(to,from,n,retval) ({ \
	if (__copy_from_user(to,from,n)) \
		return retval; \
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
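
/*
 * Illustrative sketch only (kbuf, ubuf and the surrounding function are
 * hypothetical): both copy routines return the number of bytes that could
 * NOT be copied, so zero means success and any non-zero value is normally
 * mapped to -EFAULT:
 *
 *	char kbuf[64];
 *
 *	if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(ubuf, kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 */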

extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })
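
/*
 * Example for illustration only (ubuf and len are hypothetical): like the
 * copy routines, clear_user() returns the number of bytes left unzeroed,
 * so a non-zero result indicates a fault:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */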

extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);

#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if (__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} \
	__sfu_res; })

#define strlen_user(str) strnlen_user(str, ~0UL >> 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *__s, long __n);

static inline long strnlen_user(const char *s, long n)
{
	if (!__addr_ok(s))
		return 0;
	else
		return __strnlen_user(s, n);
}
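
/*
 * Illustrative sketch only (name and uname are hypothetical): a bounded
 * string copy from user space.  strncpy_from_user() normally returns the
 * length of the copied string (not counting the trailing NUL) or -EFAULT,
 * while strnlen_user()/strlen_user() count the terminating NUL and return
 * 0 for an inaccessible address:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return -EFAULT;
 *	name[len] = '\0';
 */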

struct exception_table_entry
{
	unsigned long insn, fixup;
};

#define ARCH_HAS_SEARCH_EXTABLE

/* If gcc inlines memset, it will use st.q instructions.  Therefore, we need
   kmalloc allocations to be 8-byte aligned.  Without this, the alignment
   becomes BYTES_PER_WORD, i.e. only 4 (since sizeof(long)==sizeof(void*)==4
   on sh64 at the moment). */
#define ARCH_KMALLOC_MINALIGN 8

/*
 * We want 8-byte alignment for the slab caches as well, otherwise we have
 * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
 */
#define ARCH_SLAB_MINALIGN 8

/* Returns 0 if exception not found and fixup.unit otherwise.  */
extern unsigned long search_exception_table(unsigned long addr);
extern const struct exception_table_entry *search_exception_tables(unsigned long addr);

#endif /* __ASM_SH64_UACCESS_H */