#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS 	MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

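/*
 * Illustrative sketch, not part of this header: the classic
 * save/override/restore pattern built on get_fs()/set_fs().  While the
 * override is in effect, the user-copy range checks accept kernel
 * addresses.  The function name is hypothetical.
 */
#if 0
static void example_with_kernel_ds(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* range checks now pass for kernel pointers */
	/* ... call code that performs user-style copies on kernel buffers ... */
	set_fs(old_fs);		/* always restore the saved limit */
}
#endif
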
#define __addr_ok(addr)					\
	((unsigned long __force)(addr) <		\
	 (current_thread_info()->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
 *
 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
 */

#define __range_not_ok(addr, size)					\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "rm" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})
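
/*
 * Illustrative sketch, not part of this header: what the asm above
 * computes, rewritten as plain C with the carry made explicit.  The
 * function name is hypothetical.
 */
#if 0
static unsigned long example_range_not_ok(const void __user *addr,
					  unsigned long size)
{
	unsigned long limit = current_thread_info()->addr_limit.seg;
	unsigned long sum = (unsigned long)addr + size;

	/* nonzero if the addition wrapped or the end is past the limit */
	return (sum < (unsigned long)addr) || (sum > limit);
}
#endif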

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
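
/*
 * Illustrative sketch, not part of this header: one access_ok() check
 * covering a small range, followed by unchecked fetches.  The function
 * name and signature are hypothetical.
 */
#if 0
static long example_read_pair(const int __user *uptr, int *a, int *b)
{
	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
		return -EFAULT;
	if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
		return -EFAULT;
	return 0;
}
#endif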

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
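
/*
 * Illustrative sketch, not part of this header: conceptually, the page
 * fault handler consults the table like this (the real fixup_exception()
 * lives in arch/x86/mm/extable.c and handles a few more cases).
 */
#if 0
static int example_fixup(struct pt_regs *regs)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(regs->ip);
	if (!e)
		return 0;	/* fault was not at a whitelisted instruction */
	regs->ip = e->fixup;	/* resume at the out-of-line fixup code */
	return 1;
}
#endif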

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_x(size, ret, x, ptr)		      \
	asm volatile("call __get_user_" #size	      \
		     : "=a" (ret), "=d" (x)	      \
		     : "0" (ptr))		      \

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#ifdef CONFIG_X86_32
#define __get_user_8(__ret_gu, __val_gu, ptr)				\
		__get_user_x(X, __ret_gu, __val_gu, ptr)
#else
#define __get_user_8(__ret_gu, __val_gu, ptr)				\
		__get_user_x(8, __ret_gu, __val_gu, ptr)
#endif

#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	unsigned long __val_gu;						\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 2:								\
		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 4:								\
		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 8:								\
		__get_user_8(__ret_gu, __val_gu, ptr);			\
		break;							\
	default:							\
		__get_user_x(X, __ret_gu, __val_gu, ptr);		\
		break;							\
	}								\
	(x) = (__typeof__(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
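
/*
 * Illustrative sketch, not part of this header: get_user() validates the
 * address itself, so no separate access_ok() call is needed.  The
 * function name is hypothetical.
 */
#if 0
static long example_fetch(const int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return val;
}
#endif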

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:\n"						\
		     _ASM_EXTABLE(1b, 2b - 1b)				\
		     _ASM_EXTABLE(2b, 3b - 2b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

#ifdef CONFIG_X86_WP_WORKS_OK

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__ret_pu;						\
})
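
/*
 * Illustrative sketch, not part of this header: put_user() likewise
 * performs its own address check.  The function name is hypothetical.
 */
#if 0
static long example_store(int __user *uptr, int val)
{
	return put_user(val, uptr) ? -EFAULT : 0;
}
#endif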

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#else

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	__typeof__(*(ptr))__pus_tmp = x;				\
	retval = 0;							\
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
		retval = errret;					\
} while (0)

#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr))__pus_tmp = x;			\
	__ret_pu = 0;						\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
				       sizeof(*(ptr))) != 0))	\
		__ret_pu = -EFAULT;				\
	__ret_pu;						\
})
#endif

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE(1b, 2b - 1b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;						\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE(1b, 2b - 1b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	int prev_err = current_thread_info()->uaccess_err;		\
	current_thread_info()->uaccess_err = 0;				\
	barrier();

#define uaccess_catch(err)						\
	(err) |= current_thread_info()->uaccess_err;			\
	current_thread_info()->uaccess_err = prev_err;			\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
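
/*
 * Illustrative sketch, not part of this header: pairing one access_ok()
 * with the unchecked variants for a read-modify-write.  VERIFY_WRITE is
 * used because it is a superset of VERIFY_READ.  The function name is
 * hypothetical.
 */
#if 0
static long example_increment(int __user *uptr)
{
	int val;

	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
		return -EFAULT;
	if (__get_user(val, uptr) || __put_user(val + 1, uptr))
		return -EFAULT;
	return 0;
}
#endif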

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)
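
/*
 * Illustrative sketch, not part of this header: the try/catch pattern
 * documented above, with one error check covering several fetches.  The
 * address is assumed to have been validated with access_ok() already.
 * The function name is hypothetical.
 */
#if 0
static long example_get_pair_ex(const int __user *uptr, int *a, int *b)
{
	int err = 0;

	get_user_try {
		get_user_ex(*a, uptr);
		get_user_ex(*b, uptr + 1);
	} get_user_catch(err);

	return err;
}
#endif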

#ifdef CONFIG_X86_WP_WORKS_OK

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#else /* !CONFIG_X86_WP_WORKS_OK */

#define put_user_try		do {		\
	int __uaccess_err = 0;

#define put_user_catch(err)			\
	(err) |= __uaccess_err;			\
} while (0)

#define put_user_ex(x, ptr)	do {		\
	__uaccess_err |= __put_user(x, ptr);	\
} while (0)

#endif /* CONFIG_X86_WP_WORKS_OK */

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include "uaccess_32.h"
#else
# include "uaccess_64.h"
#endif

#endif /* _ASM_X86_UACCESS_H */