#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) & (current->addr_limit.seg)))

/*
 * Uhhuh, this needs 65-bit arithmetic: addr + size can carry out of
 * 64 bits, so the carry has to take part in the limit comparison.
 */
#define __range_not_ok(addr,size) ({ \
	unsigned long flag,sum; \
	asm("# range_ok\n\t" \
		"addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
		:"=&r" (flag), "=r" (sum) \
		:"1" (addr),"g" ((long)(size)),"g" (current->addr_limit.seg)); \
	flag; })

#define access_ok(type,addr,size) (__range_not_ok(addr,size) == 0)

extern inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}


/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if no fixup exists for the faulting address, the fixup address otherwise. */
extern unsigned long search_exception_table(unsigned long);


/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to use pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that must have been done previously with a separate
 * "access_ok()" call (this is used when we do multiple accesses to the
 * same area of user memory).
 */
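
/*
 * Usage sketch (illustrative only; example_read_pair() is a hypothetical
 * helper, not part of this interface): get_user()/put_user() check the
 * address themselves, while the __xxx variants rely on one prior
 * access_ok() call covering the whole range.
 *
 *	int example_read_pair(int *uptr, int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
 *			return -EFAULT;
 *		if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *			return -EFAULT;		<- may still fault on unmapped pages
 *		return 0;
 *	}
 *
 * The checked form collapses both steps into one call:
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */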

extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);
extern void __get_user_8(void);

#define __get_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __get_user_" #size \
		:"=a" (ret),"=d" (x) \
		:"0" (ptr) \
		:"rbx")

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr) \
({	long __ret_gu,__val_gu; \
	switch(sizeof (*(ptr))) { \
	case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
	case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
	case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
	case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
	default: __get_user_bad(); break; \
	} \
	(x) = (__typeof__(*(ptr)))__val_gu; \
	__ret_gu; \
})

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __put_user_" #size \
		:"=a" (ret) \
		:"0" (ptr),"d" (x) \
		:"rbx")

#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err; \
	__put_user_size((x),(ptr),(size),__pu_err); \
	__pu_err; \
})


#define __put_user_check(x,ptr,size) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) *__pu_addr = (ptr); \
	if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
		__put_user_size((x),__pu_addr,(size),__pu_err); \
	__pu_err; \
})

#define __put_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break; \
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break; \
	case 4: __put_user_asm(x,ptr,retval,"l","k","ir"); break; \
	case 8: __put_user_asm(x,ptr,retval,"q","","ir"); break; \
	default: __put_user_bad(); \
	} \
} while (0)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype) \
	__asm__ __volatile__( \
		"1:	mov"itype" %"rtype"1,%2\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	movq %3,%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 8\n" \
		"	.quad 1b,3b\n" \
		".previous" \
		: "=r"(err) \
		: ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))


#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_err, __gu_val; \
	__get_user_size(__gu_val,(ptr),(size),__gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break; \
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break; \
	case 4: __get_user_asm(x,ptr,retval,"l","k","=r"); break; \
	case 8: __get_user_asm(x,ptr,retval,"q","","=r"); break; \
	default: (x) = __get_user_bad(); \
	} \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype) \
	__asm__ __volatile__( \
		"1:	mov"itype" %2,%"rtype"1\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	mov %3,%0\n" \
		"	xor"itype" %"rtype"1,%"rtype"1\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 8\n" \
		"	.quad 1b,3b\n" \
		".previous" \
		: "=r"(err), ltype (x) \
		: "m"(__m(addr)), "i"(-EFAULT), "0"(err))
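
/*
 * For illustration (a sketch of what the macros above emit, not verbatim
 * compiler output): the 4-byte case __put_user_size(x, ptr, 4, err)
 * selects __put_user_asm(x, ptr, err, "l", "k", "ir"), which expands to
 * roughly:
 *
 *	1:	movl %k1,(ptr)		<- the store that may fault
 *	2:				<- normal path continues here
 *	.section .fixup,"ax"
 *	3:	movq $-EFAULT,%0	<- fixup: set err
 *		jmp 2b			<- and resume after the store
 *	.previous
 *	.section __ex_table,"a"
 *		.align 8
 *		.quad 1b,3b		<- a fault at 1 continues at 3
 *	.previous
 *
 * On a fault, the page fault handler finds this entry via
 * search_exception_table() and resumes at the fixup address, so the
 * fast path never branches around the recovery code.
 */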

/*
 * Copy To/From Userspace
 *
 * This relies on an optimized common worker function.
 *
 * Could do special inline versions for small constant copies, but avoid this
 * for now. It's not clear it is worth it.
 */

extern unsigned long copy_user_generic(void *to, const void *from, unsigned len);

extern unsigned long copy_to_user(void *to, const void *from, unsigned len);
extern unsigned long copy_from_user(void *to, const void *from, unsigned len);
#define __copy_to_user copy_user_generic
#define __copy_from_user copy_user_generic

long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
long strnlen_user(const char *str, long n);
unsigned long clear_user(void *mem, unsigned long len);
unsigned long __clear_user(void *mem, unsigned long len);

#endif /* __X86_64_UACCESS_H */