#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr,size) ({					\
	unsigned long flag,roksum;					\
	__chk_user_ptr(addr);						\
	asm("# range_ok\n\r"						\
		"addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"	\
		:"=&r" (flag), "=r" (roksum)				\
		:"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
	flag; })

#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
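
/*
 * Illustrative sketch, not part of the original header: access_ok() is the
 * range check that the checked transfer routines perform internally.  A
 * caller that validates a user range once and then uses the unchecked
 * "__" variants defined below might look roughly like this (the helper
 * name example_read_flags is hypothetical):
 *
 *	long example_read_flags(unsigned int __user *uptr)
 *	{
 *		unsigned int flags;
 *
 *		if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		if (__get_user(flags, uptr))
 *			return -EFAULT;
 *		return flags;
 *	}
 */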

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

#define ARCH_HAS_SEARCH_EXTABLE

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size,ret,x,ptr)					\
	asm volatile("call __get_user_" #size				\
		:"=a" (ret),"=d" (x)					\
		:"c" (ptr)						\
		:"r8")

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr)							\
({	unsigned long __val_gu;						\
	int __ret_gu;							\
	__chk_user_ptr(ptr);						\
	switch(sizeof (*(ptr))) {					\
	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
	case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break;		\
	default: __get_user_bad(); break;				\
	}								\
	(x) = (typeof(*(ptr)))__val_gu;					\
	__ret_gu;							\
})

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr)					\
	asm volatile("call __put_user_" #size				\
		:"=a" (ret)						\
		:"c" (ptr),"d" (x)					\
		:"r8")

#define put_user(x,ptr)							\
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr)						\
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

#define __put_user_nocheck(x,ptr,size)				\
({								\
	int __pu_err;						\
	__put_user_size((x),(ptr),(size),__pu_err);		\
	__pu_err;						\
})


#define __put_user_check(x,ptr,size)					\
({									\
	int __pu_err;							\
	typeof(*(ptr)) __user *__pu_addr = (ptr);			\
	switch (size) {							\
	case 1: __put_user_x(1,__pu_err,x,__pu_addr); break;		\
	case 2: __put_user_x(2,__pu_err,x,__pu_addr); break;		\
	case 4: __put_user_x(4,__pu_err,x,__pu_addr); break;		\
	case 8: __put_user_x(8,__pu_err,x,__pu_addr); break;		\
	default: __put_user_bad();					\
	}								\
	__pu_err;							\
})

#define __put_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
	case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
	case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\
	default: __put_user_bad();					\
	}								\
} while (0)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
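
/*
 * Illustrative sketch, not from the original source: get_user()/put_user()
 * pick the access size from the pointer type and return 0 on success or
 * -EFAULT on a faulting access.  A hypothetical user of the 4-byte case:
 *
 *	int example_bump(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return put_user(val + 1, uptr);
 *	}
 */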

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile(							\
		"1:	mov"itype" %"rtype"1,%2\n"			\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	mov %3,%0\n"					\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 8\n"					\
		"	.quad 1b,3b\n"					\
		".previous"						\
		: "=r"(err)						\
		: ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))


#define __get_user_nocheck(x,ptr,size)				\
({								\
	int __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (typeof(*(ptr)))__gu_val;				\
	__gu_err;						\
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
	case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
	case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile(							\
		"1:	mov"itype" %2,%"rtype"1\n"			\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	mov %3,%0\n"					\
		"	xor"itype" %"rtype"1,%"rtype"1\n"		\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 8\n"					\
		"	.quad 1b,3b\n"					\
		".previous"						\
		: "=r"(err), ltype (x)					\
		: "m"(__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst,(__force void *)src,size);
	switch (size) {
	case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
		return ret;
	case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
		return ret;
	case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
		return ret;
	case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
		return ret;
	case 10:
		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
		if (unlikely(ret)) return ret;
		__get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
		return ret;
	case 16:
		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
		if (unlikely(ret)) return ret;
		__get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
		return ret;
	default:
		return copy_user_generic(dst,(__force void *)src,size);
	}
}
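
/*
 * Illustrative sketch, not part of the original header: copy_from_user()
 * performs the access_ok() check itself, while __copy_from_user() above
 * assumes the caller already validated the range and, for small constant
 * sizes, reduces to one or two inline __get_user_asm() accesses.  Both
 * return 0 on success and nonzero when part of the range could not be
 * copied.  Hypothetical example, with example_req a made-up structure:
 *
 *	struct example_req { u64 addr; u32 len; };
 *
 *	int example_fetch_req(struct example_req *kreq,
 *			      const struct example_req __user *ureq)
 *	{
 *		if (copy_from_user(kreq, ureq, sizeof(*kreq)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */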

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,src,size);
	switch (size) {
	case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
		return ret;
	case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
		return ret;
	case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	case 10:
		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
		if (unlikely(ret)) return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	case 16:
		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
		if (unlikely(ret)) return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst,src,size);
	}
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,(__force void *)src,size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
		if (likely(!ret))
			__put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
		if (likely(!ret))
			__put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	}

	case 4: {
		u32 tmp;
		__get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
		if (likely(!ret))
			__put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
		if (likely(!ret))
			__put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,(__force void *)src,size);
	}
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
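
/*
 * Illustrative sketch, not from the original source: the nocache variants
 * are intended for large, one-shot copies where polluting the CPU caches
 * with the copied data is undesirable.  A hypothetical caller:
 *
 *	int example_fill_buffer(void *dst, const void __user *src, unsigned len)
 *	{
 *		if (!access_ok(VERIFY_READ, src, len))
 *			return -EFAULT;
 *		if (__copy_from_user_nocache(dst, src, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */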

#endif /* __X86_64_UACCESS_H */