#ifndef _ASM_IA64_UACCESS_H
#define _ASM_IA64_UACCESS_H

/*
 * This file defines various macros to transfer memory areas across
 * the user/kernel boundary.  This needs to be done carefully because
 * this code is executed in kernel mode and uses user-specified
 * addresses.  Thus, we need to be careful not to let the user trick
 * us into accessing kernel memory that would normally be
 * inaccessible.  This code is also fairly performance sensitive,
 * so we want to spend as little time doing safety checks as
 * possible.
 *
 * To make matters a bit more interesting, these macros are sometimes
 * also called from within the kernel itself, in which case the address
 * validity check must be skipped.  The get_fs() macro tells us what
 * to do: if get_fs()==USER_DS, checking is performed, if
 * get_fs()==KERNEL_DS, checking is bypassed.
 *
 * Note that even if the memory area specified by the user is in a
 * valid address range, it is still possible that we'll get a page
 * fault while accessing it.  This is handled by filling out an
 * exception-table fixup entry for each instruction that has the
 * potential to fault.  When such a fault occurs, the page fault
 * handler checks whether the faulting instruction has an associated
 * fixup and, if so, sets r8 to -EFAULT, clears r9 to 0, and then
 * resumes execution at the continuation point.
 *
 * Based on <asm-alpha/uaccess.h>.
 *
 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/pgtable.h>
#include <asm/io.h>

/*
 * For historical reasons, the following macros are grossly misnamed:
 */
#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * When accessing user memory, we need to make sure the entire area really is in
 * user-level space.  In order to do this efficiently, we make sure that the page at
 * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
 * point inside the virtually mapped linear page table.
 */
#define __access_ok(addr, size, segment)						\
({											\
	__chk_user_ptr(addr);								\
	(likely((unsigned long) (addr) <= (segment).seg)				\
	 && ((segment).seg == KERNEL_DS.seg						\
	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
})
#define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
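
/*
 * Example (illustrative sketch, not part of this header's interface; "ubuf"
 * and "len" are hypothetical): a driver would typically validate a whole
 * user buffer once with access_ok() and may then use the unchecked
 * __get_user()/__put_user() variants defined below on addresses inside
 * that range:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	// ... unchecked accesses to ubuf[0..len-1] ...
 */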
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * Be careful not to:
 *  (a) re-use the arguments for side effects (sizeof/typeof is ok)
 *  (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())

/*
 * The "__xxx" versions do not do address space checking; they are useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()").
 */
#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

extern long __put_user_unaligned_unknown (void);

#define __put_user_unaligned(x, ptr)							\
({											\
	long __ret;									\
	switch (sizeof(*(ptr))) {							\
	case 1: __ret = __put_user((x), (ptr)); break;					\
	case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))				\
		| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
	case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))				\
		| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
	case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))				\
		| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
	default: __ret = __put_user_unaligned_unknown();				\
	}										\
	__ret;										\
})

extern long __get_user_unaligned_unknown (void);

/*
 * Unlike the put case, we cannot pass shifted lvalues such as "(x) >> 8" as the
 * destination of __get_user(), so each half is read into a temporary and the
 * value is reassembled (little-endian).
 */
#define __get_user_unaligned(x, ptr)							\
({											\
	long __ret;									\
	switch (sizeof(*(ptr))) {							\
	case 1: __ret = __get_user((x), (ptr)); break;					\
	case 2: {									\
		u8 __gu_lo2, __gu_hi2;							\
		__ret = (__get_user(__gu_lo2, (u8 __user *)(ptr)))			\
			| (__get_user(__gu_hi2, (u8 __user *)(ptr) + 1));		\
		(x) = __gu_lo2 | ((u16) __gu_hi2 << 8);					\
		break;									\
	}										\
	case 4: {									\
		u16 __gu_lo4, __gu_hi4;							\
		__ret = (__get_user(__gu_lo4, (u16 __user *)(ptr)))			\
			| (__get_user(__gu_hi4, (u16 __user *)(ptr) + 1));		\
		(x) = __gu_lo4 | ((u32) __gu_hi4 << 16);				\
		break;									\
	}										\
	case 8: {									\
		u32 __gu_lo8, __gu_hi8;							\
		__ret = (__get_user(__gu_lo8, (u32 __user *)(ptr)))			\
			| (__get_user(__gu_hi8, (u32 __user *)(ptr) + 1));		\
		(x) = __gu_lo8 | ((u64) __gu_hi8 << 32);				\
		break;									\
	}										\
	default: __ret = __get_user_unaligned_unknown();				\
	}										\
	__ret;										\
})

#ifdef ASM_SUPPORTED
  struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata4.  */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

# define __get_user_size(val, addr, n, err)							\
do {												\
	register long __gu_r8 asm ("r8") = 0;							\
	register long __gu_r9 asm ("r9");							\
	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"						\
	     "[1:]"										\
	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));			\
	(err) = __gu_r8;									\
	(val) = __gu_r9;									\
} while (0)
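
/*
 * For reference, a rough sketch (not literal assembler output) of what
 * __get_user_size(v, p, 4, e) generates under ASM_SUPPORTED:
 *
 *	[1:]	ld4 r9=[p]		// may fault
 *		.xdata4 "__ex_table", 1b-., 1f-.+4
 *	[1:]				// continuation point after a fault
 *
 * The "+4" sets bit 2 of the continuation address, which tells the fixup
 * handler to clear r9 in addition to setting r8 to -EFAULT (see the
 * exception_table_entry definition near the end of this file).
 */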
/*
 * The "__put_user_size()" macro tells gcc that it reads from memory rather than
 * writing to it.  This is safe because the store does not write to any memory
 * gcc knows about, so there are no aliasing issues.
 */
# define __put_user_size(val, addr, n, err)							\
do {												\
	register long __pu_r8 asm ("r8") = 0;							\
	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"					\
		      "[1:]"									\
		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));		\
	(err) = __pu_r8;									\
} while (0)

#else /* !ASM_SUPPORTED */
# define RELOC_TYPE	2	/* ip-rel */
# define __get_user_size(val, addr, n, err)				\
do {									\
	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);	\
	(err) = ia64_getreg(_IA64_REG_R8);				\
	(val) = ia64_getreg(_IA64_REG_R9);				\
} while (0)
# define __put_user_size(val, addr, n, err)							\
do {												\
	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val));	\
	(err) = ia64_getreg(_IA64_REG_R8);							\
} while (0)
#endif /* !ASM_SUPPORTED */

extern void __get_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine calls, which
 * could clobber r8 and r9 (among others).  Thus, be careful not to evaluate them while
 * using r8/r9.
 */
#define __do_get_user(check, x, ptr, size, segment)					\
({											\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);				\
	__typeof__ (size) __gu_size = (size);						\
	long __gu_err = -EFAULT;							\
	unsigned long __gu_val = 0;							\
	if (!check || __access_ok(__gu_ptr, __gu_size, segment))			\
		switch (__gu_size) {							\
		case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
		case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
		case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;	\
		case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;	\
		default: __get_user_unknown(); break;					\
		}									\
	(x) = (__typeof__(*(__gu_ptr))) __gu_val;					\
	__gu_err;									\
})

#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)

extern void __put_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine calls, which
 * could clobber r8 (among others).  Thus, be careful not to evaluate them while using r8.
 */
#define __do_put_user(check, x, ptr, size, segment)					\
({											\
	__typeof__ (x) __pu_x = (x);							\
	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);					\
	__typeof__ (size) __pu_size = (size);						\
	long __pu_err = -EFAULT;							\
											\
	if (!check || __access_ok(__pu_ptr, __pu_size, segment))			\
		switch (__pu_size) {							\
		case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;		\
		case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;		\
		case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;		\
		case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;		\
		default: __put_user_unknown(); break;					\
		}									\
	__pu_err;									\
})

#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)
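
/*
 * Example (illustrative, hypothetical code; "arg" is a made-up user pointer):
 * get_user() and put_user() return 0 on success and -EFAULT on a bad address;
 * on failure the destination of get_user() is zeroed (via r9, see above):
 *
 *	int val;
 *	if (get_user(val, (int __user *) arg))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *) arg))
 *		return -EFAULT;
 */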
/*
 * Complex access routines
 */
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
					       unsigned long count);

static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
	return __copy_user(to, (__force void __user *) from, count);
}

static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
	return __copy_user((__force void __user *) to, from, count);
}

#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user
#define copy_to_user(to, from, n)							\
({											\
	void __user *__cu_to = (to);							\
	const void *__cu_from = (from);							\
	long __cu_len = (n);								\
											\
	if (__access_ok(__cu_to, __cu_len, get_fs()))					\
		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
	__cu_len;									\
})

#define copy_from_user(to, from, n)							\
({											\
	void *__cu_to = (to);								\
	const void __user *__cu_from = (from);						\
	long __cu_len = (n);								\
											\
	__chk_user_ptr(__cu_from);							\
	if (__access_ok(__cu_from, __cu_len, get_fs()))					\
		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
	__cu_len;									\
})

#define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))

static inline unsigned long
copy_in_user (void __user *to, const void __user *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
		n = __copy_user(to, from, n);
	return n;
}

extern unsigned long __do_clear_user (void __user *, unsigned long);

#define __clear_user(to, n)	__do_clear_user(to, n)

#define clear_user(to, n)					\
({								\
	unsigned long __cu_len = (n);				\
	if (__access_ok(to, __cu_len, get_fs()))		\
		__cu_len = __do_clear_user(to, __cu_len);	\
	__cu_len;						\
})
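
/*
 * Example (illustrative, hypothetical ioctl handler; "struct foo" and "uarg"
 * are made up): the copy routines return the number of bytes that could NOT
 * be transferred, so a nonzero result indicates a fault:
 *
 *	struct foo kbuf;
 *	if (copy_from_user(&kbuf, uarg, sizeof(kbuf)))
 *		return -EFAULT;
 *	// ... operate on kbuf ...
 *	if (copy_to_user(uarg, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 */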
/*
 * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
 * strlen.
 */
extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);

#define strncpy_from_user(to, from, n)					\
({									\
	const char __user *__sfu_from = (from);				\
	long __sfu_ret = -EFAULT;					\
	if (__access_ok(__sfu_from, 0, get_fs()))			\
		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
	__sfu_ret;							\
})

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern unsigned long __strlen_user (const char __user *);

#define strlen_user(str)				\
({							\
	const char __user *__su_str = (str);		\
	unsigned long __su_ret = 0;			\
	if (__access_ok(__su_str, 0, get_fs()))		\
		__su_ret = __strlen_user(__su_str);	\
	__su_ret;					\
})

/*
 * Returns: 0 if exception before NUL or reaching the supplied limit
 * (N), a value greater than N if the limit would be exceeded, else
 * strlen.
 */
extern unsigned long __strnlen_user (const char __user *, long);

#define strnlen_user(str, len)					\
({								\
	const char __user *__su_str = (str);			\
	unsigned long __su_ret = 0;				\
	if (__access_ok(__su_str, 0, get_fs()))			\
		__su_ret = __strnlen_user(__su_str, (len));	\
	__su_ret;						\
})

/* Generic code can't deal with the location-relative format that we use for compactness.  */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

struct exception_table_entry {
	int addr;	/* location-relative address of insn this fixup is for */
	int cont;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
};

extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
extern const struct exception_table_entry *search_exception_tables (unsigned long addr);

static inline int
ia64_done_with_exception (struct pt_regs *regs)
{
	const struct exception_table_entry *e;
	e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
	if (e) {
		ia64_handle_exception(regs, e);
		return 1;
	}
	return 0;
}

#define ARCH_HAS_TRANSLATE_MEM_PTR	1
static __inline__ char *
xlate_dev_mem_ptr (unsigned long p)
{
	struct page *page;
	char *ptr;

	page = pfn_to_page(p >> PAGE_SHIFT);
	if (PageUncached(page))
		ptr = (char *)p + __IA64_UNCACHED_OFFSET;
	else
		ptr = __va(p);

	return ptr;
}

/*
 * Convert a virtual cached kernel memory pointer to an uncached pointer
 */
static __inline__ char *
xlate_dev_kmem_ptr (char *p)
{
	struct page *page;
	char *ptr;

	page = virt_to_page((unsigned long)p);
	if (PageUncached(page))
		ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
	else
		ptr = p;

	return ptr;
}

#endif /* _ASM_IA64_UACCESS_H */