#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S. Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <linux/config.h>
#include <linux/cache.h>

#include <asm/ptrace.h>
#include <asm/kregs.h>
#include <asm/types.h>

#define IA64_NUM_DBG_REGS	8
/*
 * Limits for PMC and PMD are set to less than the maximum architected
 * values, but should be sufficient for a while.
 */
#define IA64_NUM_PMC_REGS	32
#define IA64_NUM_PMD_REGS	32

#define DEFAULT_MAP_BASE	0x2000000000000000
#define DEFAULT_TASK_SIZE	0xa000000000000000

/*
 * TASK_SIZE really is a misnomer.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		(current->thread.task_size)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

/*
 * Bus types
 */
#define EISA_bus 0
#define EISA_bus__is_a_macro	/* for versions in ksyms.c */
#define MCA_bus 0
#define MCA_bus__is_a_macro	/* for versions in ksyms.c */

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_KRBS_SYNCED	(__IA64_UL(1) << 5)	/* krbs synced with process vm? */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */
#define IA64_THREAD_XSTACK	(__IA64_UL(1) << 8)	/* stack executable by default? */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)

/*
 * This shift should be large enough to be able to represent
 * 1000000/itc_freq with good accuracy while being small enough to fit
 * 1000000<<IA64_USEC_PER_CYC_SHIFT in 64 bits.
 */
#define IA64_USEC_PER_CYC_SHIFT	41
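/*
 * For illustration only (not from the original header): usec_per_cyc (in
 * struct cpuinfo_ia64 below) is a fixed-point value scaled by
 * 2^IA64_USEC_PER_CYC_SHIFT, so an ITC cycle count converts to microseconds
 * with one multiply and one shift.  A sketch, with itc_delta standing in for
 * a measured cycle count:
 *
 *	usecs = (itc_delta * local_cpu_data->usec_per_cyc)
 *		>> IA64_USEC_PER_CYC_SHIFT;
 *
 * With the shift at 41, 1000000 << 41 is about 2^61 and still fits in 64
 * bits, while 1000000/itc_freq retains 41 fractional bits of precision.
 */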

#ifndef __ASSEMBLY__

#include <linux/threads.h>
#include <linux/cache.h>

#include <asm/fpu.h>
#include <asm/offsets.h>
#include <asm/page.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>

/* the processor status register (PSR), expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	/* irq_stat must be 64-bit aligned */
	union {
		struct {
			__u32 irq_count;
			__u32 bh_count;
		} f;
		__u64 irq_and_bh_counts;
	} irq_stat;
	__u32 softirq_pending;
	__u32 phys_stacked_size_p8;	/* size of physical stacked registers + 8 */
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer match value to use for next clock tick */
	__u64 *pgd_quick;
	__u64 *pmd_quick;
	__u64 *pte_quick;
	__u64 pgtable_cache_sz;
	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 usec_per_cyc;	/* 2^IA64_USEC_PER_CYC_SHIFT*1000000/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u64 ptce_base;
	__u32 ptce_count[2];
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */
#ifdef CONFIG_SMP
	int processor;
	__u64 loops_per_jiffy;
	__u64 ipi_count;
	__u64 prof_counter;
	__u64 prof_multiplier;
# ifdef CONFIG_PERFMON
	__u32 pfm_syst_wide;
	__u32 pfm_dcr_pp;
# endif
	union {
		/*
		 * This is written to by *other* CPUs,
		 * so isolate it in its own cacheline.
		 */
		__u64 operation;
		char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
	} ipi;
#endif
#ifdef CONFIG_NUMA
	void *node_directory;
	int numa_node_id;
	struct cpuinfo_ia64 *cpu_data[NR_CPUS];
#endif
	/* Platform specific word.  MUST BE LAST IN STRUCT */
	__u64 platform_specific;
} __attribute__ ((aligned (PAGE_SIZE)));

/*
 * The "local" data pointer.  It points to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 */
#define local_cpu_data		((struct cpuinfo_ia64 *) PERCPU_ADDR)
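/*
 * For illustration only: since PERCPU_ADDR maps to the executing CPU's own
 * copy, hot-path accesses need no array indexing or cpu-number lookup, e.g.:
 *
 *	__u64 freq = local_cpu_data->itc_freq;	  (this CPU's ITC frequency)
 *
 * Reaching *another* CPU's data goes through cpu_data(cpu), defined below.
 */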

/*
 * On NUMA systems, cpu_data for each cpu is allocated during cpu_init() and is allocated
 * on the node that contains the cpu.  This minimizes off-node memory references.  cpu_data
 * for each cpu contains an array of pointers to the cpu_data structures of each of the
 * other cpus.
 *
 * On non-NUMA systems, cpu_data is a static array allocated at compile time.  References
 * to the cpu_data of another cpu are done by direct references to the appropriate entry
 * of the array.
 */
#ifdef CONFIG_NUMA
# define cpu_data(cpu)		local_cpu_data->cpu_data[cpu]
# define numa_node_id()		(local_cpu_data->numa_node_id)
#else
  extern struct cpuinfo_ia64 _cpu_data[NR_CPUS];
# define cpu_data(cpu)		(&_cpu_data[cpu])
#endif

extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int *) (addr));								\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
				| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int *) (addr));								\
})
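/*
 * For illustration only: these four macros back the generic prctl()
 * unaligned-access and fp-emulation controls.  From user space,
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);
 *
 * ends up setting IA64_THREAD_UAC_SIGBUS in current->thread.flags via
 * SET_UNALIGN_CTL().  The shift/mask dance keeps the two UAC bits (and the
 * two FPEMU bits) isolated from the rest of the flags word.
 */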

struct siginfo;

struct thread_struct {
	__u64 ksp;			/* kernel stack pointer */
	unsigned long flags;		/* various flags */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 task_size;		/* limit for task size */
	struct siginfo *siginfo;	/* current siginfo struct for ptrace() */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;			/* IA32 EFLAGS reg */
	__u64 fsr;			/* IA32 floating pt status reg */
	__u64 fcr;			/* IA32 floating pt control reg */
	__u64 fir;			/* IA32 fp except. instr. reg */
	__u64 fdr;			/* IA32 fp except. data reg */
	__u64 csd;			/* IA32 code selector descriptor */
	__u64 ssd;			/* IA32 stack selector descriptor */
	__u64 old_k1;			/* old value of ar.k1 */
	__u64 old_iob;			/* old IOBase value */
# define INIT_THREAD_IA32	0, 0, 0x17800000037fULL, 0, 0, 0, 0, 0, 0,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
	__u64 pmc[IA64_NUM_PMC_REGS];
	__u64 pmd[IA64_NUM_PMD_REGS];
	unsigned long pfm_ovfl_block_reset;	/* non-zero if we need to block or reset regs on ovfl */
	void *pfm_context;			/* pointer to detailed PMU context */
	atomic_t pfm_notifiers_check;		/* when >0, will cleanup ctx_notify_task in tasklist */
	atomic_t pfm_owners_check;		/* when >0, will cleanup ctx_owner in tasklist */
	void *pfm_smpl_buf_list;		/* list of sampling buffers to vfree */
# define INIT_THREAD_PM		{0, }, {0, }, 0, NULL, {0}, {0}, NULL,
#else
# define INIT_THREAD_PM
#endif
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
	int last_fph_cpu;
};

#define INIT_THREAD {					\
	0,				/* ksp */	\
	0,				/* flags */	\
	DEFAULT_MAP_BASE,		/* map_base */	\
	DEFAULT_TASK_SIZE,		/* task_size */	\
	0,				/* siginfo */	\
	INIT_THREAD_IA32				\
	INIT_THREAD_PM					\
	{0, },				/* dbr */	\
	{0, },				/* ibr */	\
	{{{{0}}}, }			/* fph */	\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	set_fs(USER_DS);									\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL | IA64_PSR_SP))	\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = IA64_RBS_BOT;							\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;		/* allocate 16 byte scratch area */		\
	if (!__builtin_expect (current->mm->dumpable, 1)) {					\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0;								\
		regs->pr = 0;									\
												\
		regs->b6 = 0;									\
		regs->r1 = 0; regs->r2 = 0; regs->r3 = 0;					\
		regs->r13 = 0; regs->r14 = 0; regs->r15 = 0;					\
		regs->r9 = 0; regs->r11 = 0;							\
		regs->r16 = 0; regs->r17 = 0; regs->r18 = 0; regs->r19 = 0;			\
		regs->r20 = 0; regs->r21 = 0; regs->r22 = 0; regs->r23 = 0;			\
		regs->r24 = 0; regs->r25 = 0; regs->r26 = 0; regs->r27 = 0;			\
		regs->r28 = 0; regs->r29 = 0; regs->r30 = 0; regs->r31 = 0;			\
		regs->ar_ccv = 0;								\
		regs->b0 = 0; regs->b7 = 0;							\
		regs->f6.u.bits[0] = 0; regs->f6.u.bits[1] = 0;					\
		regs->f7.u.bits[0] = 0; regs->f7.u.bits[1] = 0;					\
		regs->f8.u.bits[0] = 0; regs->f8.u.bits[1] = 0;					\
		regs->f9.u.bits[0] = 0; regs->f9.u.bits[1] = 0;					\
	}											\
} while (0)
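/*
 * For illustration only: start_thread() is invoked by the binary loaders once
 * the new image is in place; a sketch of the call as it appears in an exec
 * path, with elf_entry and stack_top standing in for values the loader
 * computed:
 *
 *	start_thread(regs, elf_entry, stack_top);
 *
 * Afterwards cr.iip points at the entry point, the RSE is in eager mode at
 * privilege level 3, and r12 points just below a 16-byte scratch area.
 */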

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread.  This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#ifdef CONFIG_PERFMON
  extern void release_thread (struct task_struct *task);
#else
# define release_thread(dead_task)
#endif

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (i.e., the swapper or direct
 * descendants which haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern int kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
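/*
 * For illustration only: a minimal kernel_thread() call in the style of
 * 2.4-era boot code, with my_daemon as a hypothetical thread function:
 *
 *	static int my_daemon (void *arg)
 *	{
 *		return 0;	(a real daemon would loop doing work here)
 *	}
 *
 *	kernel_thread(my_daemon, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
 *
 * The flags are clone() flags; the child starts executing fn(arg) and
 * terminates when fn returns.
 */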

/* Copy and release all segment info associated with a VM */
#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = ia64_task_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)

static inline unsigned long
ia64_get_kr (unsigned long regnum)
{
	unsigned long r;

	switch (regnum) {
	      case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break;
	      case 1: asm volatile ("mov %0=ar.k1" : "=r"(r)); break;
	      case 2: asm volatile ("mov %0=ar.k2" : "=r"(r)); break;
	      case 3: asm volatile ("mov %0=ar.k3" : "=r"(r)); break;
	      case 4: asm volatile ("mov %0=ar.k4" : "=r"(r)); break;
	      case 5: asm volatile ("mov %0=ar.k5" : "=r"(r)); break;
	      case 6: asm volatile ("mov %0=ar.k6" : "=r"(r)); break;
	      case 7: asm volatile ("mov %0=ar.k7" : "=r"(r)); break;
	}
	return r;
}

static inline void
ia64_set_kr (unsigned long regnum, unsigned long r)
{
	switch (regnum) {
	      case 0: asm volatile ("mov ar.k0=%0" :: "r"(r)); break;
	      case 1: asm volatile ("mov ar.k1=%0" :: "r"(r)); break;
	      case 2: asm volatile ("mov ar.k2=%0" :: "r"(r)); break;
	      case 3: asm volatile ("mov ar.k3=%0" :: "r"(r)); break;
	      case 4: asm volatile ("mov ar.k4=%0" :: "r"(r)); break;
	      case 5: asm volatile ("mov ar.k5=%0" :: "r"(r)); break;
	      case 6: asm volatile ("mov ar.k6=%0" :: "r"(r)); break;
	      case 7: asm volatile ("mov ar.k7=%0" :: "r"(r)); break;
	}
}

static inline struct task_struct *
ia64_get_fpu_owner (void)
{
	return (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER);
}

static inline void
ia64_set_fpu_owner (struct task_struct *t)
{
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) t);
}

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

#define ia64_fph_enable()	asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
#define ia64_fph_disable()	asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory");

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline void
ia64_fc (void *addr)
{
	asm volatile ("fc %0" :: "r"(addr) : "memory");
}

static inline void
ia64_sync_i (void)
{
	asm volatile (";; sync.i" ::: "memory");
}

static inline void
ia64_srlz_i (void)
{
	asm volatile (";; srlz.i ;;" ::: "memory");
}

static inline void
ia64_srlz_d (void)
{
	asm volatile (";; srlz.d" ::: "memory");
}

static inline __u64
ia64_get_rr (__u64 reg_bits)
{
	__u64 r;
	asm volatile ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory");
	return r;
}

static inline void
ia64_set_rr (__u64 reg_bits, __u64 rr_val)
{
	asm volatile ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
}

static inline __u64
ia64_get_dcr (void)
{
	__u64 r;
	asm volatile ("mov %0=cr.dcr" : "=r"(r));
	return r;
}

static inline void
ia64_set_dcr (__u64 val)
{
	asm volatile ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_lid (void)
{
	__u64 r;
	asm volatile ("mov %0=cr.lid" : "=r"(r));
	return r;
}

static inline void
ia64_invala (void)
{
	asm volatile ("invala" ::: "memory");
}

/*
 * Save the processor status flags in FLAGS and then clear the interrupt collection and
 * interrupt enable bits.  Don't trigger any mandatory RSE references while this bit is
 * off!
 */
static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory");
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory");
}

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
	asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
	if (target_mask & 0x1)
		asm volatile ("itr.i itr[%0]=%1"
			      :: "r"(tr_num), "r"(pte) : "memory");
	if (target_mask & 0x2)
		asm volatile (";;itr.d dtr[%0]=%1"
			      :: "r"(tr_num), "r"(pte) : "memory");
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
	asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		asm volatile ("itc.i %0;;" :: "r"(pte) : "memory");
	if (target_mask & 0x2)
		asm volatile (";;itc.d %0;;" :: "r"(pte) : "memory");
}
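/*
 * For illustration only: a pinned translation is normally inserted with
 * interrupt collection off and then made visible with an instruction
 * serialize.  A sketch, with tr_num/vaddr/pte/log_size standing in for real
 * values:
 *
 *	__u64 psr = ia64_clear_ic();			(psr.i and psr.ic off)
 *	ia64_itr(0x2, tr_num, vaddr, pte, log_size);	(0x2 = dtr only)
 *	ia64_set_psr(psr);				(restore previous psr)
 *	ia64_srlz_i();
 */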

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		asm volatile ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
	if (target_mask & 0x2)
		asm volatile ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	asm volatile ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory");
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	asm volatile ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory");
}

static inline __u64
ia64_get_cpuid (__u64 regnum)
{
	__u64 r;

	asm ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum));
	return r;
}

static inline void
ia64_eoi (void)
{
	asm ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
}

static inline void
ia64_set_lrr0 (unsigned long val)
{
	asm volatile ("mov cr.lrr0=%0;; srlz.d" :: "r"(val) : "memory");
}

#define cpu_relax()	do { } while (0)

static inline void
ia64_set_lrr1 (unsigned long val)
{
	asm volatile ("mov cr.lrr1=%0;; srlz.d" :: "r"(val) : "memory");
}

static inline void
ia64_set_pmv (__u64 val)
{
	asm volatile ("mov cr.pmv=%0" :: "r"(val) : "memory");
}

static inline __u64
ia64_get_pmc (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=pmc[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline void
ia64_set_pmc (__u64 regnum, __u64 value)
{
	asm volatile ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
}

static inline __u64
ia64_get_pmd (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline void
ia64_set_pmd (__u64 regnum, __u64 value)
{
	asm volatile ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
}

/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
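/*
 * For illustration only: the UNaT bit index is simply bits 8..3 of the spill
 * address, because ar.unat tracks one NaT bit per 8-byte slot of any
 * 512-byte-aligned window.  E.g., for a hypothetical spill address 0x1f8:
 *
 *	ia64_unat_pos((void *) 0x1f8) == (0x1f8 >> 3) & 0x3f == 63
 *
 * so ia64_set_unat() deposits the NaT value into bit 63 of *unat.
 */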

/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct thread_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	struct task_struct *p = (void *) ((unsigned long) t - IA64_TASK_THREAD_OFFSET);

	unw_init_from_blocked_task(&info, p);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; asm volatile ("mov %0=ip" : "=r" (_pc)); _pc; })

#define THREAD_SIZE	IA64_STK_OFFSET
/* NOTE: The task struct and the stacks are allocated together.  */
#define alloc_task_struct() \
	((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES))
#define free_task_struct(p)	free_pages((unsigned long) (p), IA64_TASK_STRUCT_LOG_NUM_PAGES)
#define get_task_struct(tsk)	atomic_inc(&virt_to_page(tsk)->count)

#define init_task	(init_task_union.task)
#define init_stack	(init_task_union.stack)

/*
 * Set the correctable machine check vector register
 */
static inline void
ia64_set_cmcv (__u64 val)
{
	asm volatile ("mov cr.cmcv=%0" :: "r"(val) : "memory");
}

/*
 * Read the correctable machine check vector register
 */
static inline __u64
ia64_get_cmcv (void)
{
	__u64 val;

	asm volatile ("mov %0=cr.cmcv" : "=r"(val) :: "memory");
	return val;
}

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	asm volatile ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r));
	return r;
}

static inline void
ia64_set_tpr (__u64 val)
{
	asm volatile ("mov cr.tpr=%0" :: "r"(val));
}

static inline __u64
ia64_get_tpr (void)
{
	__u64 r;
	asm volatile ("mov %0=cr.tpr" : "=r"(r));
	return r;
}

static inline void
ia64_set_irr0 (__u64 val)
{
	asm volatile ("mov cr.irr0=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr0 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile ("mov %0=cr.irr0" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr1 (__u64 val)
{
	asm volatile ("mov cr.irr1=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr1 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile ("mov %0=cr.irr1" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr2 (__u64 val)
{
	asm volatile ("mov cr.irr2=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr2 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile ("mov %0=cr.irr2" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr3 (__u64 val)
{
	asm volatile ("mov cr.irr3=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr3 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile ("mov %0=cr.irr3" : "=r"(val));
	return val;
}
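/*
 * For illustration only: IRR0-IRR3 together hold one pending bit for each of
 * the 256 interrupt vectors, 64 per register.  A sketch of testing whether a
 * hypothetical vector vec is currently pending:
 *
 *	__u64 irr = 0;
 *	switch (vec >> 6) {
 *	      case 0: irr = ia64_get_irr0(); break;
 *	      case 1: irr = ia64_get_irr1(); break;
 *	      case 2: irr = ia64_get_irr2(); break;
 *	      case 3: irr = ia64_get_irr3(); break;
 *	}
 *	pending = (irr >> (vec & 63)) & 1;
 */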

static inline __u64
ia64_get_gp (void)
{
	__u64 val;

	asm ("mov %0=gp" : "=r"(val));
	return val;
}

static inline void
ia64_set_ibr (__u64 regnum, __u64 value)
{
	asm volatile ("mov ibr[%0]=%1" :: "r"(regnum), "r"(value));
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	asm volatile ("mov dbr[%0]=%1" :: "r"(regnum), "r"(value));
#ifdef CONFIG_ITANIUM
	asm volatile (";; srlz.d");
#endif
}

static inline __u64
ia64_get_ibr (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=ibr[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=dbr[%1]" : "=r"(retval) : "r"(regnum));
#ifdef CONFIG_ITANIUM
	asm volatile (";; srlz.d");
#endif
	return retval;
}

#ifdef SMART_COMPILER
# define ia64_rotr(w,n)				\
  ({						\
	__u64 _w = (w), _n = (n);		\
						\
	(_w >> _n) | (_w << (64 - _n));		\
  })
#else
# define ia64_rotr(w,n)							\
  ({									\
	__u64 result;							\
	asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n));	\
	result;								\
  })
#endif

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))

static inline __u64
ia64_thash (__u64 addr)
{
	__u64 result;
	asm ("thash %0=%1" : "=r"(result) : "r" (addr));
	return result;
}

static inline __u64
ia64_tpa (__u64 addr)
{
	__u64 result;
	asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
	return result;
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE 256

extern inline void
prefetch (const void *x)
{
	__asm__ __volatile__ ("lfetch [%0]" : : "r"(x));
}

extern inline void
prefetchw (const void *x)
{
	__asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x));
}

#define spin_lock_prefetch(x)	prefetchw(x)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */