#ifndef _ASM_IA64_SYSTEM_H
#define _ASM_IA64_SYSTEM_H

/*
 * System defines. Note that this is included both from .c and .S
 * files, so it should contain only defines, not any C code. This is
 * based on information published in the Processor Abstraction Layer
 * and the System Abstraction Layer manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/percpu.h>

#define GATE_ADDR		RGN_BASE(RGN_GATE)

/*
 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
 */
#define KERNEL_START		(GATE_ADDR+__IA64_UL_CONST(0x100000000))
#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)

#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/types.h>

struct pci_vector_struct {
	__u16 segment;	/* PCI Segment number */
	__u16 bus;	/* PCI Bus number */
	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
	__u32 irq;	/* IRQ assigned */
};

extern struct ia64_boot_param {
	__u64 command_line;		/* physical address of command line arguments */
	__u64 efi_systab;		/* physical address of EFI system table */
	__u64 efi_memmap;		/* physical address of EFI memory map */
	__u64 efi_memmap_size;		/* size of EFI memory map */
	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
	__u32 efi_memdesc_version;	/* memory descriptor version */
	struct {
		__u16 num_cols;	/* number of columns on console output device */
		__u16 num_rows;	/* number of rows on console output device */
		__u16 orig_x;	/* cursor's x position */
		__u16 orig_y;	/* cursor's y position */
	} console_info;
	__u64 fpswa;		/* physical address of the fpswa interface */
	__u64 initrd_start;
	__u64 initrd_size;
} *ia64_boot_param;

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
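/*
 * A minimal usage sketch (not from the original header): the classic
 * producer/consumer pairing of wmb() and rmb().  The "data" and
 * "ready" variables and the use() helper are hypothetical.
 *
 *	static int data, ready;
 *
 *	void producer(void)
 *	{
 *		data = 42;
 *		wmb();			(payload visible before the flag)
 *		ready = 1;
 *	}
 *
 *	void consumer(void)
 *	{
 *		while (!ready)
 *			cpu_relax();
 *		rmb();			(flag read ordered before data read)
 *		use(data);
 *	}
 *
 * Without the wmb(), the store to "ready" may become visible before
 * the store to "data"; without the rmb(), the load of "data" may be
 * satisfied before the load of "ready".
 */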
#define mb()	ia64_mf()
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_SMP
# define smp_mb()	mb()
# define smp_rmb()	rmb()
# define smp_wmb()	wmb()
# define smp_read_barrier_depends()	read_barrier_depends()
#else
# define smp_mb()	barrier()
# define smp_rmb()	barrier()
# define smp_wmb()	barrier()
# define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)

#define safe_halt()	ia64_pal_halt_light()	/* PAL_HALT_LIGHT */

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

/* For spinlocks etc */

/*
 * - clearing psr.i is implicitly serialized (visible by next insn)
 * - setting psr.i requires data serialization
 * - we need a stop-bit before reading PSR because we sometimes
 *   write a floating-point register right before reading the PSR
 *   and that writes to PSR.mfl
 */
#define __local_irq_save(x)			\
do {						\
	ia64_stop();				\
	(x) = ia64_getreg(_IA64_REG_PSR);	\
	ia64_stop();				\
	ia64_rsm(IA64_PSR_I);			\
} while (0)

#define __local_irq_disable()			\
do {						\
	ia64_stop();				\
	ia64_rsm(IA64_PSR_I);			\
} while (0)

#define __local_irq_restore(x)	ia64_intrin_local_irq_restore((x) & IA64_PSR_I)

#ifdef CONFIG_IA64_DEBUG_IRQ

  extern unsigned long last_cli_ip;

# define __save_ip()		last_cli_ip = ia64_getreg(_IA64_REG_IP)

# define local_irq_save(x)					\
do {								\
	unsigned long psr;					\
								\
	__local_irq_save(psr);					\
	if (psr & IA64_PSR_I)					\
		__save_ip();					\
	(x) = psr;						\
} while (0)

# define local_irq_disable()	do { unsigned long x; local_irq_save(x); } while (0)

# define local_irq_restore(x)					\
do {								\
	unsigned long old_psr, psr = (x);			\
								\
	local_save_flags(old_psr);				\
	__local_irq_restore(psr);				\
	if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I))	\
		__save_ip();					\
} while (0)

#else /* !CONFIG_IA64_DEBUG_IRQ */
# define local_irq_save(x)	__local_irq_save(x)
# define local_irq_disable()	__local_irq_disable()
# define local_irq_restore(x)	__local_irq_restore(x)
#endif /* !CONFIG_IA64_DEBUG_IRQ */

#define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })

#define irqs_disabled()				\
({						\
	unsigned long __ia64_id_flags;		\
	local_save_flags(__ia64_id_flags);	\
	(__ia64_id_flags & IA64_PSR_I) == 0;	\
})
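/*
 * A minimal usage sketch (not from the original header) of the
 * interrupt-masking macros above; "my_counter" is hypothetical.
 * Saving and restoring the PSR, rather than unconditionally
 * re-enabling, keeps the section safe when it nests inside code that
 * already ran with interrupts disabled:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	my_counter++;			(safe from local interrupts)
 *	local_irq_restore(flags);
 */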
#ifdef __KERNEL__

#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
#else
# define IS_IA32_PROCESS(regs)		0
struct task_struct;
static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
#endif

/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
extern struct task_struct *ia64_switch_to (void *next_task);

struct task_struct;

extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);

#ifdef CONFIG_PERFMON
  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE() (0)
#endif

#define IA64_HAS_EXTRA_STATE(t)							\
	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
	 || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())

#define __switch_to(prev,next,last) do {					\
	if (IA64_HAS_EXTRA_STATE(prev))						\
		ia64_save_extra(prev);						\
	if (IA64_HAS_EXTRA_STATE(next))						\
		ia64_load_extra(next);						\
	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);	\
	(last) = ia64_switch_to((next));					\
} while (0)

#ifdef CONFIG_SMP
/*
 * In the SMP case, we save the fph state when context-switching away from a thread that
 * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
 * the latest fph state from another CPU.  In other words: eager save, lazy restore.
 */
# define switch_to(prev,next,last) do {						\
	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
		ia64_psr(task_pt_regs(prev))->mfh = 0;				\
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
		__ia64_save_fpu((prev)->thread.fph);				\
	}									\
	__switch_to(prev, next, last);						\
	/* "next" in old context is "current" in new context */		\
	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&	\
		     (task_cpu(current) !=					\
		      task_thread_info(current)->last_cpu))) {			\
		platform_migrate(current);					\
		task_thread_info(current)->last_cpu = task_cpu(current);	\
	}									\
} while (0)
#else
# define switch_to(prev,next,last)	__switch_to(prev, next, last)
#endif

#define __ARCH_WANT_UNLOCKED_CTXSW
#define ARCH_HAS_PREFETCH_SWITCH_STACK
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)

void cpu_idle_wait(void);
void sched_cacheflush(void);

#define arch_align_stack(x) (x)

void default_idle(void);

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_IA64_SYSTEM_H */