/*
 * X86-64 specific CPU setup.
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
 * See setup.c for older changelog.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <asm/bootsetup.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/i387.h>
#include <asm/percpu.h>
#include <asm/proto.h>
#include <asm/sections.h>

char x86_boot_params[BOOT_PARAM_SIZE] __initdata;

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(_cpu_pda);
struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;

struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));

unsigned long __supported_pte_mask __read_mostly = ~0UL;
static int do_not_nx __cpuinitdata = 0;

/* noexec=on|off
Control non-executable mappings for 64bit processes.

on	Enable (default)
off	Disable
*/
static int __init nonx_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2)) {
		__supported_pte_mask |= _PAGE_NX;
		do_not_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		do_not_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
	return 0;
}
early_param("noexec", nonx_setup);

int force_personality32 = 0;

/* noexec32=on|off
Control non-executable heap for 32bit processes.
To control the stack too use noexec=off

on	PROT_READ does not imply PROT_EXEC for 32bit processes
off	PROT_READ implies PROT_EXEC (default)
*/
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);
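
/*
 * Illustrative note, not part of this file: force_personality32 is
 * consumed on the 32bit ELF loader side (assumed to live in
 * ia32_binfmt.c in this era), roughly:
 *
 *	current->personality |= force_personality32;
 *
 * so with noexec32=off the READ_IMPLIES_EXEC bit makes every PROT_READ
 * mapping executable for compat tasks.
 */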

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 * Always point %gs to its beginning.
 */
void __init setup_per_cpu_areas(void)
{
	int i;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;

	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
	for_each_cpu_mask (i, cpu_possible_map) {
		char *ptr;

		if (!NODE_DATA(cpu_to_node(i))) {
			printk(KERN_NOTICE "cpu %d has no node, num_online_nodes %d\n",
			       i, num_online_nodes());
			ptr = alloc_bootmem_pages(size);
		} else {
			ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
		}
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}
}

void pda_init(int cpu)
{
	struct x8664_pda *pda = cpu_pda(cpu);

	/* Set up data that may be needed in __get_free_pages early */
	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
	/* Memory clobbers used to order PDA accesses */
	mb();
	wrmsrl(MSR_GS_BASE, pda);
	mb();

	pda->cpunumber = cpu;
	pda->irqcount = -1;
	pda->kernelstack =
		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
	pda->active_mm = &init_mm;
	pda->mmu_state = 0;

	if (cpu == 0) {
		/* others are initialized in smpboot.c */
		pda->pcurrent = &init_task;
		pda->irqstackptr = boot_cpu_stack;
	} else {
		pda->irqstackptr = (char *)
			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
		if (!pda->irqstackptr)
			panic("cannot allocate irqstack for cpu %d", cpu);
	}

	pda->irqstackptr += IRQSTACKSIZE - 64;
}

char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
__attribute__((section(".bss.page_aligned")));

extern asmlinkage void ignore_sysret(void);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit of a strange symbiosis.
	 * They both write to the same internal register. STAR allows
	 * setting CS/DS, but only for a 32bit target; LSTAR sets the
	 * 64bit rip.
	 */
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
}
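
/*
 * For reference, the MSR_STAR layout programmed above, per the AMD64
 * SYSCALL/SYSRET definition (a sketch, not taken from this file):
 *
 *	bits 63..48	__USER32_CS	selector base used by SYSRET
 *	bits 47..32	__KERNEL_CS	selector base used by SYSCALL
 *	bits 31..0	legacy 32bit SYSCALL EIP, unused in long mode
 *
 * The CPU derives SS from these bases (selector + 8), which is why only
 * the code selectors need to be written here.
 */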

void __cpuinit check_efer(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX) || do_not_nx) {
		__supported_pte_mask &= ~_PAGE_NX;
	}
}

unsigned long kernel_eflags;

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, so this function acts as a
 * 'CPU state barrier': nothing should get across.
 * A lot of state is already set up in pda_init().
 */
void __cpuinit cpu_init(void)
{
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
	unsigned long v;
	char *estacks = NULL;
	struct task_struct *me;
	int i;

	/* CPU 0 is initialised in head64.c */
	if (cpu != 0) {
		pda_init(cpu);
	} else {
		estacks = boot_exception_stacks;
	}

	me = current;

	if (cpu_test_and_set(cpu, cpu_initialized))
		panic("CPU#%d already initialized!\n", cpu);

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	if (cpu)
		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);

	cpu_gdt_descr[cpu].size = GDT_SIZE;
	asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
	asm volatile("lidt %0" :: "m" (idt_descr));

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	check_efer();

	/*
	 * Set up and load the per-CPU TSS
	 */
	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		static const unsigned int order[N_EXCEPTION_STACKS] = {
			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
		};
		if (cpu) {
			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
			if (!estacks)
				panic("Cannot allocate exception stack %lu %d\n",
				      v, cpu);
		}
		estacks += PAGE_SIZE << order[v];
		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
	}

	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	enter_lazy_tlb(&init_mm, me);

	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	/*
	 * Clear all 6 debug registers:
	 */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
	set_debugreg(0UL, 6);
	set_debugreg(0UL, 7);

	fpu_init();

	raw_local_save_flags(kernel_eflags);
}
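
/*
 * Illustrative note: the per-cpu IST pointers published in t->ist[]
 * above take effect once IDT entries reference them (assumed to happen
 * in traps.c in this era), e.g.:
 *
 *	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
 *	set_intr_gate_ist(2, &nmi, NMI_STACK);
 *
 * so the CPU unconditionally switches to the matching exception stack
 * when one of those vectors fires.
 */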