/*
 * Suspend support specific for x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

struct saved_context saved_context;

unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
unsigned long saved_context_eflags;

void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
	asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
	asm volatile ("str %0" : "=m" (ctxt->tr));

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	/*
	 * control registers
	 */
	rdmsrl(MSR_EFER, ctxt->efer);
	asm volatile ("movq %%cr0, %0" : "=r" (ctxt->cr0));
	asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2));
	asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3));
	asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4));
	asm volatile ("movq %%cr8, %0" : "=r" (ctxt->cr8));
}

void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary
	 */
	kernel_fpu_end();
}
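/*
 * Illustrative usage sketch: the save helpers above and the restore path
 * below are meant to bracket the actual sleep transition.  In this sketch
 * do_low_level_sleep() is a hypothetical placeholder for the firmware or
 * assembly entry point; only the call pattern around save_processor_state()
 * and restore_processor_state() is being shown.
 */
#if 0
static int example_enter_sleep(void)
{
	int err;

	save_processor_state();		/* snapshot segments, MSRs, CRs, FPU */
	err = do_low_level_sleep();	/* hypothetical low-level sleep entry */
	restore_processor_state();	/* undo the snapshot on wake-up */
	return err;
}
#endif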
void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	wrmsrl(MSR_EFER, ctxt->efer);
	asm volatile ("movq %0, %%cr8" :: "r" (ctxt->cr8));
	asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4));
	asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3));
	asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2));
	asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0));

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));

	/*
	 * segment registers
	 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	fix_processor_context();

	do_fpu_end();
	mtrr_ap_init();
}

void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}

void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	set_tss_desc(cpu, t);	/* This just modifies memory; it should not be
				 * necessary.  But... this is necessary, because
				 * 386 hardware has the concept of a busy TSS or
				 * some similar stupidity. */

	cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 */
	if (current->thread.debugreg7) {
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}
}

#ifdef CONFIG_SOFTWARE_SUSPEND
/* Defined in arch/x86_64/kernel/suspend_asm.S */
extern int restore_image(void);

pgd_t *temp_level4_pgt;

static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		unsigned long paddr;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end)
			break;

		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end)
				break;
			pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
	}
	return 0;
}

static int set_up_temporary_mappings(void)
{
	unsigned long start, end, next;
	int error;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	start = (unsigned long)pfn_to_kaddr(0);
	end = (unsigned long)pfn_to_kaddr(end_pfn);

	for (; start < end; start = next) {
		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!pud)
			return -ENOMEM;
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
			return error;
		set_pgd(temp_level4_pgt + pgd_index(start),
			mk_kernel_pgd(__pa(pud)));
	}
	return 0;
}
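/*
 * For reference, the granularity used by the temporary mapping above,
 * assuming the standard x86-64 4-level page-table layout: each PMD entry
 * written by res_phys_pud_init() is a 2 MB large page (_PAGE_PSE), each PUD
 * entry covers 512 * 2 MB = 1 GB, and each PGD slot filled in
 * set_up_temporary_mappings() covers PGDIR_SIZE = 512 * 1 GB = 512 GB of the
 * direct mapping.
 */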
int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	if ((error = set_up_temporary_mappings()))
		return error;

	restore_image();
	return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
#endif /* CONFIG_SOFTWARE_SUSPEND */
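/*
 * Illustrative sketch of how pfn_is_nosave() is consumed: the generic
 * snapshot code skips page frames inside the __nosave section when building
 * the hibernation image.  saveable() is a hypothetical helper shown only to
 * make the call pattern explicit.
 */
#if 0
static int saveable(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return 0;		/* no backing memory at this pfn */
	if (pfn_is_nosave(pfn))
		return 0;		/* lives in __nosave, do not copy */
	return 1;
}
#endif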