/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

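/*
 * Indexes into the page_list array that machine_kexec() builds and
 * hands to relocate_kernel(); the assembly in relocate_kernel_*.S
 * indexes the same array with these constants.
 */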
#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif /* CONFIG_X86_32 */

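/*
 * Upper bound on the size of the relocate_kernel() code that is copied
 * into the control page; the assembly must fit within this many bytes.
 */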
# define KEXEC_CONTROL_CODE_MAX_SIZE	2048

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/ptrace.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can
 * return, i.e. the highest page that is mapped directly into kernel
 * memory, so that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE	4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from a 64-bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and a second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif /* CONFIG_X86_32 */

/*
 * This function is responsible for capturing the register state if we
 * are coming in via panic; otherwise it just fixes up ss and sp if we
 * are coming in via a kernel-mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" : "=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" : "=a"(newregs->es));
		asm volatile("pushfl; popl %0" : "=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
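		/*
		 * Reading a segment register into a 32-bit GPR
		 * zero-extends the 16-bit selector on modern CPUs,
		 * hence the detour through %eax.
		 */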
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("pushfq; popq %0" : "=m"(newregs->flags));
#endif /* CONFIG_X86_32 */
		newregs->ip = _THIS_IP_;
	}
}
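
/*
 * Typical use, sketched from the generic crash path (cf. __crash_kexec()
 * in kernel/kexec_core.c; the surrounding details are illustrative):
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);
 *	crash_save_vmcoreinfo();
 *	machine_crash_shutdown(&fixed_regs);
 *	machine_kexec(kexec_crash_image);
 */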

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int host_mem_enc_active);
#endif
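
/*
 * On 64-bit, page_list points at an array of PAGES_NR entries indexed
 * by the PA_* and VA_* constants at the top of this file. A sketch of
 * how machine_kexec() in arch/x86/kernel/machine_kexec_64.c fills it
 * in (the exact expressions here are illustrative, not normative):
 *
 *	unsigned long page_list[PAGES_NR];
 *
 *	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
 *	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
 *	page_list[PA_TABLE_PAGE]   =
 *		(unsigned long)__pa(page_address(image->control_code_page));
 *	page_list[PA_SWAP_PAGE]    = page_to_pfn(image->swap_page)
 *						<< PAGE_SHIFT;
 */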

#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
};
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_64
/*
 * The number and order of the fields in this structure must match
 * those in arch/x86/purgatory/entry64.S. If you make a change here,
 * make the corresponding change in purgatory too.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};
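
/*
 * A build-time check of that layout contract could look like the
 * sketch below; the offsets are derived from the field order above,
 * not taken from entry64.S itself, so treat them as illustrative:
 *
 *	static_assert(offsetof(struct kexec_entry64_regs, rax) == 0x00);
 *	static_assert(offsetof(struct kexec_entry64_regs, rsp) == 0x20);
 *	static_assert(offsetof(struct kexec_entry64_regs, rip) == 0x80);
 */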

extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
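
/*
 * The two hooks above exist because, with SME active, kexec pages must
 * be mapped decrypted while in use and re-encrypted before they are
 * freed; cf. their definitions in arch/x86/kernel/machine_kexec_64.c.
 */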

void arch_kexec_protect_crashkres(void);
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres

void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
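
/*
 * arch_kexec_protect_crashkres() above marks the crashkernel region
 * read-only once a crash image is loaded, and the unprotect variant
 * makes it writable again for (re)loading; on x86_64 this is done via
 * set_pages_ro()/set_pages_rw().
 */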

#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add

int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
#endif /* CONFIG_KEXEC_FILE */
#endif /* CONFIG_X86_64 */

extern void kdump_nmi_shootdown_cpus(void);

#ifdef CONFIG_CRASH_HOTPLUG
void arch_crash_handle_hotplug_event(struct kimage *image);
#define arch_crash_handle_hotplug_event arch_crash_handle_hotplug_event

#ifdef CONFIG_HOTPLUG_CPU
int arch_crash_hotplug_cpu_support(void);
#define crash_hotplug_cpu_support arch_crash_hotplug_cpu_support
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_crash_hotplug_memory_support(void);
#define crash_hotplug_memory_support arch_crash_hotplug_memory_support
#endif

unsigned int arch_crash_get_elfcorehdr_size(void);
#define crash_get_elfcorehdr_size arch_crash_get_elfcorehdr_size
#endif /* CONFIG_CRASH_HOTPLUG */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */