/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

/* The kernel expects to be booted by a Multiboot compliant bootloader.
 * See the Multiboot specifications:
 * www.gnu.org/software/grub/manual/multiboot
 * www.gnu.org/software/grub/manual/multiboot2
 *
 * The multiboot header's flags field is set to 3 (bits 0 and 1), requesting
 * that modules be loaded on page boundaries and that memory map information
 * be provided. Bit 16 of the flags field is not set, so the bootloader takes
 * the layout of the image from its ELF headers rather than from address
 * fields in the multiboot header. (A reference header layout is sketched
 * below.)
 *
 * The bootloader enters the kernel in 32-bit protected mode; long mode and
 * 64-bit instructions are not yet available (Multiboot 1 has no support for
 * a 64-bit entry point). The 32-bit code below therefore performs the setup,
 * including the initialisation of the 64-bit paging structures, then enables
 * long mode manually and continues in 64-bit code. */
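/* For reference, a minimal Multiboot 1 header with flags = 3 is laid out as
 * sketched below. This is an illustration only; the header actually used by
 * the kernel is defined elsewhere, not in this file.
 *
 *     .align 4
 *     .long  0x1BADB002           # magic
 *     .long  0x00000003           # flags: page-aligned modules + memory map
 *     .long  -(0x1BADB002 + 3)    # checksum: magic + flags + checksum == 0
 */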

#include <config.h>
#include <machine/assembler.h>

#define IA32_EFER_MSR 0xC0000080
#define IA32_APIC_BASE_MSR 0x01B
#define APIC_ID_OFFSET 0x020

.section .phys.text

.code32

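/* print_string: write %ecx bytes starting at (%ebx) to I/O port 0x3f8, the
 * conventional COM1 serial port. Assumes %ecx > 0; clobbers %eax, %ebx,
 * %ecx and %edx. */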
BEGIN_FUNC(print_string)
    movw $0x3f8, %dx
1:
    movb (%ebx), %al
    outb %al, %dx
    incl %ebx
    decl %ecx
    jnz  1b
    ret
END_FUNC(print_string)

BEGIN_FUNC(hang)
1:
    hlt
    jmp  1b
END_FUNC(hang)

#ifdef CONFIG_HUGE_PAGE
BEGIN_FUNC(huge_page_check)
    /* CPUID.80000001H:EDX bit 26 reports 1 GiB page support. */
    movl $0x80000001, %eax
    cpuid
    andl $0x04000000, %edx
    jz   1f
    ret
1:
    movl $huge_page_error_string, %ebx
    movl $huge_page_error_size, %ecx
    call print_string
    call hang

huge_page_error_string:
    .string "Huge page not supported by the processor"
    .set huge_page_error_size, . - huge_page_error_string
END_FUNC(huge_page_check)
#endif /* CONFIG_HUGE_PAGE */

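/* setup_pml4 builds the boot page tables. As a summary of the code below:
 *   - _boot_pd holds four page directories whose 2048 entries map physical
 *     memory 0..4 GiB with 2 MiB pages.
 *   - boot_pdpt entries 0-3 point at those four page directories, and entry
 *     510 points at the first one again.
 *   - boot_pml4 entries 0, 256 and 511 all point at boot_pdpt.
 * The result is that the first 4 GiB of physical memory is identity mapped,
 * mapped again at 0xffff800000000000, and its first GiB is also visible in
 * the kernel window at 0xffffffff80000000, which is where the 64-bit kernel
 * code expects to run. */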
BEGIN_FUNC(setup_pml4)
#ifdef CONFIG_HUGE_PAGE
    call huge_page_check
#endif /* CONFIG_HUGE_PAGE */
    /* Clear CR0.PG so paging is off while the tables are built. */
    movl %cr0, %eax
    andl $0x7fffffff, %eax
    movl %eax, %cr0

    /* Zero boot_pml4 (1024 dwords = 4 KiB). */
    movl $boot_pml4, %edi
    movl $0x0, %edx
    movl $1024, %ecx
1:
    movl %edx, (%edi)
    addl $4, %edi
    loop 1b

    /* Zero boot_pdpt. */
    movl $boot_pdpt, %edi
    movl $1024, %ecx
1:
    movl %edx, (%edi)
    addl $4, %edi
    loop 1b

    /* Point PML4 entries 0, 256 and 511 at boot_pdpt
     * (0x7 = present, writable, user). */
    movl $boot_pml4, %edi
    movl $boot_pdpt, %ecx
    orl  $0x7, %ecx
    movl %ecx, (%edi)
    movl %ecx, 0x800(%edi)  /* entry 256 */
    movl %ecx, 4088(%edi)   /* entry 511 */

    /* Point PDPT entries 0-3 at the four page directories in _boot_pd,
     * and entry 510 at the first of them. */
    movl $_boot_pd, %ecx
    orl  $0x7, %ecx
    movl $boot_pdpt, %edi
    movl %ecx, (%edi)
    movl %ecx, 4080(%edi)   /* entry 510 */
    addl $0x1000, %ecx
    movl %ecx, 8(%edi)
    addl $0x1000, %ecx
    movl %ecx, 16(%edi)
    addl $0x1000, %ecx
    movl %ecx, 24(%edi)

    /* Map the first 4 GiB into _boot_pd with 2 MiB pages
     * (0x87 = present, writable, user, page-size). */
    movl $_boot_pd, %edi
    movl $2048, %ecx
    movl $0x87, %edx
2:
    movl %edx, (%edi)
    addl $0x200000, %edx
    addl $8, %edi
    loop 2b
    ret
END_FUNC(setup_pml4)

BEGIN_FUNC(pcid_check)
    /* CPUID.01H:ECX bit 17 reports PCID support. */
    movl $0x1, %eax
    xorl %ecx, %ecx
    cpuid
    andl $0x20000, %ecx
    jz   1f
    ret
1:
    movl $pcid_error_string, %ebx
    movl $pcid_error_size, %ecx
    call print_string
    call hang

pcid_error_string:
    .string "PCIDs not supported by the processor"
    .set pcid_error_size, . - pcid_error_string
END_FUNC(pcid_check)

BEGIN_FUNC(invpcid_check)
    /* CPUID.(EAX=07H,ECX=0):EBX bit 10 reports INVPCID support. */
    movl $0x7, %eax
    xorl %ecx, %ecx
    cpuid
    andl $0x400, %ebx
    jz   1f
    ret
1:
    movl  $invpcid_error_string, %ebx
    movl  $invpcid_error_size, %ecx
    call  print_string
    call  hang

invpcid_error_string:
    .string "INVPCID instruction not supported by the processor"
    .set invpcid_error_size, . - invpcid_error_string
END_FUNC(invpcid_check)

BEGIN_FUNC(syscall_check)
    movl $0x80000001, %eax
    xorl %ecx, %ecx
    cpuid
    andl $0x20000000, %edx
    jz   1f
    ret
1:
    movl  $syscall_error_string, %ebx
    movl  $syscall_error_size, %ecx
    call  print_string
    call  hang

syscall_error_string:
    .string "SYSCALL/SYSRET instruction not supported by the processor"
    .set syscall_error_size, . - syscall_error_string
END_FUNC(syscall_check)

#ifdef CONFIG_FSGSBASE_INST
BEGIN_FUNC(fsgsbase_enable)
    /* CPUID.(EAX=07H,ECX=0):EBX bit 0 reports FSGSBASE support. */
    movl $0x7, %eax
    xorl %ecx, %ecx
    cpuid
    andl $1, %ebx
    jz   1f
    movl %cr4, %eax
    /* Set CR4.FSGSBASE (bit 16) to enable the instructions. */
    orl  $0x10000, %eax
    movl %eax, %cr4
    ret
1:
    movl $fsgsbase_error_string, %ebx
    movl $fsgsbase_error_size, %ecx
    call print_string
    call hang

fsgsbase_error_string:
    .string "fsgsbase instructions not supported by the processor"
    .set fsgsbase_error_size, . - fsgsbase_error_string
END_FUNC(fsgsbase_enable)
#endif /* CONFIG_FSGSBASE_INST */

BEGIN_FUNC(syscall_enable)
    call syscall_check
    /* Set SCE (bit 0) in the extended feature MSR to enable SYSCALL/SYSRET. */
    movl $IA32_EFER_MSR, %ecx
    rdmsr
    orl $0x1, %eax
    wrmsr
    ret
END_FUNC(syscall_enable)

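/* enable_x64_mode follows the architecturally required order for entering
 * long mode: load CR3 with a 4-level page table, set CR4.PAE, set EFER.LME,
 * then set CR0.PG. After that the CPU is in IA-32e mode but still executes
 * 32-bit (compatibility mode) code until CS is reloaded with a 64-bit code
 * segment, which the callers do with a far jump after common_init returns. */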
BEGIN_FUNC(enable_x64_mode)
#ifdef CONFIG_SUPPORT_PCID
    call pcid_check
    call invpcid_check
#endif
    /* Put base pointer in cr3. */
    movl $boot_pml4, %eax
    movl %eax, %cr3
    /* Set PAE (bit 5), as this is required before switching to long mode. */
    movl %cr4, %eax
    orl $0x20, %eax
    movl %eax, %cr4
    /* Set LME (bit 8) in the extended feature MSR. */
    movl $IA32_EFER_MSR, %ecx
    rdmsr
    orl $0x100, %eax
    wrmsr
    /* Set PG (bit 31) of cr0 to enable paging. */
    movl %cr0, %eax
    orl $0x80000000, %eax
    movl %eax, %cr0
#ifdef CONFIG_SUPPORT_PCID
    /* Enable PCID (bit 17), must be done in long mode. */
    movl %cr4, %eax
    orl  $0x20000, %eax
    movl %eax, %cr4
#endif
    ret
END_FUNC(enable_x64_mode)

BEGIN_FUNC(common_init)
    /* Disable paging. */
    movl %cr0, %eax
    andl $0x7fffffff, %eax
    movl %eax, %cr0

#ifdef CONFIG_FSGSBASE_INST
    call fsgsbase_enable
#endif /* CONFIG_FSGSBASE_INST */

    /* Initialize boot PML4 and switch to long mode. */
    call setup_pml4
    call enable_x64_mode
    lgdt _gdt64_ptr

#ifdef CONFIG_SYSCALL
    call syscall_enable
#endif

    ret
END_FUNC(common_init)

BEGIN_FUNC(_start)
    /* Assume we were started by a Multiboot compliant bootloader, e.g. GRUB.
     * The magic number is not checked here; it is verified later, before any
     * Multiboot dependent operations are performed. */
    movl %eax, %edi /* multiboot_magic    */
    movl %ebx, %esi /* multiboot_info_ptr */

    /* Load kernel boot stack pointer. */
    leal boot_stack_top, %esp

    /* Reset EFLAGS register (also disables interrupts etc.). */
    pushl $0
    popf

    /* Already push parameters for calling boot_sys later. Push
     * them as 8 byte values so we can easily pop them in 64-bit code. */
    pushl $0
    pushl %esi /* 2nd parameter: multiboot_info_ptr */
    pushl $0
    pushl %edi /* 1st parameter: multiboot_magic    */

    call common_init

    /* Reload CS with a 64-bit (L-bit) code segment to start executing
     * 64-bit code. */
    ljmp $8, $_start64
END_FUNC(_start)

.code64
.align 4096
BEGIN_FUNC(_start64)
    /* Leave phys code behind and jump to the high kernel virtual address. */
    movabs $_entry_64, %rax
    jmp *%rax
END_FUNC(_start64)

.section .phys.data
_gdt64_ptr:
    .word (3 * 8) - 1 /* Limit: 3 segments * 8 bytes - 1 byte */
    .long _gdt64      /* Address of the 64-bit GDT */

.align 16
_gdt64:
    .quad 0x0000000000000000 /* Null segment */
    /* 64-bit kernel code segment: access byte 0x98 (present, executable),
     * flags 0x20 (L bit set); base and limit are ignored in long mode. */
    .word   0
    .word   0
    .byte   0
    .byte   0x98
    .byte   0x20
    .byte   0
    /* Kernel data segment: access byte 0x90 (present, data). */
    .word   0
    .word   0
    .byte   0
    .byte   0x90
    .byte   0
    .byte   0

.section .phys.bss
.align 4096
_boot_pd:
    /* Four page directories (4 * 4 KiB), filled in by setup_pml4. */
    .fill 16384

.section .boot.text

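/* _entry_64 below is placed in .boot.text, which is linked at kernel virtual
 * addresses rather than physical ones; it is reachable because setup_pml4
 * maps the first GiB of physical memory at 0xffffffff80000000. _start64
 * jumps here via an absolute 64-bit address. */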
BEGIN_FUNC(_entry_64)
    /* Move the stack pointers from their physical addresses into the
     * kernel window at 0xffffffff80000000. */
    movq $0xffffffff80000000, %rax
    addq %rax, %rsp
    addq %rax, %rbp

    /* Pop the multiboot parameters off. */
    pop %rdi
    pop %rsi

    /* Load our real kernel stack. */
    leaq kernel_stack_alloc + (1 << CONFIG_KERNEL_STACK_BITS), %rsp

    /* Use restore_user_context as the return address, so that when boot_sys
     * returns the kernel exits to userspace through the normal path. */
    movabs $restore_user_context, %rax
    push %rax
    jmp boot_sys
END_FUNC(_entry_64)

.section .phys.text

#ifdef ENABLE_SMP_SUPPORT

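/* Application processors begin executing boot_cpu_start in real mode. The
 * code between boot_cpu_start and boot_cpu_end is kept position independent
 * so that it can be copied to a page in low memory and entered via a startup
 * IPI, which is why _boot_gdt_ptr is addressed relative to boot_cpu_start
 * rather than by its link-time address. */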
BEGIN_FUNC(boot_cpu_start)
.code16
    /* Set DS equal to CS and load GDTR register with GDT pointer. */
    movw %cs, %ax
    movw %ax, %ds
    lgdt _boot_gdt_ptr - boot_cpu_start

    /* Enable protected mode. */
    movl %cr0, %eax
    orl  $1,   %eax
    movl %eax, %cr0

    /* Reload CS with a far jump. */
    ljmpl $0x08, $1f

.code32
1:
    /* Load DS/ES/SS with kernel data segment selector. */
    movw $0x10, %ax
    movw %ax,   %ds
    movw %ax,   %es
    movw %ax,   %ss

    /* Use temporary kernel boot stack pointer. */
    leal boot_stack_top, %esp

    /* Reset EFLAGS register (also disables interrupts etc.). */
    pushl $0
    popf

    call common_init

    /* Reload CS with a 64-bit (L-bit) code segment to start executing
     * 64-bit code. */
    ljmp $8, $_start_ap64
    jmp 1b
END_FUNC(boot_cpu_start)

.code64
BEGIN_FUNC(_start_ap64)
    /* Leave phys code behind and jump to the high kernel virtual address. */
    movabs $_entry_ap64, %rax
    jmp *%rax
END_FUNC(_start_ap64)

_boot_gdt_ptr:
    .word   (3 * 8) - 1 /* Limit: 3 segments * 8 bytes - 1 byte */
    .long   _boot_gdt   /* Address of boot GDT */

/* GDT for getting us through 32-bit protected mode. */
    .align 16
_boot_gdt:
    .quad 0x0000000000000000 /* Null segment */
    .quad 0x00cf9b000000ffff /* 4GB kernel code segment */
    .quad 0x00cf93000000ffff /* 4GB kernel data segment */

.global boot_cpu_end
boot_cpu_end:

.section .boot.text

BEGIN_FUNC(_entry_ap64)
    /* Get the index of this cpu. */
    movq smp_aps_index, %rcx

    /* Switch to a real kernel stack:
     * rsp = kernel_stack_alloc + (smp_aps_index + 1) << CONFIG_KERNEL_STACK_BITS. */
    leaq kernel_stack_alloc, %rsp
    inc %rcx
    shlq $CONFIG_KERNEL_STACK_BITS, %rcx
    addq %rcx, %rsp

    /* Use restore_user_context as the return address, so that when boot_node
     * returns the kernel exits to userspace through the normal path. */
    movabs $restore_user_context, %rax
    push %rax
    jmp boot_node
END_FUNC(_entry_ap64)

#endif /* ENABLE_SMP_SUPPORT */
