/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(DATA61_GPL)
 */

/* Configuration for Multiboot, see the Multiboot Specification:
   www.gnu.org/software/grub/manual/multiboot
   We use a flags field of 3, indicating that we want modules loaded on page
   boundaries and access to the memory map information. We do not set bit 16,
   indicating that the structure of the image should be taken from its ELF
   headers. */

#include <config.h>
#include <machine/assembler.h>

#define IA32_EFER_MSR 0xC0000080
#define IA32_APIC_BASE_MSR 0x01B
#define APIC_ID_OFFSET 0x020

.section .phys.text

.code32

/* We need the setup code to run in 32-bit mode. Unfortunately it is difficult
 * to persuade other parts of the C kernel to be compiled as 32-bit code.
 * Therefore, whilst we would rather write this in C, we have to put the PML4
 * initialization code here in assembly. */

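/* print_string: minimal early-boot serial output helper.
 * Expects %ebx = address of the string and %ecx = number of bytes to write;
 * clobbers %al, %ebx, %ecx and %edx. Each byte is written to I/O port 0x3f8,
 * the conventional COM1 data port; we assume the UART has already been
 * initialised (e.g. by the firmware or the emulator), so no line-status
 * polling is done here. */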
BEGIN_FUNC(print_string)
    movw $0x3f8, %dx
1:
    movb (%ebx), %al
    outb %al, %dx
    incl %ebx
    decl %ecx
    jnz  1b
    ret
END_FUNC(print_string)

#ifdef CONFIG_HUGE_PAGE
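/* huge_page_check: CPUID leaf 0x80000001 reports 1 GiB page support
 * (PDPE1GB) in EDX bit 26 (mask 0x04000000). If the bit is clear we print an
 * error over serial and halt, since the boot mappings built below rely on
 * 1 GiB PDPT entries. */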
BEGIN_FUNC(huge_page_check)
    movl $0x80000001, %eax
    cpuid
    andl $0x04000000, %edx
    jnz  2f
    movl $huge_page_error_string, %ebx
    movl $huge_page_error_size, %ecx
    call print_string
1:
    hlt
    jmp  1b
2:
    ret

huge_page_error_string:
    .string "Huge page not supported by the processor"
    .set huge_page_error_size, . - huge_page_error_string
END_FUNC(huge_page_check)

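/* setup_pml4 (1 GiB page variant) builds the minimal boot address space:
 *   - boot_pml4 and boot_pdpt are zeroed (1024 dwords = 4 KiB each);
 *   - PDPT entries 0-3 map the first 4 GiB with 1 GiB pages
 *     (flags 0x87 = Present | Writable | User | PageSize);
 *   - PDPT entry 510 (byte offset 4080) maps the first 1 GiB again,
 *     forming the higher kernel window;
 *   - PML4 entries 0 and 511 (byte offset 4088) both point at boot_pdpt
 *     (flags 0x7 = Present | Writable | User), giving both a 1-to-1 mapping
 *     and a high mapping of the same physical memory. */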
BEGIN_FUNC(setup_pml4)
    call huge_page_check
    /* Ensure paging is disabled before touching the boot page tables */
    movl %cr0, %eax
    andl $0x7fffffff, %eax
    movl %eax, %cr0
    /* Zero the boot PML4 */
    movl $boot_pml4, %edi
    movl $0x0, %edx
    movl $1024, %ecx
1:
    movl %edx, (%edi)
    addl $4, %edi
    loop 1b
    /* Zero the boot PDPT */
    movl $boot_pdpt, %edi
    movl $1024, %ecx
1:
    movl %edx, (%edi)
    addl $4, %edi
    loop 1b
    /* Set the first 4 entries in the PDPT to map the first
     * 4 GiB of memory */
    movl $boot_pdpt, %edi
    movl $0x87, %edx
    movl $4, %ecx
    movl $0, %ebx
1:
    movl %edx, (%edi)
    movl %ebx, 4(%edi)
    addl $0x40000000, %edx
    jnc 2f
    /* Carry occurred, need to increase the high part
     * of the address */
    incl %ebx
2:
    addl $8, %edi
    loop 1b
    /* Set the second highest entry in the PDPT to also map to the
     * first part of memory. This is our actual kernel window */
    movl $boot_pdpt, %edi
    movl $0x87, %edx
    movl %edx, 4080(%edi)
    /* Put the PDPT into the PML4 twice:
     * once to create a 1-to-1 mapping, and once
     * to create the higher kernel window */
    movl $boot_pml4, %edi
    movl $boot_pdpt, %edx
    orl $0x7, %edx
    movl %edx, (%edi)
    movl %edx, 4088(%edi)
    /* done */
    ret
END_FUNC(setup_pml4)

#else

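/* setup_pml4 (2 MiB page fallback) builds an equivalent boot address space
 * without relying on 1 GiB pages:
 *   - boot_pml4 and boot_pdpt are zeroed;
 *   - PML4 entries 0, 256 (byte offset 0x800) and 511 (offset 4088) all
 *     point at boot_pdpt, covering the identity mapping and the high
 *     mappings;
 *   - PDPT entries 0-3 point at the four page directories in _boot_pd, and
 *     entry 510 (offset 4080) reuses the first page directory for the
 *     kernel window;
 *   - _boot_pd is filled with 2048 entries of 2 MiB pages (flags 0x87),
 *     mapping the first 4 GiB of physical memory. */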
BEGIN_FUNC(setup_pml4)
    /* Ensure paging is disabled before touching the boot page tables */
    movl %cr0, %eax
    andl $0x7fffffff, %eax
    movl %eax, %cr0

    /* Zero the boot PML4 */
    movl $boot_pml4, %edi
    movl $0x0, %edx
    movl $1024, %ecx
1:
    movl %edx, (%edi)
    addl $4, %edi
    loop 1b

    /* Zero the boot PDPT */
    movl $boot_pdpt, %edi
    movl $1024, %ecx
1:
    movl %edx, (%edi)
    addl $4, %edi
    loop 1b

    /* Point PML4 entries 0, 256 and 511 at the boot PDPT */
    movl $boot_pml4, %edi
    movl $boot_pdpt, %ecx
    orl  $0x7, %ecx
    movl %ecx, (%edi)
    movl %ecx, 0x800(%edi)
    movl %ecx, 4088(%edi)

    /* Point PDPT entries 0-3 at the four page directories in _boot_pd, and
     * reuse the first page directory for the kernel window (entry 510) */
    movl $_boot_pd, %ecx
    orl  $0x7, %ecx
    movl $boot_pdpt, %edi
    movl %ecx, (%edi)
    movl %ecx, 4080(%edi)
    addl $0x1000, %ecx
    movl %ecx, 8(%edi)
    addl $0x1000, %ecx
    movl %ecx, 16(%edi)
    addl $0x1000, %ecx
    movl %ecx, 24(%edi)

    /* Map the first 4 GiB into _boot_pd with 2 MiB pages */
    movl $_boot_pd, %edi
    movl $2048, %ecx
    movl $0x87, %edx
2:
    movl %edx, (%edi)
    addl $0x200000, %edx
    addl $8, %edi
    loop 2b
    ret
END_FUNC(setup_pml4)

#endif

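/* pcid_check: process-context identifier support is reported in
 * CPUID.(EAX=1):ECX bit 17 (mask 0x20000). Without it the kernel cannot set
 * CR4.PCIDE later in enable_x64_mode. */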
BEGIN_FUNC(pcid_check)
    movl $0x1, %eax
    xorl %ecx, %ecx
    cpuid
    andl $0x20000, %ecx
    jnz 2f
    movl $pcid_error_string, %ebx
    movl $pcid_error_size, %ecx
    call print_string
1:
    hlt
    jmp 1b
2:
    ret

pcid_error_string:
    .string "PCIDs not supported by the processor"
    .set pcid_error_size, . - pcid_error_string
END_FUNC(pcid_check)

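/* invpcid_check: the INVPCID instruction is reported in
 * CPUID.(EAX=7,ECX=0):EBX bit 10 (mask 0x400); INVPCID invalidates TLB
 * entries tagged with a given PCID. */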
BEGIN_FUNC(invpcid_check)
    movl $0x7, %eax
    xorl %ecx, %ecx
    cpuid
    andl $0x400, %ebx
    jnz   2f
    movl  $invpcid_error_string, %ebx
    movl  $invpcid_error_size, %ecx
    call  print_string
1:
    hlt
    jmp   1b
2:
    ret

invpcid_error_string:
    .string "INVPCID instruction not supported by the processor"
    .set invpcid_error_size, . - invpcid_error_string
END_FUNC(invpcid_check)

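/* syscall_check: halts with an error if the processor cannot provide
 * SYSCALL/SYSRET, the fast kernel entry/exit instructions used when
 * CONFIG_SYSCALL is enabled. The test below uses CPUID leaf 0x80000001,
 * EDX bit 29 (mask 0x20000000), the long mode / Intel 64 flag, rather than
 * the dedicated SYSCALL flag (EDX bit 11): any processor with long mode
 * provides SYSCALL/SYSRET in 64-bit mode, which is where this kernel uses
 * them. */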
BEGIN_FUNC(syscall_check)
    movl $0x80000001, %eax
    xorl %ecx, %ecx
    cpuid
    andl $0x20000000, %edx
    jnz   2f
    movl  $syscall_error_string, %ebx
    movl  $syscall_error_size, %ecx
    call  print_string
1:
    hlt
    jmp   1b
2:
    ret

syscall_error_string:
    .string "SYSCALL/SYSRET instruction not supported by the processor"
    .set syscall_error_size, . - syscall_error_string
END_FUNC(syscall_check)

/* if fsgsbase instructions are supported, we enable them. */
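/* FSGSBASE support is reported in CPUID.(EAX=7,ECX=0):EBX bit 0; setting
 * CR4.FSGSBASE (bit 16) makes the RDFSBASE/WRFSBASE/RDGSBASE/WRGSBASE
 * instructions usable, including from user level. */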
BEGIN_FUNC(fsgsbase_enable)
#ifdef CONFIG_FSGSBASE_INST
    movl $0x7, %eax
    xorl %ecx, %ecx
    cpuid
    andl $1, %ebx
    jnz  2f
    movl $fsgsbase_error_string, %ebx
    movl $fsgsbase_error_size, %ecx
    call print_string
1:
    hlt
    jmp 1b
2:
    movl %cr4, %eax
    /* enable the bit in CR4 */
    orl  $0x10000, %eax
    movl %eax, %cr4
    ret

fsgsbase_error_string:
    .string "fsgsbase instructions not supported by the processor"
    .set fsgsbase_error_size, . - fsgsbase_error_string
#else
    ret
#endif
END_FUNC(fsgsbase_enable)

BEGIN_FUNC(syscall_enable)
    call syscall_check
    /* Set SCE (bit 0) in the extended feature MSR */
    movl $IA32_EFER_MSR, %ecx
    rdmsr
    orl $0x1, %eax
    wrmsr
    ret
END_FUNC(syscall_enable)

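/* enable_x64_mode follows the standard sequence for entering IA-32e mode:
 * load CR3 with the boot PML4, set CR4.PAE (bit 5), set IA32_EFER.LME
 * (bit 8) via WRMSR, then set CR0.PG (bit 31). Once paging is enabled the
 * processor runs in compatibility mode; the far jump to _start64 or
 * _start_ap64 after common_init returns completes the switch to 64-bit
 * code. */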
BEGIN_FUNC(enable_x64_mode)
#ifdef CONFIG_SUPPORT_PCID
    call pcid_check
    call invpcid_check
#endif
    /* put base pointer in cr3 */
    movl $boot_pml4, %eax
    movl %eax, %cr3
    /* Set PAE (bit 5), as this is required before switching to long mode */
    movl %cr4, %eax
    orl $0x20, %eax
    movl %eax, %cr4
    /* Set LME (bit 8) in the extended feature MSR */
    movl $IA32_EFER_MSR, %ecx
    rdmsr
    orl $0x100, %eax
    wrmsr
    /* Set PG (bit 31) of cr0 to enable paging */
    movl %cr0, %eax
    orl $0x80000000, %eax
    movl %eax, %cr0
#ifdef CONFIG_SUPPORT_PCID
    /* enable PCID (bit 17), must be done in long mode */
    movl %cr4, %eax
    orl  $0x20000, %eax
    movl %eax, %cr4
#endif
    ret
END_FUNC(enable_x64_mode)

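/* common_init is shared between the bootstrap processor (_start) and, when
 * SMP is enabled, the application processors (boot_cpu_start): it disables
 * paging, enables FSGSBASE if configured, builds the boot page tables,
 * switches to long mode and loads the 64-bit GDT. */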
BEGIN_FUNC(common_init)
    /* make sure paging (bit 31) is off */
    movl %cr0, %eax
    andl $0x7fffffff, %eax
    movl %eax, %cr0

    call fsgsbase_enable
    /* Initialize boot PML4 and switch to long mode */
    call setup_pml4
    call enable_x64_mode
    lgdt _gdt64_ptr

#ifdef CONFIG_SYSCALL
    call syscall_enable
#endif

    ret
END_FUNC(common_init)

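/* _start is the Multiboot entry point: the bootloader passes the Multiboot
 * magic value in %eax and the physical address of the Multiboot information
 * structure in %ebx, which become the two arguments to boot_sys. */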
BEGIN_FUNC(_start)
    /* Assume we are loaded by a Multiboot-compliant bootloader, e.g. GRUB.
       See the Multiboot Specification: www.gnu.org/software/grub/manual/multiboot
    */
    movl %eax, %edi /* multiboot_magic    */
    movl %ebx, %esi /* multiboot_info_ptr */

    /* Load kernel boot stack pointer */
    leal boot_stack_top, %esp

    /* Reset EFLAGS register (also disables interrupts etc.) */
    pushl $0
    popf

    /* Push the parameters for calling boot_sys later. Push them
     * as 8-byte values so we can easily pop them in 64-bit mode */
    pushl $0
    pushl %esi /* 2nd parameter: multiboot_info_ptr */
    pushl $0
    pushl %edi /* 1st parameter: multiboot_magic    */

    call common_init

    /* Reload CS with the 64-bit code segment selector to enter 64-bit mode */
    ljmp $8, $_start64
END_FUNC(_start)

.code64
.align 4096
BEGIN_FUNC(_start64)
    /* Leave phys code behind and jump to the high kernel virtual address */
    movabs $_entry_64, %rax
    jmp *%rax
END_FUNC(_start64)

.section .phys.data
_gdt64_ptr:
    .word (3 * 8) - 1 /* Limit: 3 descriptors * 8 bytes - 1 byte */
    .long _gdt64      /* Address of _gdt64 */

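/* _gdt64 holds three hand-built descriptors: the mandatory null descriptor,
 * a 64-bit kernel code descriptor (access byte 0x98 = present, DPL 0, code;
 * flags byte 0x20 sets the L bit for 64-bit mode) and a kernel data
 * descriptor (access byte 0x90). Base and limit are ignored in 64-bit mode,
 * so they are left as zero. */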
.align 16
_gdt64:
    .quad 0x0000000000000000 /* Null descriptor */
    /* 64-bit kernel code descriptor */
    .word   0
    .word   0
    .byte   0
    .byte   0x98
    .byte   0x20
    .byte   0
    /* Kernel data descriptor */
    .word   0
    .word   0
    .byte   0
    .byte   0x90
    .byte   0
    .byte   0

#ifndef CONFIG_HUGE_PAGE
.section .phys.bss
.align 4096
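/* Four page directories (4 x 4 KiB) used by the 2 MiB page fallback in
 * setup_pml4 to map the first 4 GiB of physical memory. */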
_boot_pd:
    .fill 16384
#endif

.section .boot.text

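/* _entry_64 runs at the high kernel virtual address. The boot stack pointer
 * still holds a physical address, so adding 0xffffffff80000000, the virtual
 * base of the kernel window set up in setup_pml4, rebases %rsp (and %rbp)
 * before the multiboot arguments are popped into %rdi/%rsi for boot_sys. */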
BEGIN_FUNC(_entry_64)
    /* Update our stack pointer */
    movq $0xffffffff80000000, %rax
    addq %rax, %rsp
    addq %rax, %rbp

    /* Pop the multiboot parameters off */
    pop %rdi
    pop %rsi

    /* Load our real kernel stack */
    leaq kernel_stack_alloc + (1 << CONFIG_KERNEL_STACK_BITS), %rsp

    /* Use restore_user_context as the return address of boot_sys */
    movabs $restore_user_context, %rax
    push %rax
    jmp boot_sys
END_FUNC(_entry_64)

.section .phys.text

#ifdef ENABLE_SMP_SUPPORT

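/* boot_cpu_start is the application-processor entry point. APs begin
 * executing in 16-bit real mode at the startup-IPI vector, which is why the
 * GDT pointer is addressed relative to boot_cpu_start (we assume the code
 * between boot_cpu_start and boot_cpu_end is relocated to low memory for the
 * APs, hence the position-relative addressing). The trampoline switches to
 * protected mode, then reuses common_init to reach 64-bit mode. */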
BEGIN_FUNC(boot_cpu_start)
.code16
    /* Set DS equal to CS and load GDTR register with GDT pointer */
    movw %cs, %ax
    movw %ax, %ds
    lgdt _boot_gdt_ptr - boot_cpu_start

    /* Enable Protected Mode */
    movl %cr0, %eax
    orl  $1,   %eax
    movl %eax, %cr0

    /* Reload CS with a far jump */
    ljmpl $0x08, $1f

.code32
1:
    /* Load DS/ES/SS with kernel data segment selector */
    movw $0x10, %ax
    movw %ax,   %ds
    movw %ax,   %es
    movw %ax,   %ss

    /* Use temporary kernel boot stack pointer */
    leal boot_stack_top, %esp

    /* Reset EFLAGS register (also disables interrupts etc.) */
    pushl $0
    popf

    call common_init

    /* Reload CS with the 64-bit code segment selector to enter 64-bit mode */
    ljmp $8, $_start_ap64
    jmp 1b
END_FUNC(boot_cpu_start)

.code64
BEGIN_FUNC(_start_ap64)
    /* Leave phys code behind and jump to the high kernel virtual address */
    movabs $_entry_ap64, %rax
    jmp *%rax
END_FUNC(_start_ap64)

_boot_gdt_ptr:
    .word   (3 * 8) - 1 /* Limit: 3 segments * 8 bytes - 1 byte */
    .long   _boot_gdt   /* Address of boot GDT */

/* GDT for getting us through 32-bit protected mode */
    .align 16
_boot_gdt:
    .quad 0x0000000000000000 /* Null segment */
    .quad 0x00cf9b000000ffff /* 4GB kernel code segment */
    .quad 0x00cf93000000ffff /* 4GB kernel data segment */

.global boot_cpu_end
boot_cpu_end:

.section .boot.text

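/* _entry_ap64 picks a per-CPU kernel stack: the stack for AP number n
 * (smp_aps_index = n) starts at kernel_stack_alloc plus
 * (n + 1) << CONFIG_KERNEL_STACK_BITS, i.e. the top of the (n + 1)th stack
 * in the kernel stack area (the BSP uses the first). */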
BEGIN_FUNC(_entry_ap64)
    /* Get the index of this CPU */
    movq smp_aps_index, %rcx

    /* Switch to a real kernel stack */
    leaq kernel_stack_alloc, %rsp
    inc %rcx
    shlq $CONFIG_KERNEL_STACK_BITS, %rcx
    addq %rcx, %rsp

    /* Use restore_user_context as the return address of boot_node */
    movabs $restore_user_context, %rax
    push %rax
    jmp boot_node
END_FUNC(_entry_ap64)

#endif /* ENABLE_SMP_SUPPORT */
