1// Copyright 2016 The Fuchsia Authors
2// Copyright (c) 2015 Travis Geiselbrecht
3//
4// Use of this source code is governed by a MIT-style
5// license that can be found in the LICENSE file or at
6// https://opensource.org/licenses/MIT
7
8#include <asm.h>
9#include <arch/defines.h>
10
/* void x86_64_context_switch(uint64_t *oldsp, uint64_t newsp)
 *
 * SysV AMD64: %rdi = oldsp (out-param: receives the outgoing thread's
 * final %rsp), %rsi = newsp (the incoming thread's previously saved %rsp).
 * Only the callee-saved registers need to be preserved here; the ABI lets
 * the caller assume everything else is clobbered across the call.  The
 * return address is already on the stack courtesy of the `call`.
 */
FUNCTION(x86_64_context_switch)
    /* save the old context and restore the new */
    /* This layout should match struct x86_64_context_switch_frame */
    push_reg %rbx
    push_reg %rbp
    push_reg %r12
    push_reg %r13
    push_reg %r14
    push_reg %r15

    /* Publish the outgoing stack pointer, then adopt the new stack. */
    movq %rsp,(%rdi)    /* *oldsp = %rsp (top of the frame pushed above) */
    movq %rsi,%rsp      /* switch to the incoming thread's saved stack */

    /* Unwind the incoming thread's frame in mirror order. */
    pop_reg %r15
    pop_reg %r14
    pop_reg %r13
    pop_reg %r12
    pop_reg %rbp
    pop_reg %rbx

    /* Returns to the address the incoming thread pushed when it last
     * called into this function (or a hand-built initial frame). */
    retq
END_FUNCTION(x86_64_context_switch)
34
35#include <arch/x86/mp.h>
36
/* void arch_spin_lock(unsigned long *lock)
 *
 * Lock word encoding: 0 = unlocked, otherwise (holder's cpu number + 1).
 * The +1 keeps cpu 0's ownership value distinct from "unlocked".
 * Clobbers %rax, %rsi, flags (all caller-saved under SysV AMD64).
 * Spins forever if the lock is never released.
 */
FUNCTION(arch_spin_lock)
    /* fetch the current cpu number + 1 */
    mov %gs:PERCPU_CPU_NUM_OFFSET, %rsi    /* %gs base = this cpu's percpu area */
    inc %rsi

.Ltake_lock:
    xor %rax, %rax                /* expected value: 0 (unlocked) */
    lock cmpxchg %rsi, (%rdi)     /* if (*lock == %rax) { *lock = %rsi; ZF=1 }
                                   * else { %rax = *lock; ZF=0 } — atomically */
    jnz .Lspin                    /* contended: wait without locked ops */
    ret

.Lspin:
    /* Test-and-test-and-set: spin on a plain read so the line can stay
     * shared in the cache, and retry the locked cmpxchg only once the
     * lock looks free. */
    pause                         /* spin-wait hint: be polite to the sibling hw thread */
    cmpq $0, (%rdi)
    je .Ltake_lock
    jmp .Lspin
END_FUNCTION(arch_spin_lock)
55
/* int arch_spin_trylock(unsigned long *lock)
 *
 * Single acquisition attempt, no spinning.  Same lock-word encoding as
 * arch_spin_lock: 0 = unlocked, else holder's cpu number + 1.
 * Returns 0 on success; on failure returns the (nonzero) value currently
 * stored in the lock word.  Clobbers %rsi and flags.
 */
FUNCTION(arch_spin_trylock)
    /* fetch the current cpu number + 1 */
    mov %gs:PERCPU_CPU_NUM_OFFSET, %rsi
    inc %rsi

    xor %rax, %rax                /* expected value: 0 (unlocked) */
    lock cmpxchg %rsi, (%rdi)     /* on failure cmpxchg loads *lock into %rax */
    /* we return 0 to indicate success. %rax contains the value found by cmpxchg,
     * which is already 0 if we got the lock */
    ret
END_FUNCTION(arch_spin_trylock)
68
/* void arch_spin_unlock(spin_lock_t *lock)
 *
 * A plain qword store suffices: on x86 (TSO) an ordinary store has
 * release semantics, so writing 0 both marks the lock free and makes the
 * critical section's writes visible before the release.  Clobbers
 * nothing — not even flags.
 */
FUNCTION(arch_spin_unlock)
    movq $0, (%rdi)
    ret
END_FUNCTION(arch_spin_unlock)
74
/* void arch_zero_page(void *page)
 *
 * Zero one PAGE_SIZE page with a single rep stosq string op.
 * SysV AMD64: %rdi = destination (presumably page-aligned — named after
 * pages; confirm against callers).  Clobbers %rax, %rcx, %rdi, flags.
 */
FUNCTION(arch_zero_page)
    cld                                 /* make stosq advance upward */
    xor     %eax, %eax                  /* fill pattern: all zeroes */
    movl    $(PAGE_SIZE >> 3), %ecx     /* qwords per page */

    rep     stosq                       /* while (rcx--) *rdi++ = rax */

    ret
END_FUNCTION(arch_zero_page)
85
// This clobbers %rax and memory below %rsp, but preserves all other registers.
// Builds the 10-byte IDT pseudo-descriptor (2-byte limit followed by an
// 8-byte base) in the red zone below %rsp and hands it to lidt.
FUNCTION(load_startup_idt)
    lea _idt_startup(%rip), %rax        // rip-relative: position independent
    movw $(16 * 256) - 1, -16(%rsp)     // limit: 256 gates * 16 bytes each, minus 1
    movq %rax, -16+2(%rsp)              // base goes right after the limit, at offset 2
    lidt -16(%rsp)                      // load IDTR from the descriptor just built
    ret
END_FUNCTION(load_startup_idt)
94