/*	$NetBSD: profile.h,v 1.21 2021/11/02 11:26:03 ryo Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
30259698Sdim * 31259698Sdim * @(#)profile.h 8.1 (Berkeley) 6/11/93 32259698Sdim */ 33259698Sdim 34259698Sdim#ifdef __x86_64__ 35259698Sdim 36259698Sdim#ifdef _KERNEL_OPT 37259698Sdim#include "opt_xen.h" 38259698Sdim#endif 39259698Sdim 40259698Sdim#define _MCOUNT_DECL void _mcount 41259698Sdim 42259698Sdim#define EPROL_EXPORT __asm(".globl _eprol") 43259698Sdim 44259698Sdim#ifdef __PIC__ 45259698Sdim#define __MCPLT "@PLT" 46259698Sdim#else 47259698Sdim#define __MCPLT 48259698Sdim#endif 49259698Sdim 50259698Sdim#define MCOUNT \ 51259698Sdim__weak_alias(mcount, __mcount) \ 52259698Sdim__asm(" .globl __mcount \n" \ 53259698Sdim" .type __mcount,@function\n" \ 54259698Sdim"__mcount: \n" \ 55259698Sdim" pushq %rbp \n" \ 56259698Sdim" movq %rsp,%rbp \n" \ 57259698Sdim" subq $56,%rsp \n" \ 58259698Sdim" movq %rdi,0(%rsp) \n" \ 59259698Sdim" movq %rsi,8(%rsp) \n" \ 60259698Sdim" movq %rdx,16(%rsp) \n" \ 61259698Sdim" movq %rcx,24(%rsp) \n" \ 62259698Sdim" movq %r8,32(%rsp) \n" \ 63259698Sdim" movq %r9,40(%rsp) \n" \ 64259698Sdim" movq %rax,48(%rsp) \n" \ 65259698Sdim" movq 0(%rbp),%r11 \n" \ 66259698Sdim" movq 8(%r11),%rdi \n" \ 67259698Sdim" movq 8(%rbp),%rsi \n" \ 68259698Sdim" call _mcount"__MCPLT " \n" \ 69259698Sdim" movq 0(%rsp),%rdi \n" \ 70259698Sdim" movq 8(%rsp),%rsi \n" \ 71259698Sdim" movq 16(%rsp),%rdx \n" \ 72259698Sdim" movq 24(%rsp),%rcx \n" \ 73259698Sdim" movq 32(%rsp),%r8 \n" \ 74259698Sdim" movq 40(%rsp),%r9 \n" \ 75259698Sdim" movq 48(%rsp),%rax \n" \ 76259698Sdim" leave \n" \ 77259698Sdim" ret \n" \ 78259698Sdim" .size __mcount,.-__mcount"); 79259698Sdim 80259698Sdim 81259698Sdim#ifdef _KERNEL 82259698Sdim#ifdef XENPV 83259698Sdimstatic inline __always_inline void 84259698Sdimmcount_disable_intr(void) 85259698Sdim{ 86259698Sdim /* should be __cli() but this calls x86_lfence() which calls mcount */ 87259698Sdim curcpu()->ci_vcpu->evtchn_upcall_mask = 1; 88259698Sdim __asm volatile("lfence" ::: "memory"); /* x86_lfence() */ 89259698Sdim} 90259698Sdim 
91259698Sdimstatic inline __always_inline u_long 92259698Sdimmcount_read_psl(void) 93259698Sdim{ 94259698Sdim return (curcpu()->ci_vcpu->evtchn_upcall_mask); 95259698Sdim} 96259698Sdim 97259698Sdimstatic inline __always_inline void 98259698Sdimmcount_write_psl(u_long psl) 99259698Sdim{ 100259698Sdim curcpu()->ci_vcpu->evtchn_upcall_mask = psl; 101259698Sdim /* can't call x86_lfence because it calls mcount() */ 102259698Sdim __asm volatile("lfence" ::: "memory"); /* x86_lfence() */ 103259698Sdim /* XXX can't call hypervisor_force_callback() because we're in mcount*/ 104259698Sdim} 105259698Sdim 106259698Sdim#else /* XENPV */ 107259698Sdimstatic inline __always_inline void 108259698Sdimmcount_disable_intr(void) 109259698Sdim{ 110259698Sdim __asm volatile("cli"); 111259698Sdim} 112259698Sdim 113259698Sdimstatic inline __always_inline u_long 114259698Sdimmcount_read_psl(void) 115259698Sdim{ 116259698Sdim u_long ef; 117259698Sdim 118259698Sdim __asm volatile("pushfq; popq %0" : "=r" (ef)); 119259698Sdim return (ef); 120259698Sdim} 121259698Sdim 122259698Sdimstatic inline __always_inline void 123259698Sdimmcount_write_psl(u_long ef) 124259698Sdim{ 125259698Sdim __asm volatile("pushq %0; popfq" : : "r" (ef)); 126259698Sdim} 127259698Sdim 128259698Sdim#endif /* XENPV */ 129259698Sdim 130259698Sdim#define MCOUNT_ENTER \ 131259698Sdim do { s = (int)mcount_read_psl(); mcount_disable_intr(); } while (0) 132259698Sdim#define MCOUNT_EXIT do { mcount_write_psl(s); } while (0) 133259698Sdim 134259698Sdim#endif /* _KERNEL */ 135259698Sdim 136259698Sdim#else /* __x86_64__ */ 137259698Sdim 138259698Sdim#include <i386/profile.h> 139259698Sdim 140259698Sdim#endif /* __x86_64__ */ 141259698Sdim