/*-
 * Copyright (c) 2005,2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#if (__FreeBSD_version >= 1100000)
#include <x86/apicvar.h>
#else
#include <machine/apicvar.h>
#endif
#include <machine/pmc_mdep.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "hwpmc_soft.h"

/*
 * Attempt to walk a user call stack using a too-simple algorithm.
 * In the general case we need unwind information associated with
 * the executable to be able to walk the user stack.
 *
 * We are handed a trap frame laid down at the time the PMC interrupt
 * was taken.  If the application is using frame pointers, the saved
 * PC value could be:
 * a. at the beginning of a function before the stack frame is laid
 *    down,
 * b. just before a 'ret', after the stack frame has been taken off,
 * c. somewhere else in the function with a valid stack frame being
 *    present,
 *
 * If the application is not using frame pointers, this algorithm will
 * fail to yield an interesting call chain.
 *
 * TODO: figure out a way to use unwind information.
 */

/*
 * Capture up to 'nframes' user-mode return addresses into 'cc',
 * starting with the interrupted PC taken from trap frame 'tf'.
 * Returns the number of entries written (always >= 1).
 *
 * All user memory is fetched with copyin(); any fault simply ends
 * the trace early rather than propagating an error.
 */
int
pmc_save_user_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;
	uint32_t instr;
	uintptr_t fp, oldfp, pc, r, sp;

	KASSERT(TRAPF_USERMODE(tf), ("[x86,%d] Not a user trap frame tf=%p",
	    __LINE__, (void *) tf));

	pc = PMC_TRAPFRAME_TO_PC(tf);
	oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_USER_SP(tf);

	/* The interrupted PC is always the first callchain entry. */
	*cc++ = pc; n = 1;

	r = fp + sizeof(uintptr_t); /* points to return address */

	if (!PMC_IN_USERSPACE(pc))
		return (n);

	/* Fetch the instruction at the PC to classify where we stopped. */
	if (copyin((void *) pc, &instr, sizeof(instr)) != 0)
		return (n);

	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr)) {	/* ret */
		/*
		 * Before 'push %bp' or just before 'ret': no frame has
		 * been laid down (or it is already gone), so the caller's
		 * return address sits directly at the stack pointer.
		 */
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		/*
		 * Midway through the prologue: %bp has been pushed but
		 * not yet loaded, so sp[0] is the saved frame pointer
		 * and the return address is one word above it.
		 */
		sp += sizeof(uintptr_t);
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
	    copyin((void *) fp, &fp, sizeof(fp)) != 0)
		/* Normal frame: return address at fp+word, old fp at *fp. */
		return (n);

	for (; n < nframes;) {
		if (pc == 0 || !PMC_IN_USERSPACE(pc))
			break;

		*cc++ = pc; n++;

		/*
		 * Frame pointers must grow monotonically up the stack;
		 * a decrease indicates a corrupt or cyclic chain.
		 */
		if (fp < oldfp)
			break;

		r = fp + sizeof(uintptr_t); /* address of return address */
		oldfp = fp;

		if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
		    copyin((void *) fp, &fp, sizeof(fp)) != 0)
			break;
	}

	return (n);
}

/*
 * Walking the kernel call stack.
 *
 * We are handed the trap frame laid down at the time the PMC
 * interrupt was taken. The saved PC could be:
 * a. in the lowlevel trap handler, meaning that there isn't a C stack
 *    to traverse,
 * b. at the beginning of a function before the stack frame is laid
 *    down,
 * c. just before a 'ret', after the stack frame has been taken off,
 * d. somewhere else in a function with a valid stack frame being
 *    present.
 *
 * In case (d), the previous frame pointer is at [%ebp]/[%rbp] and
 * the return address is at [%ebp+4]/[%rbp+8].
 *
 * For cases (b) and (c), the return address is at [%esp]/[%rsp] and
 * the frame pointer doesn't need to be changed when going up one
 * level in the stack.
 *
 * For case (a), we check if the PC lies in low-level trap handling
 * code, and if so we terminate our trace.
 */

/*
 * Capture up to 'nframes' kernel return addresses into 'cc', starting
 * with the interrupted PC from trap frame 'tf'.  Returns the number of
 * entries written (always >= 1).
 *
 * Unlike the user variant, kernel memory is dereferenced directly, so
 * every pointer is first validated against the current thread's kernel
 * stack bounds before it is read.
 */
int
pmc_save_kernel_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;
	uint32_t instr;
	uintptr_t fp, pc, r, sp, stackstart, stackend;
	struct thread *td;

	KASSERT(TRAPF_USERMODE(tf) == 0,("[x86,%d] not a kernel backtrace",
	    __LINE__));

	td = curthread;
	pc = PMC_TRAPFRAME_TO_PC(tf);
	fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_KERNEL_SP(tf);

	/* The interrupted PC is always the first callchain entry. */
	*cc++ = pc;
	r = fp + sizeof(uintptr_t); /* points to return address */

	if (nframes <= 1)
		return (1);

	/* Bounds of this thread's kernel stack, for pointer validation. */
	stackstart = (uintptr_t) td->td_kstack;
	stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;

	/*
	 * Give up immediately if we stopped in low-level trap handling
	 * code (no C frame to walk) or if any of the pointers we are
	 * about to dereference lies outside the kernel stack.
	 */
	if (PMC_IN_TRAP_HANDLER(pc) ||
	    !PMC_IN_KERNEL(pc) ||
	    !PMC_IN_KERNEL_STACK(r, stackstart, stackend) ||
	    !PMC_IN_KERNEL_STACK(sp, stackstart, stackend) ||
	    !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
		return (1);

	instr = *(uint32_t *) pc;

	/*
	 * Determine whether the interrupted function was in the
	 * processing of either laying down its stack frame or taking
	 * it off.
	 *
	 * If we haven't started laying down a stack frame, or are
	 * just about to return, then our caller's address is at
	 * *sp, and we don't have a frame to unwind.
	 */
	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr))
		pc = *(uintptr_t *) sp;
	else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		/*
		 * The code was midway through laying down a frame.
		 * At this point sp[0] has a frame back pointer,
		 * and the caller's address is therefore at sp[1].
		 */
		sp += sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(sp, stackstart, stackend))
			return (1);
		pc = *(uintptr_t *) sp;
	} else {
		/*
		 * Not in the function prologue or epilogue.
		 */
		pc = *(uintptr_t *) r;
		fp = *(uintptr_t *) fp;
	}

	for (n = 1; n < nframes; n++) {
		*cc++ = pc;

		/* Stop once the chain enters low-level trap handling code. */
		if (PMC_IN_TRAP_HANDLER(pc))
			break;

		r = fp + sizeof(uintptr_t);
		/* Validate both words before dereferencing them. */
		if (!PMC_IN_KERNEL_STACK(fp, stackstart, stackend) ||
		    !PMC_IN_KERNEL_STACK(r, stackstart, stackend))
			break;
		pc = *(uintptr_t *) r;
		fp = *(uintptr_t *) fp;
	}

	return (n);
}

/*
 * Machine dependent initialization for x86 class platforms.
240147191Sjkoshy */ 241147191Sjkoshy 242147191Sjkoshystruct pmc_mdep * 243147191Sjkoshypmc_md_initialize() 244147191Sjkoshy{ 245149375Sjkoshy int i; 246149375Sjkoshy struct pmc_mdep *md; 247149375Sjkoshy 248147191Sjkoshy /* determine the CPU kind */ 249185341Sjkim if (cpu_vendor_id == CPU_VENDOR_AMD) 250149375Sjkoshy md = pmc_amd_initialize(); 251185341Sjkim else if (cpu_vendor_id == CPU_VENDOR_INTEL) 252149375Sjkoshy md = pmc_intel_initialize(); 253184802Sjkoshy else 254196224Sjhb return (NULL); 255149375Sjkoshy 256149375Sjkoshy /* disallow sampling if we do not have an LAPIC */ 257230636Semaste if (md != NULL && !lapic_enable_pmc()) 258233628Sfabient for (i = 0; i < md->pmd_nclass; i++) { 259233628Sfabient if (i == PMC_CLASS_INDEX_SOFT) 260233628Sfabient continue; 261184802Sjkoshy md->pmd_classdep[i].pcd_caps &= ~PMC_CAP_INTERRUPT; 262233628Sfabient } 263149375Sjkoshy 264184802Sjkoshy return (md); 265147191Sjkoshy} 266184802Sjkoshy 267184802Sjkoshyvoid 268184802Sjkoshypmc_md_finalize(struct pmc_mdep *md) 269184802Sjkoshy{ 270196224Sjhb 271196224Sjhb lapic_disable_pmc(); 272185341Sjkim if (cpu_vendor_id == CPU_VENDOR_AMD) 273184802Sjkoshy pmc_amd_finalize(md); 274185341Sjkim else if (cpu_vendor_id == CPU_VENDOR_INTEL) 275184802Sjkoshy pmc_intel_finalize(md); 276184802Sjkoshy else 277184802Sjkoshy KASSERT(0, ("[x86,%d] Unknown vendor", __LINE__)); 278184802Sjkoshy} 279