/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/libkern/mcount.c 55206 1999-12-29 05:07:58Z peter $
 */

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#ifndef GUPROF
#include <sys/systm.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr __P((void));
void	btrap __P((void));
void	eintr __P((void));
void	user __P((void));
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represent the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
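
/*
 * For reference, a minimal sketch of that machine-dependent glue,
 * assuming a compiler with GCC's __builtin_return_address().  The
 * real definitions live in <machine/profile.h> and are usually
 * hand-tuned assembly stubs, so treat this only as an illustration
 * of the contract: selfpc is a pc inside the profiled function,
 * frompc is a pc inside that function's caller.
 */
#if 0	/* illustrative sketch, not the real <machine/profile.h> */
#define	_MCOUNT_DECL	static __inline void _mcount

#define	MCOUNT \
void \
mcount(void) \
{ \
	uintfptr_t selfpc, frompc; \
 \
	/* return address of this mcount() call: a pc in our caller, */ \
	/* i.e. in the profiled function itself */ \
	selfpc = (uintfptr_t)__builtin_return_address(0); \
	/* return address one frame up: a pc in that function's caller */ \
	frompc = (uintfptr_t)__builtin_return_address(1); \
	_mcount(frompc, selfpc); \
}
#endif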

_MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
	register uintfptr_t frompc, selfpc;
{
#ifdef GUPROF
	int delta;
#endif
	register fptrdiff_t frompci;
	register u_short *frompcindex;
	register struct tostruct *top, *prevtop;
	register struct gmonparam *p;
	register long toindex;
#ifdef _KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
	p->state = GMON_PROF_BUSY;
#endif
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif

#ifdef GUPROF
	if (p->state == GMON_PROF_HIRES) {
		/*
		 * Count the time since cputime() was previously called
		 * against `frompc'.  Compensate for overheads.
		 *
		 * cputime() sets its prev_count variable to the count when
		 * it is called.  This in effect starts a counter for
		 * the next period of execution (normally from now until
		 * the next call to mcount() or mexitcount()).  We set
		 * cputime_bias to compensate for our own overhead.
		 *
		 * We use the usual sampling counters since they can be
		 * located efficiently.  4-byte counters are usually
		 * necessary.  gprof will add up the scattered counts
		 * just like it does for statistical profiling.  All
		 * counts are signed so that underflow in the subtractions
		 * doesn't matter much (negative counts are normally
		 * compensated for by larger counts elsewhere).  Underflow
		 * shouldn't occur, but may be caused by slightly wrong
		 * calibrations or from not clearing cputime_bias.
		 */
		delta = cputime() - cputime_bias - p->mcount_pre_overhead;
		cputime_bias = p->mcount_post_overhead;
		KCOUNT(p, frompci) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mcount_count += p->mcount_overhead;
	}
#endif /* GUPROF */
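
/*
 * A worked example of the compensation above, with made-up numbers.
 * Suppose cputime() returns the count accumulated since its previous
 * call, that p->mcount_pre_overhead was calibrated at 40 cycles (the
 * cost from profiled-function entry up to the cputime() call above),
 * and that the previous mcount()/mexitcount() left cputime_bias at 30
 * cycles of its own exit cost.  If cputime() now returns 1070, then
 *
 *	delta = 1070 - 30 - 40 = 1000
 *
 * cycles land in frompc's KCOUNT bucket: only the time the profiled
 * code itself ran, with the profiler's entry and exit costs shaved
 * off both ends.  Reloading cputime_bias with p->mcount_post_overhead
 * keeps our own return path from being charged to the next period.
 */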

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;
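
/*
 * Shape of the arc tables walked below, for reference (they are
 * declared in <sys/gmon.h>; this is a rough picture, not new data):
 * froms[] is a hash keyed on the caller's pc offset, and each entry
 * holds the tos[] index of the head of a chain of tostruct records.
 * Each tostruct remembers one callee (selfpc), a traversal count,
 * and the link to the next chain slot; tos[0].link doubles as the
 * high-water mark for allocated entries.  Roughly:
 *
 *	froms[frompci / (hashfraction * sizeof(u_short))]
 *	    -> tos[i]  = { selfpc, count, link }
 *	    -> tos[tos[i].link] = next callee seen from this bucket
 *
 * Callers whose pcs fall into the same bucket share one chain, so a
 * bucket fans out to however many distinct callees were observed.
 */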

	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
	p->state = GMON_PROF_ON;
#endif
	return;
overflow:
	p->state = GMON_PROF_ERROR;
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

#ifdef GUPROF
void
mexitcount(selfpc)
	uintfptr_t selfpc;
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		int delta;

		/*
		 * Count the time since cputime() was previously called
		 * against `selfpc'.  Compensate for overheads.
		 */
		delta = cputime() - cputime_bias - p->mexitcount_pre_overhead;
		cputime_bias = p->mexitcount_post_overhead;
		KCOUNT(p, selfpcdiff) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mexitcount_count += p->mexitcount_overhead;
	}
}

void
empty_loop()
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		;
}

void
nullfunc()
{
}

void
nullfunc_loop()
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		nullfunc();
}
#endif /* GUPROF */
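
/*
 * The three helpers above exist only as calibration yardsticks for
 * the machine-dependent profiling startup code (kmstartup() in
 * sys/kern/subr_prof.c in trees of this vintage; the exact consumer
 * varies per port, so this note is a sketch, not a map).  The idea:
 * time empty_loop() to learn what a bare CALIB_SCALE-iteration loop
 * costs, then time a profiled loop of CALIB_SCALE calls to the
 * do-nothing nullfunc(), so that
 *
 *	(profiled_loop_time - empty_loop_time) / CALIB_SCALE
 *
 * approximates the per-call mcount()/mexitcount() cost used to fill
 * in the p->*_overhead fields consumed by the GUPROF code above.
 */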