/* mcount.c - FreeBSD revision 37629 */
/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Version identification strings; compiled into userland libc only. */
#if !defined(lint) && !defined(KERNEL) && defined(LIBC_SCCS)
#if 0
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
#endif
static const char rcsid[] =
	"$Id: mcount.c,v 1.12 1997/02/22 09:39:55 peter Exp $";
#endif

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef KERNEL
#ifndef GUPROF
#include <sys/systm.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
/*
 * Kernel-only marker symbols: assembly entry points used below to
 * classify a pc as trap handler, interrupt handler, or user code.
 */
void	bintr __P((void));
void	btrap __P((void));
void	eintr __P((void));
void	user __P((void));
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
_MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
	register uintfptr_t frompc, selfpc;
{
#ifdef GUPROF
	int delta;
#endif
	register fptrdiff_t frompci;
	register u_short *frompcindex;
	register struct tostruct *top, *prevtop;
	register struct gmonparam *p;
	register long toindex;
#ifdef KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef KERNEL
	MCOUNT_ENTER(s);
#else
	/* Userland uses the state field itself as the recursion lock. */
	p->state = GMON_PROF_BUSY;
#endif
	/* Offset of the caller's pc from the start of profiled text. */
	frompci = frompc - p->lowpc;

#ifdef KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif /* KERNEL */

#ifdef GUPROF
	if (p->state == GMON_PROF_HIRES) {
		/*
		 * Count the time since cputime() was previously called
		 * against `frompc'.  Compensate for overheads.
		 *
		 * cputime() sets its prev_count variable to the count when
		 * it is called.  This in effect starts a counter for
		 * the next period of execution (normally from now until
		 * the next call to mcount() or mexitcount()).  We set
		 * cputime_bias to compensate for our own overhead.
		 *
		 * We use the usual sampling counters since they can be
		 * located efficiently.  4-byte counters are usually
		 * necessary.  gprof will add up the scattered counts
		 * just like it does for statistical profiling.  All
		 * counts are signed so that underflow in the subtractions
		 * doesn't matter much (negative counts are normally
		 * compensated for by larger counts elsewhere).  Underflow
		 * shouldn't occur, but may be caused by slightly wrong
		 * calibrations or from not clearing cputime_bias.
		 */
		delta = cputime() - cputime_bias - p->mcount_pre_overhead;
		cputime_bias = p->mcount_post_overhead;
		KCOUNT(p, frompci) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mcount_count += p->mcount_overhead;
	}
#endif /* GUPROF */

#ifdef KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif /* KERNEL */

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

	/* Hash the caller's pc into the froms[] table to find its arc chain. */
	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;	/* tos[0].link is the allocation cursor */
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef KERNEL
	MCOUNT_EXIT(s);
#else
	p->state = GMON_PROF_ON;
#endif
	return;
overflow:
	/* Arc table full: disable profiling permanently for this run. */
	p->state = GMON_PROF_ERROR;
#ifdef KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

#ifdef GUPROF
/*
 * Called on function exit (high-resolution profiling only): charge the
 * time since the last cputime() call against the exiting function.
 */
void
mexitcount(selfpc)
	uintfptr_t selfpc;
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		int delta;

		/*
		 * Count the time since cputime() was previously called
		 * against `selfpc'.  Compensate for overheads.
		 */
		delta = cputime() - cputime_bias - p->mexitcount_pre_overhead;
		cputime_bias = p->mexitcount_post_overhead;
		KCOUNT(p, selfpcdiff) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mexitcount_count += p->mexitcount_overhead;
	}
}

/*
 * NOTE(review): the three functions below appear to be timing targets for
 * calibrating the profiling overheads (CALIB_SCALE iterations of nothing,
 * and of a call to an empty function) -- confirm against the GUPROF
 * calibration code that calls them.
 */
void
empty_loop()
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		;
}

void
nullfunc()
{
}

void
nullfunc_loop()
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		nullfunc();
}
#endif /* GUPROF */