mcount.c revision 34361
/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if !defined(lint) && !defined(KERNEL) && defined(LIBC_SCCS)
#if 0
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
#endif
static const char rcsid[] =
	"$Id: mcount.c,v 1.9 1997/02/22 14:58:27 peter Exp $";
#endif

#ifndef __NETBSD_SYSCALLS
#include <sys/param.h>
#include <sys/gmon.h>
#ifdef KERNEL
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr __P((void));
void	btrap __P((void));
void	eintr __P((void));
void	user __P((void));
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
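/*
 * Rough sketch of the bookkeeping done below, using the field names from
 * struct gmonparam in <sys/gmon.h> (exact structure layout may vary by
 * version):
 *
 *	froms[frompci / (hashfraction * sizeof(*froms))]
 *		index of the head tostruct for call sites that hash to this
 *		slot, or 0 if no arc from this neighbourhood has been seen.
 *	tos[toindex]
 *		one { selfpc, count, link } arc record; link chains together
 *		all arcs that share a froms[] slot.
 *	tos[0].link
 *		index of the most recently allocated arc record; bumping it
 *		allocates the next record, and reaching tolimit stops
 *		profiling via GMON_PROF_ERROR.
 */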
_MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
	register fptrint_t frompc, selfpc;
{
#ifdef GUPROF
	u_int delta;
#endif
	register fptrdiff_t frompci;
	register u_short *frompcindex;
	register struct tostruct *top, *prevtop;
	register struct gmonparam *p;
	register long toindex;
#ifdef KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef KERNEL
	MCOUNT_ENTER(s);
#else
	p->state = GMON_PROF_BUSY;
#endif
	frompci = frompc - p->lowpc;

#ifdef KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (fptrint_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (fptrint_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif /* KERNEL */

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
				    - p->cputime_overhead;
	else if (delta != 0) {
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */

#ifdef KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
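	 *
	 * (As the comparisons below assume, btrap, bintr and eintr bracket
	 * the kernel's exception entry code: addresses in [btrap, bintr)
	 * belong to trap handlers and addresses in [bintr, eintr) to
	 * interrupt handlers.)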
	 */
	if ((fptrint_t)selfpc >= (fptrint_t)btrap
	    && (fptrint_t)selfpc < (fptrint_t)eintr) {
		if ((fptrint_t)selfpc >= (fptrint_t)bintr)
			frompci = (fptrint_t)bintr - p->lowpc;
		else
			frompci = (fptrint_t)btrap - p->lowpc;
	}
#endif /* KERNEL */

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef KERNEL
	MCOUNT_EXIT(s);
#else
	p->state = GMON_PROF_ON;
#endif
	return;
overflow:
	p->state = GMON_PROF_ERROR;
#ifdef KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

#ifdef GUPROF
void
mexitcount(selfpc)
	fptrint_t selfpc;
{
	struct gmonparam *p;
	fptrint_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (fptrint_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		u_int delta;

		/*
		 * Solidify the count for the current function.
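		 *
		 * (As in _mcount() above: delta is the cycle count from
		 * cputime() since the clock was last read, less the
		 * calibrated mexitcount() overhead; the *_overhead_frac
		 * values accumulate in *_overhead_resid and are paid back
		 * one cycle at a time once they reach CALIB_SCALE.)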
		 */
		delta = cputime() - p->mexitcount_overhead;
		p->cputime_overhead_resid += p->cputime_overhead_frac;
		p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
		if ((int)delta < 0)
			*p->mexitcount_count += delta + p->mexitcount_overhead
						- p->cputime_overhead;
		else if (delta != 0) {
			if (p->cputime_overhead_resid >= CALIB_SCALE) {
				p->cputime_overhead_resid -= CALIB_SCALE;
				++*p->cputime_count;
				--delta;
			}
			if (delta != 0) {
				if (p->mexitcount_overhead_resid
				    >= CALIB_SCALE) {
					p->mexitcount_overhead_resid
					    -= CALIB_SCALE;
					++*p->mexitcount_count;
					--delta;
				}
				KCOUNT(p, selfpcdiff) += delta;
			}
			*p->mexitcount_count += p->mexitcount_overhead_sub;
		}
		*p->cputime_count += p->cputime_overhead;
	}
}
#endif /* GUPROF */

#endif
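
/*
 * Typical use, as a sketch (exact flags and tool names vary by platform):
 * compiling with the profiling switch makes every function entry call the
 * MCOUNT stub, which funnels into _mcount() above, and the arcs recorded
 * here are dumped to gmon.out for gprof to read, e.g.
 *
 *	cc -pg -o prog prog.c
 *	./prog			(writes gmon.out on exit)
 *	gprof prog gmon.out
 */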