/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/libkern/mcount.c 116189 2003-06-11 05:37:42Z obrien $");

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#ifndef GUPROF
#include <sys/systm.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr(void);
void	btrap(void);
void	eintr(void);
void	user(void);
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
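/*
 * Illustrative sketch only; the real definitions are machine-dependent
 * and live in <machine/profile.h>.  A typical pairing declares _mcount
 * as a static (possibly inline) function and provides an mcount() entry
 * point that digs the two pcs out of the stack, roughly:
 *
 *	#define	_MCOUNT_DECL	static void _mcount
 *	#define	MCOUNT		an asm (or inline C) mcount() stub that
 *				loads frompc (the caller's return address)
 *				and selfpc (mcount's own return address)
 *				and then calls _mcount(frompc, selfpc)
 *
 * Treat the exact shapes above as assumptions, not the actual macros.
 */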
_MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
	register uintfptr_t frompc, selfpc;
{
#ifdef GUPROF
	int delta;
#endif
	register fptrdiff_t frompci;
	register u_short *frompcindex;
	register struct tostruct *top, *prevtop;
	register struct gmonparam *p;
	register long toindex;
#ifdef _KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
	p->state = GMON_PROF_BUSY;
#endif
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
		    goto done;
	}
#endif

#ifdef GUPROF
	if (p->state == GMON_PROF_HIRES) {
		/*
		 * Count the time since cputime() was previously called
		 * against `frompc'.  Compensate for overheads.
		 *
		 * cputime() sets its prev_count variable to the count when
		 * it is called.  This in effect starts a counter for
		 * the next period of execution (normally from now until
		 * the next call to mcount() or mexitcount()).  We set
		 * cputime_bias to compensate for our own overhead.
		 *
		 * We use the usual sampling counters since they can be
		 * located efficiently.  4-byte counters are usually
		 * necessary.  gprof will add up the scattered counts
		 * just like it does for statistical profiling.  All
		 * counts are signed so that underflow in the subtractions
		 * doesn't matter much (negative counts are normally
		 * compensated for by larger counts elsewhere).  Underflow
		 * shouldn't occur, but may be caused by slightly wrong
		 * calibrations or from not clearing cputime_bias.
		 */
		delta = cputime() - cputime_bias - p->mcount_pre_overhead;
		cputime_bias = p->mcount_post_overhead;
		KCOUNT(p, frompci) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mcount_count += p->mcount_overhead;
	}
#endif /* GUPROF */

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
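	/*
	 * The range test above assumes the machine-dependent exception
	 * entry code groups its handlers so that trap handlers lie in
	 * [btrap, bintr) and interrupt handlers in [bintr, eintr).
	 */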
#endif

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

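	/*
	 * froms[] is a hash-like index over the caller's text offset:
	 * each entry covers p->hashfraction * sizeof(*p->froms) bytes of
	 * text and holds the tos[] index of the head of the arc chain
	 * for callers in that range (0 means no arcs recorded yet).
	 */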
	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
	p->state = GMON_PROF_ON;
#endif
	return;
overflow:
	p->state = GMON_PROF_ERROR;
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT
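/*
 * For reference (a sketch, not taken from this file): compiling with
 * the profiling switch (e.g. cc -pg) makes the compiler insert a call
 * to the mcount entry point in every function prologue, so a profiled
 * function behaves roughly as if it began with
 *
 *	mcount();	(implicit, emitted by the compiler)
 *
 * and the MCOUNT expansion above is what provides that entry point and
 * forwards the (frompc, selfpc) pair to _mcount().
 */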

#ifdef GUPROF
void
mexitcount(selfpc)
	uintfptr_t selfpc;
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		int delta;

		/*
		 * Count the time since cputime() was previously called
		 * against `selfpc'.  Compensate for overheads.
		 */
		delta = cputime() - cputime_bias - p->mexitcount_pre_overhead;
		cputime_bias = p->mexitcount_post_overhead;
		KCOUNT(p, selfpcdiff) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mexitcount_count += p->mexitcount_overhead;
	}
}

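/*
 * The small functions below give the GUPROF calibration code trivial,
 * known workloads to time when it estimates the mcount()/mexitcount()
 * overheads subtracted out above.  The presumed caller is the kernel
 * profiling startup code (e.g. kmstartup() in sys/kern/subr_prof.c);
 * treat that specific name as an assumption.
 */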
void
empty_loop()
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		;
}

void
nullfunc()
{
}

void
nullfunc_loop()
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		nullfunc();
}
#endif /* GUPROF */