/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if !defined(_KERNEL) && defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr(void);
void	btrap(void);
void	eintr(void);
void	user(void);
#endif
#include <machine/atomic.h>
/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represent the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
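/*
 * Illustration (a sketch, not part of this file; the details are
 * machine-dependent): compiling with "cc -pg" makes each function
 * prologue call the mcount stub, conceptually as if the source read
 *
 *	void
 *	foo(void)
 *	{
 *		mcount();	-- records the arc: caller -> foo
 *		...
 *	}
 *
 * The stub recovers frompc (the caller's return address) and selfpc
 * (foo's entry point) from the stack or registers and hands them to
 * _mcount() below.
 */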
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
	u_int delta;
#endif
	fptrdiff_t frompci;
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#ifdef _KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * Check that we are profiling and that we aren't recursively
	 * invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
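	/*
	 * The kernel relies on MCOUNT_ENTER/MCOUNT_EXIT above for
	 * mutual exclusion; userland instead takes a busy lock by
	 * atomically flipping state from ON to BUSY.  A failed cmpset
	 * means profiling is off, already in progress (e.g. we
	 * interrupted another mcount from a signal handler), or in the
	 * error state, so just bail.
	 */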
	if (!atomic_cmpset_acq_int(&p->state, GMON_PROF_ON, GMON_PROF_BUSY))
		return;
#endif
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
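	/*
	 * Worked example of the residual scheme (numbers invented for
	 * illustration): if calibration measured an average mcount cost
	 * of 3.4 cycles, mcount_overhead carries the whole cycles and
	 * mcount_overhead_frac holds 0.4 * CALIB_SCALE.  Each call adds
	 * the fraction to mcount_overhead_resid; whenever the residual
	 * reaches CALIB_SCALE (every 2-3 calls here), one extra cycle
	 * is shifted from the caller's count to *mcount_count, charging
	 * fractional overhead without floating point.
	 */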
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
				    - p->cputime_overhead;
	else if (delta != 0) {
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif

	/*
	 * Check that frompc is a reasonable pc value.  For example,
	 * signal catchers get called from the stack, not from text
	 * space.  Too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

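	/*
	 * froms[] is a direct hash on frompc: one u_short slot per
	 * (hashfraction * sizeof(*froms)) bytes of text, holding the
	 * tos[] index of the head of that slot's arc chain (0 means
	 * empty).  tos[0].link doubles as the count of tostructs
	 * handed out, so index 0 never refers to a real arc.
	 */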
	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * First time traversing this arc.
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * Arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * Have to go looking down the chain for it.
	 * top points to what we are looking at,
	 * prevtop points to the previous top.
	 * We know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is the end of the chain and none of the
			 * chain had top->selfpc == selfpc, so we
			 * allocate a new tostruct and link it to the
			 * head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * Otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * There it is.  Increment its count and
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
	atomic_store_rel_int(&p->state, GMON_PROF_ON);
#endif
	return;
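	/*
	 * Ran out of tostructs: latch GMON_PROF_ERROR.  The state is
	 * never reset, so (outside the kernel) the ON -> BUSY cmpset
	 * above fails from now on and profiling effectively stops.
	 */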
overflow:
	atomic_store_rel_int(&p->state, GMON_PROF_ERROR);
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

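/*
 * Usage sketch (not part of this file): a program is typically
 * profiled as
 *
 *	cc -pg -o prog prog.c
 *	./prog			-- writes gmon.out at exit
 *	gprof prog gmon.out
 *
 * where the profiling startup code calls monstartup() to allocate the
 * buffers behind _gmonparam and arranges for them to be dumped to
 * gmon.out at exit; see moncontrol(3) and gprof(1).
 */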
#ifdef GUPROF
void
mexitcount(uintfptr_t selfpc)
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		u_int delta;

		/*
		 * Solidify the count for the current function.
		 */
		delta = cputime() - p->mexitcount_overhead;
		p->cputime_overhead_resid += p->cputime_overhead_frac;
		p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
		if ((int)delta < 0)
			*p->mexitcount_count += delta + p->mexitcount_overhead
						- p->cputime_overhead;
		else if (delta != 0) {
			if (p->cputime_overhead_resid >= CALIB_SCALE) {
				p->cputime_overhead_resid -= CALIB_SCALE;
				++*p->cputime_count;
				--delta;
			}
			if (delta != 0) {
				if (p->mexitcount_overhead_resid
				    >= CALIB_SCALE) {
					p->mexitcount_overhead_resid
					    -= CALIB_SCALE;
					++*p->mexitcount_count;
					--delta;
				}
				KCOUNT(p, selfpcdiff) += delta;
			}
			*p->mexitcount_count += p->mexitcount_overhead_sub;
		}
		*p->cputime_count += p->cputime_overhead;
	}
}
#endif /* GUPROF */