/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if !defined(_KERNEL) && defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/lib/libc/gmon/mcount.c 330897 2018-03-14 03:19:51Z eadler $");

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr(void);
void	btrap(void);
void	eintr(void);
void	user(void);
#endif
#include <machine/atomic.h>

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represent the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
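
/*
 * Illustrative sketch (not original code): a minimal, hypothetical
 * userland program exercising this machinery.  Built with "cc -pg",
 * the compiler emits an mcount call in every function prologue, so
 * each callee() entry records the main->callee arc; gprof(1) then
 * reads the gmon.out written at exit.  Kept under #if 0 so it is
 * never compiled here.
 */
#if 0
#include <stdio.h>

static void
callee(void)
{
	/*
	 * The prologue call to mcount() sees frompc = the return
	 * address into main() and selfpc = an address within callee().
	 */
	printf("callee\n");
}

int
main(void)
{
	callee();
	return (0);
}
#endif
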
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
	u_int delta;
#endif
	fptrdiff_t frompci;
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#ifdef _KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
	if (!atomic_cmpset_acq_int(&p->state, GMON_PROF_ON, GMON_PROF_BUSY))
		return;
#endif
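
/*
 * Illustrative sketch (not original code): the userland guard above,
 * restated with hypothetical names using C11 atomics.  The state word
 * is flipped from ON to BUSY with acquire semantics on entry, so a
 * profiler re-entered from a signal handler (or run after an error)
 * simply bails out instead of corrupting the tables.
 */
#if 0
#include <stdatomic.h>

enum { PROF_ON, PROF_BUSY, PROF_ERROR };
static _Atomic int prof_state = PROF_ON;

static int
prof_enter(void)
{
	int expected = PROF_ON;

	/* Succeeds only when the state was ON; leaves BUSY behind. */
	return (atomic_compare_exchange_strong_explicit(&prof_state,
	    &expected, PROF_BUSY, memory_order_acquire,
	    memory_order_relaxed));
}

static void
prof_exit(void)
{
	/* Release so table updates are visible before ON is observed. */
	atomic_store_explicit(&prof_state, PROF_ON, memory_order_release);
}
#endif
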
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif
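
/*
 * Illustrative sketch (not original code): the bounds test used above
 * and again below, factored into a hypothetical helper.  A pc belongs
 * to the profiled text only if its offset from lowpc lies in
 * [0, textsize); the unsigned subtraction also rejects pc < lowpc
 * via wraparound.
 */
#if 0
static int
pc_in_text(const struct gmonparam *gp, uintfptr_t pc)
{
	return (pc - gp->lowpc < gp->textsize);
}
#endif
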

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
				    - p->cputime_overhead;
	else if (delta != 0) {
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */
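
/*
 * Illustrative sketch (not original code): a standalone, hypothetical
 * demonstration of the fixed-point residual technique above.  A
 * fractional overhead is kept as a numerator over CALIB_SCALE; each
 * event adds it to a residual, and whenever the residual reaches
 * CALIB_SCALE one whole count is carried over.
 */
#if 0
#include <stdio.h>

#define	CALIB_SCALE	1000

int
main(void)
{
	unsigned int frac = 375;	/* 0.375 counts of overhead per event */
	unsigned int resid = 0, whole = 0;
	int i;

	for (i = 0; i < 8; i++) {
		resid += frac;
		if (resid >= CALIB_SCALE) {
			resid -= CALIB_SCALE;
			whole++;	/* carry one whole count */
		}
	}
	/* 8 * 0.375 = 3.0: prints "3 whole counts, residual 0/1000". */
	printf("%u whole counts, residual %u/%u\n", whole, resid,
	    (unsigned int)CALIB_SCALE);
	return (0);
}
#endif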

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif
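
/*
 * Illustrative sketch (not original code): the classification above,
 * written out as hypothetical predicates.  It assumes the exception
 * stubs are contiguous in the text segment, with trap handlers in
 * [btrap, bintr) and interrupt handlers in [bintr, eintr).
 */
#if 0
static int
selfpc_in_trap_stub(uintfptr_t pc)
{
	return (pc >= (uintfptr_t)btrap && pc < (uintfptr_t)bintr);
}

static int
selfpc_in_intr_stub(uintfptr_t pc)
{
	return (pc >= (uintfptr_t)bintr && pc < (uintfptr_t)eintr);
}
#endif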

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
	atomic_store_rel_int(&p->state, GMON_PROF_ON);
#endif
	return;
overflow:
	atomic_store_rel_int(&p->state, GMON_PROF_ERROR);
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}
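
/*
 * Illustrative sketch (not original code): a stripped-down,
 * hypothetical model of the froms[]/tos[] arc table updated above.
 * froms[] maps a hashed call-site offset to the head of a chain of
 * arc records in tos[]; tos[0].link doubles as the high-water
 * allocator, and index 0 marks the end of a chain.  Move-to-front
 * reordering is omitted here for brevity.
 */
#if 0
#include <stdio.h>

#define	NFROMS	64
#define	NTOS	64

struct arc {
	unsigned long	selfpc;	/* callee address */
	long		count;	/* traversals of this arc */
	unsigned short	link;	/* next record on the chain; 0 = end */
};

static unsigned short froms[NFROMS];	/* chain heads, hashed by caller pc */
static struct arc tos[NTOS];		/* tos[0].link = last slot allocated */

static void
record_arc(unsigned long frompc, unsigned long selfpc)
{
	unsigned short *headp = &froms[frompc % NFROMS];
	unsigned short idx;

	for (idx = *headp; idx != 0; idx = tos[idx].link)
		if (tos[idx].selfpc == selfpc) {
			tos[idx].count++;	/* existing arc */
			return;
		}
	if ((idx = ++tos[0].link) >= NTOS)
		return;			/* table full; real code flags GMON_PROF_ERROR */
	tos[idx].selfpc = selfpc;
	tos[idx].count = 1;
	tos[idx].link = *headp;		/* link new record at chain head */
	*headp = idx;
}

int
main(void)
{
	record_arc(0x100, 0x200);
	record_arc(0x100, 0x200);
	record_arc(0x100, 0x300);
	printf("%d arcs allocated\n", (int)tos[0].link);	/* prints 2 */
	return (0);
}
#endif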

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

#ifdef GUPROF
void
mexitcount(uintfptr_t selfpc)
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		u_int delta;

		/*
		 * Solidify the count for the current function.
		 */
		delta = cputime() - p->mexitcount_overhead;
		p->cputime_overhead_resid += p->cputime_overhead_frac;
		p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
		if ((int)delta < 0)
			*p->mexitcount_count += delta + p->mexitcount_overhead
						- p->cputime_overhead;
		else if (delta != 0) {
			if (p->cputime_overhead_resid >= CALIB_SCALE) {
				p->cputime_overhead_resid -= CALIB_SCALE;
				++*p->cputime_count;
				--delta;
			}
			if (delta != 0) {
				if (p->mexitcount_overhead_resid
				    >= CALIB_SCALE) {
					p->mexitcount_overhead_resid
					    -= CALIB_SCALE;
					++*p->mexitcount_count;
					--delta;
				}
				KCOUNT(p, selfpcdiff) += delta;
			}
			*p->mexitcount_count += p->mexitcount_overhead_sub;
		}
		*p->cputime_count += p->cputime_overhead;
	}
}
#endif /* GUPROF */