--- mcount.c	(8870)
+++ mcount.c	(13107)
 /*-
  * Copyright (c) 1983, 1992, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright

--- 24 unchanged lines hidden ---

 
 #if !defined(lint) && !defined(KERNEL) && defined(LIBC_SCCS)
 static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
 #endif
 
 #include <sys/param.h>
 #include <sys/gmon.h>
 #ifdef KERNEL
-#include <i386/include/cpufunc.h>
+#include <sys/systm.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+void	bintr __P((void));
+void	btrap __P((void));
+void	eintr __P((void));
+void	user __P((void));
 #endif
 
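The added prototypes use the BSD __P() wrapper from <sys/cdefs.h>, a pre-existing convention (not introduced by this diff) that lets one declaration serve both ANSI and pre-ANSI compilers. A simplified sketch of its definition:

/* Simplified from <sys/cdefs.h>: */
#if defined(__STDC__) || defined(__cplusplus)
#define	__P(protos)	protos		/* ANSI C: keep the prototype */
#else
#define	__P(protos)	()		/* traditional C: drop it */
#endif

void	bintr __P((void));	/* expands to "void bintr(void);" or "void bintr();" */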
 /*
  * mcount is called on entry to each function compiled with the profiling
  * switch set.  _mcount(), which is declared in a machine-dependent way
  * with _MCOUNT_DECL, does the actual work and is either inlined into a
  * C routine or called by an assembly stub.  In any case, this magic is
  * taken care of by the MCOUNT definition in <machine/profile.h>.
  *
  * _mcount updates data structures that represent traversals of the
  * program's call graph edges.  frompc and selfpc are the return
  * address and function address that represents the given call graph edge.
  *
  * Note: the original BSD code used the same variable (frompcindex) for
  * both frompcindex and frompc.  Any reasonable, modern compiler will
  * perform this optimization.
  */
 _MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
-        register u_long frompc, selfpc;
+        register fptrint_t frompc, selfpc;
 {
+#ifdef GUPROF
+        u_int delta;
+#endif
+        register fptrdiff_t frompci;
         register u_short *frompcindex;
         register struct tostruct *top, *prevtop;
         register struct gmonparam *p;
         register long toindex;
 #ifdef KERNEL
-        register int s;
-        u_long save_eflags;
+        register int s;			/* XXX */
+        u_long save_eflags;		/* XXX */
 #endif
 
         p = &_gmonparam;
+#ifndef GUPROF			/* XXX */
         /*
          * check that we are profiling
          * and that we aren't recursively invoked.
          */
         if (p->state != GMON_PROF_ON)
                 return;
+#endif
 #ifdef KERNEL
         MCOUNT_ENTER;
 #else
         p->state = GMON_PROF_BUSY;
 #endif
+        frompci = frompc - p->lowpc;
+
+#ifdef KERNEL
         /*
-         * check that frompcindex is a reasonable pc value.
+         * When we are called from an exception handler, frompci may be
+         * for a user address.  Convert such frompci's to the index of
+         * user() to merge all user counts.
+         */
+        if (frompci >= p->textsize) {
+                if (frompci + p->lowpc
+                    >= (fptrint_t)(VM_MAXUSER_ADDRESS + UPAGES * NBPG))
+                        goto done;
+                frompci = (fptrint_t)user - p->lowpc;
+                if (frompci >= p->textsize)
+                        goto done;
+        }
+#endif /* KERNEL */
+
+#ifdef GUPROF
+        if (p->state != GMON_PROF_HIRES)
+                goto skip_guprof_stuff;
+        /*
+         * Look at the clock and add the count of clock cycles since the
+         * clock was last looked at to a counter for frompc.  This
+         * solidifies the count for the function containing frompc and
+         * effectively starts another clock for the current function.
+         * The count for the new clock will be solidified when another
+         * function call is made or the function returns.
+         *
+         * We use the usual sampling counters since they can be located
+         * efficiently.  4-byte counters are usually necessary.
+         *
+         * There are many complications for subtracting the profiling
+         * overheads from the counts for normal functions and adding
+         * them to the counts for mcount(), mexitcount() and cputime().
+         * We attempt to handle fractional cycles, but the overheads
+         * are usually underestimated because they are calibrated for
+         * a simpler than usual setup.
+         */
+        delta = cputime() - p->mcount_overhead;
+        p->cputime_overhead_resid += p->cputime_overhead_frac;
+        p->mcount_overhead_resid += p->mcount_overhead_frac;
+        if ((int)delta < 0)
+                *p->mcount_count += delta + p->mcount_overhead
+                                    - p->cputime_overhead;
+        else if (delta != 0) {
+                if (p->cputime_overhead_resid >= CALIB_SCALE) {
+                        p->cputime_overhead_resid -= CALIB_SCALE;
+                        ++*p->cputime_count;
+                        --delta;
+                }
+                if (delta != 0) {
+                        if (p->mcount_overhead_resid >= CALIB_SCALE) {
+                                p->mcount_overhead_resid -= CALIB_SCALE;
+                                ++*p->mcount_count;
+                                --delta;
+                        }
+                        KCOUNT(p, frompci) += delta;
+                }
+                *p->mcount_count += p->mcount_overhead_sub;
+        }
+        *p->cputime_count += p->cputime_overhead;
+skip_guprof_stuff:
+#endif /* GUPROF */
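The residual arithmetic in the GUPROF block above is plain fixed-point accumulation: a fractional overhead of frac/CALIB_SCALE cycles per event is added to an integer residual, and one whole cycle is retired whenever the residual crosses CALIB_SCALE. A minimal standalone sketch of the same technique, with hypothetical values:

#include <stdio.h>

#define CALIB_SCALE	1000		/* 1 unit == 1/1000 cycle */

int
main(void)
{
        unsigned frac = 375;		/* hypothetical overhead: 0.375 cycles/event */
        unsigned resid = 0, whole = 0;
        int i;

        for (i = 0; i < 8; i++) {
                resid += frac;
                if (resid >= CALIB_SCALE) {
                        resid -= CALIB_SCALE;
                        whole++;	/* charge one whole cycle */
                }
        }
        /* 8 events * 0.375 cycles == 3 whole cycles charged */
        printf("whole cycles charged: %u\n", whole);
        return (0);
}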
+
+#ifdef KERNEL
+        /*
+         * When we are called from an exception handler, frompc is faked
+         * to be for where the exception occurred.  We've just solidified
+         * the count for there.  Now convert frompci to the index of btrap()
+         * for trap handlers and bintr() for interrupt handlers to make
+         * exceptions appear in the call graph as calls from btrap() and
+         * bintr() instead of calls from all over.
+         */
+        if ((fptrint_t)selfpc >= (fptrint_t)btrap
+            && (fptrint_t)selfpc < (fptrint_t)eintr) {
+                if ((fptrint_t)selfpc >= (fptrint_t)bintr)
+                        frompci = (fptrint_t)bintr - p->lowpc;
+                else
+                        frompci = (fptrint_t)btrap - p->lowpc;
+        }
+#endif /* KERNEL */
+
+        /*
+         * check that frompc is a reasonable pc value.
          * for example: signal catchers get called from the stack,
          * not from text space. too bad.
          */
-        frompc -= p->lowpc;
-        if (frompc > p->textsize)
+        if (frompci >= p->textsize)
                 goto done;
 
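The next hunk switches the froms[] index computation from frompc to the new frompci offset; the indexing scheme itself is unchanged. Each u_short slot in froms[] covers hashfraction * sizeof(*froms) bytes of text, so a text offset maps to a slot by integer division. A standalone sketch with hypothetical values:

#include <stdio.h>

int
main(void)
{
        unsigned long frompci = 0x1234;		/* hypothetical text offset */
        unsigned long hashfraction = 2;		/* hypothetical HASHFRACTION */
        unsigned long slot;

        /* same arithmetic as the hunk below: one u_short slot per
           hashfraction * sizeof(u_short) == 4 bytes of text */
        slot = frompci / (hashfraction * sizeof(unsigned short));
        printf("froms[] slot: %lu\n", slot);	/* 0x1234 / 4 == 1165 */
        return (0);
}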
-        frompcindex = &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))];
+        frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
         toindex = *frompcindex;
         if (toindex == 0) {
                 /*
                  * first time traversing this arc
                  */
                 toindex = ++p->tos[0].link;
                 if (toindex >= p->tolimit)
                         /* halt further profiling */

--- 74 unchanged lines hidden ---

         return;
 }
 
 /*
  * Actual definition of mcount function.  Defined in <machine/profile.h>,
  * which is included by <sys/gmon.h>.
  */
 MCOUNT
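For readers following the _MCOUNT_DECL/MCOUNT indirection: the real definitions live in the machine-dependent <machine/profile.h>, and the entry glue is normally an assembly stub. The following is only a rough, hypothetical C approximation of what that glue must accomplish, not the actual header; it assumes GCC and frame pointers, since __builtin_return_address() with a nonzero level needs both:

/* Hypothetical approximation of the <machine/profile.h> glue. */
#define	_MCOUNT_DECL	static __inline void _mcount	/* K&R-style definition follows at the use site */

#define	MCOUNT							\
void								\
mcount(void)							\
{								\
        fptrint_t selfpc, frompc;				\
								\
        /* selfpc: the profiled function that called mcount() */ \
        selfpc = (fptrint_t)__builtin_return_address(0);	\
        /* frompc: the return address in that function's caller, \
           i.e. the other end of the call-graph edge */		\
        frompc = (fptrint_t)__builtin_return_address(1);	\
        _mcount(frompc, selfpc);				\
}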
+
+#ifdef GUPROF
+void
+mexitcount(selfpc)
+        fptrint_t selfpc;
+{
+        struct gmonparam *p;
+        fptrint_t selfpcdiff;
+
+        p = &_gmonparam;
+        selfpcdiff = selfpc - (fptrint_t)p->lowpc;
+        if (selfpcdiff < p->textsize) {
+                u_int delta;
+
+                /*
+                 * Solidify the count for the current function.
+                 */
+                delta = cputime() - p->mexitcount_overhead;
+                p->cputime_overhead_resid += p->cputime_overhead_frac;
+                p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
+                if ((int)delta < 0)
+                        *p->mexitcount_count += delta + p->mexitcount_overhead
+                                                - p->cputime_overhead;
+                else if (delta != 0) {
+                        if (p->cputime_overhead_resid >= CALIB_SCALE) {
+                                p->cputime_overhead_resid -= CALIB_SCALE;
+                                ++*p->cputime_count;
+                                --delta;
+                        }
+                        if (delta != 0) {
+                                if (p->mexitcount_overhead_resid
+                                    >= CALIB_SCALE) {
+                                        p->mexitcount_overhead_resid
+                                            -= CALIB_SCALE;
+                                        ++*p->mexitcount_count;
+                                        --delta;
+                                }
+                                KCOUNT(p, selfpcdiff) += delta;
+                        }
+                        *p->mexitcount_count += p->mexitcount_overhead_sub;
+                }
+                *p->cputime_count += p->cputime_overhead;
+        }
+}
+#endif /* GUPROF */
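Taken together, _mcount() and the new mexitcount() hand the high-resolution clock from one function to the next: each hook reads cputime() and solidifies the cycles elapsed since the previous reading into the function that was executing. A hypothetical timeline, with illustrative names:

/*
 * Hypothetical GUPROF clock handoff:
 *
 *   t0  f() is executing; the "current clock" belongs to f
 *   t1  f() calls g(); g's entry hook runs _mcount()
 *       -> cycles in [t0, t1) are solidified into KCOUNT for f
 *          (minus the calibrated mcount/cputime overheads)
 *   t2  g() returns; its exit hook runs mexitcount(g)
 *       -> cycles in [t1, t2) are solidified into KCOUNT for g,
 *          and the clock belongs to f again
 */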