/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_prof.c	8.3 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/malloc.h>
#include <sys/gmon.h>
#undef MCOUNT

static MALLOC_DEFINE(M_GPROF, "gprof", "kernel profiling buffer");

static void kmstartup(void *);
SYSINIT(kmem, SI_SUB_KPROF, SI_ORDER_FIRST, kmstartup, NULL);

struct gmonparam _gmonparam = { GMON_PROF_OFF };

#ifdef GUPROF
void
nullfunc_loop_profiled(void)
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		nullfunc_profiled();
}

#define	nullfunc_loop_profiled_end	nullfunc_profiled	/* XXX */

void
nullfunc_profiled(void)
{
}
#endif /* GUPROF */

/*
 * Update the histograms to support extending the text region arbitrarily.
 * This is done somewhat naively (no sparse regions), so it wastes a small
 * amount of memory, but it works well enough to allow profiling of KLDs.
 */
void
kmupetext(uintfptr_t nhighpc)
{
	struct gmonparam np;	/* slightly large */
	struct gmonparam *p = &_gmonparam;
	char *cp;

	GIANT_REQUIRED;
	bcopy(p, &np, sizeof(*p));
	np.highpc = ROUNDUP(nhighpc, HISTFRACTION * sizeof(HISTCOUNTER));
	if (np.highpc <= p->highpc)
		return;
	np.textsize = np.highpc - p->lowpc;
	np.kcountsize = np.textsize / HISTFRACTION;
	np.hashfraction = HASHFRACTION;
	np.fromssize = np.textsize / HASHFRACTION;
	np.tolimit = np.textsize * ARCDENSITY / 100;
	if (np.tolimit < MINARCS)
		np.tolimit = MINARCS;
	else if (np.tolimit > MAXARCS)
		np.tolimit = MAXARCS;
	np.tossize = np.tolimit * sizeof(struct tostruct);
	cp = malloc(np.kcountsize + np.fromssize + np.tossize,
	    M_GPROF, M_WAITOK);
	/*
	 * Check for something else extending highpc while we slept.
	 */
	if (np.highpc <= p->highpc) {
		free(cp, M_GPROF);
		return;
	}
	np.tos = (struct tostruct *)cp;
	cp += np.tossize;
	np.kcount = (HISTCOUNTER *)cp;
	cp += np.kcountsize;
	np.froms = (u_short *)cp;
#ifdef GUPROF
	/* Reinitialize pointers to overhead counters. */
	np.cputime_count = &KCOUNT(&np, PC_TO_I(&np, cputime));
	np.mcount_count = &KCOUNT(&np, PC_TO_I(&np, mcount));
	np.mexitcount_count = &KCOUNT(&np, PC_TO_I(&np, mexitcount));
#endif
	critical_enter();
	bcopy(p->tos, np.tos, p->tossize);
	bzero((char *)np.tos + p->tossize, np.tossize - p->tossize);
	bcopy(p->kcount, np.kcount, p->kcountsize);
	bzero((char *)np.kcount + p->kcountsize, np.kcountsize -
	    p->kcountsize);
	bcopy(p->froms, np.froms, p->fromssize);
	bzero((char *)np.froms + p->fromssize, np.fromssize - p->fromssize);
	cp = (char *)p->tos;
	bcopy(&np, p, sizeof(*p));
	critical_exit();
	free(cp, M_GPROF);
}
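
/*
 * A purely illustrative sketch (not code taken from this file): a module
 * load path that knows the new end of kernel text could grow the profiling
 * buffers with something like
 *
 *	kmupetext((uintfptr_t)new_text_end);
 *
 * where new_text_end is a hypothetical address supplied by the linker.
 */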

static void
kmstartup(void *dummy)
{
	char *cp;
	struct gmonparam *p = &_gmonparam;
#ifdef GUPROF
	int cputime_overhead;
	int empty_loop_time;
	int i;
	int mcount_overhead;
	int mexitcount_overhead;
	int nullfunc_loop_overhead;
	int nullfunc_loop_profiled_time;
	uintfptr_t tmp_addr;
#endif

	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
	p->lowpc = ROUNDDOWN((u_long)btext, HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = ROUNDUP((u_long)etext, HISTFRACTION * sizeof(HISTCOUNTER));
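	/*
	 * A worked example of the rounding above, with hypothetical values
	 * (not asserted by this file): if HISTFRACTION were 2 and
	 * HISTCOUNTER a 2-byte type, lowpc and highpc would be rounded to
	 * 4-byte multiples, so textsize is a multiple of 4 and the
	 * kcountsize computed below is an exact byte count holding
	 * textsize / 4 counters, i.e. one counter per 4 bytes of text.
	 */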
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%lu [%jx..%jx]\n",
	    p->textsize, (uintmax_t)p->lowpc, (uintmax_t)p->highpc);
	p->kcountsize = p->textsize / HISTFRACTION;
	p->hashfraction = HASHFRACTION;
	p->fromssize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	p->tossize = p->tolimit * sizeof(struct tostruct);
	cp = (char *)malloc(p->kcountsize + p->fromssize + p->tossize,
	    M_GPROF, M_WAITOK | M_ZERO);
	p->tos = (struct tostruct *)cp;
	cp += p->tossize;
	p->kcount = (HISTCOUNTER *)cp;
	cp += p->kcountsize;
	p->froms = (u_short *)cp;
	p->histcounter_type = FUNCTION_ALIGNMENT / HISTFRACTION * NBBY;

#ifdef GUPROF
	/* Signed counters. */
	p->histcounter_type = -p->histcounter_type;

	/* Initialize pointers to overhead counters. */
	p->cputime_count = &KCOUNT(p, PC_TO_I(p, cputime));
	p->mcount_count = &KCOUNT(p, PC_TO_I(p, mcount));
	p->mexitcount_count = &KCOUNT(p, PC_TO_I(p, mexitcount));

	/*
	 * Disable interrupts to avoid interference while we calibrate
	 * things.
	 */
	critical_enter();

	/*
	 * Determine overheads.
	 * XXX this needs to be repeated for each useful timer/counter.
	 */
	cputime_overhead = 0;
	startguprof(p);
	for (i = 0; i < CALIB_SCALE; i++)
		cputime_overhead += cputime();

	empty_loop();
	startguprof(p);
	empty_loop();
	empty_loop_time = cputime();

	nullfunc_loop_profiled();

	/*
	 * Start profiling.  There won't be any normal function calls since
	 * interrupts are disabled, but we will call the profiling routines
	 * directly to determine their overheads.
	 */
	p->state = GMON_PROF_HIRES;

	startguprof(p);
	nullfunc_loop_profiled();

	startguprof(p);
	for (i = 0; i < CALIB_SCALE; i++)
		MCOUNT_OVERHEAD(sys_profil);
	mcount_overhead = KCOUNT(p, PC_TO_I(p, sys_profil));

	startguprof(p);
	for (i = 0; i < CALIB_SCALE; i++)
		MEXITCOUNT_OVERHEAD();
	MEXITCOUNT_OVERHEAD_GETLABEL(tmp_addr);
	mexitcount_overhead = KCOUNT(p, PC_TO_I(p, tmp_addr));

	p->state = GMON_PROF_OFF;
	stopguprof(p);

	critical_exit();

	nullfunc_loop_profiled_time = 0;
	for (tmp_addr = (uintfptr_t)nullfunc_loop_profiled;
	     tmp_addr < (uintfptr_t)nullfunc_loop_profiled_end;
	     tmp_addr += HISTFRACTION * sizeof(HISTCOUNTER))
		nullfunc_loop_profiled_time += KCOUNT(p, PC_TO_I(p, tmp_addr));
#define CALIB_DOSCALE(count)	(((count) + CALIB_SCALE / 3) / CALIB_SCALE)
#define	c2n(count, freq)	((int)((count) * 1000000000LL / (freq)))
	printf("cputime %d, empty_loop %d, nullfunc_loop_profiled %d, mcount %d, mexitcount %d\n",
	       CALIB_DOSCALE(c2n(cputime_overhead, p->profrate)),
	       CALIB_DOSCALE(c2n(empty_loop_time, p->profrate)),
	       CALIB_DOSCALE(c2n(nullfunc_loop_profiled_time, p->profrate)),
	       CALIB_DOSCALE(c2n(mcount_overhead, p->profrate)),
	       CALIB_DOSCALE(c2n(mexitcount_overhead, p->profrate)));
	cputime_overhead -= empty_loop_time;
	mcount_overhead -= empty_loop_time;
	mexitcount_overhead -= empty_loop_time;

	/*-
	 * Profiling overheads are determined by the times between the
	 * following events:
	 *	MC1: mcount() is called
	 *	MC2: cputime() (called from mcount()) latches the timer
	 *	MC3: mcount() completes
	 *	ME1: mexitcount() is called
	 *	ME2: cputime() (called from mexitcount()) latches the timer
	 *	ME3: mexitcount() completes.
	 * The times between the events vary slightly depending on instruction
	 * combination and cache misses, etc.  Attempt to determine the
	 * minimum times.  These can be subtracted from the profiling times
	 * without much risk of reducing the profiling times below what they
	 * would be when profiling is not configured.  Abbreviate:
	 *	ab = minimum time between MC1 and MC3
	 *	a  = minimum time between MC1 and MC2
	 *	b  = minimum time between MC2 and MC3
	 *	cd = minimum time between ME1 and ME3
	 *	c  = minimum time between ME1 and ME2
	 *	d  = minimum time between ME2 and ME3.
	 * These satisfy the relations:
	 *	ab            <= mcount_overhead		(just measured)
	 *	a + b         <= ab
	 *	        cd    <= mexitcount_overhead		(just measured)
	 *	        c + d <= cd
	 *	a         + d <= nullfunc_loop_profiled_time	(just measured)
	 *	a >= 0, b >= 0, c >= 0, d >= 0.
	 * Assume that ab and cd are equal to the minimums.
	 */
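	/*
	 * A sketch of how the assignments below map onto the notation
	 * above (an editorial reading, not asserted by the original
	 * comment): d cannot be measured directly, so
	 * mexitcount_post_overhead estimates it as (ab - (a + d)) / 4;
	 * the remaining assignments then recover roughly a for
	 * mcount_pre_overhead, b for mcount_post_overhead and c for
	 * mexitcount_pre_overhead.
	 */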
	p->cputime_overhead = CALIB_DOSCALE(cputime_overhead);
	p->mcount_overhead = CALIB_DOSCALE(mcount_overhead - cputime_overhead);
	p->mexitcount_overhead = CALIB_DOSCALE(mexitcount_overhead
					       - cputime_overhead);
	nullfunc_loop_overhead = nullfunc_loop_profiled_time - empty_loop_time;
	p->mexitcount_post_overhead = CALIB_DOSCALE((mcount_overhead
						     - nullfunc_loop_overhead)
						    / 4);
	p->mexitcount_pre_overhead = p->mexitcount_overhead
				     + p->cputime_overhead
				     - p->mexitcount_post_overhead;
	p->mcount_pre_overhead = CALIB_DOSCALE(nullfunc_loop_overhead)
				 - p->mexitcount_post_overhead;
	p->mcount_post_overhead = p->mcount_overhead
				  + p->cputime_overhead
				  - p->mcount_pre_overhead;
	printf(
"Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d nsec\n",
	       c2n(p->cputime_overhead, p->profrate),
	       c2n(p->mcount_overhead, p->profrate),
	       c2n(p->mcount_pre_overhead, p->profrate),
	       c2n(p->mcount_post_overhead, p->profrate),
	       c2n(p->cputime_overhead, p->profrate),
	       c2n(p->mexitcount_overhead, p->profrate),
	       c2n(p->mexitcount_pre_overhead, p->profrate),
	       c2n(p->mexitcount_post_overhead, p->profrate));
	printf(
"Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d cycles\n",
	       p->cputime_overhead, p->mcount_overhead,
	       p->mcount_pre_overhead, p->mcount_post_overhead,
	       p->cputime_overhead, p->mexitcount_overhead,
	       p->mexitcount_pre_overhead, p->mexitcount_post_overhead);
#endif /* GUPROF */
}

/*
 * Return kernel profiling information.
 */
static int
sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	struct gmonparam *gp = &_gmonparam;
	int error;
	int state;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case GPROF_STATE:
		state = gp->state;
		error = sysctl_handle_int(oidp, &state, 0, req);
		if (error)
			return (error);
		if (!req->newptr)
			return (0);
		if (state == GMON_PROF_OFF) {
			gp->state = state;
			PROC_LOCK(&proc0);
			stopprofclock(&proc0);
			PROC_UNLOCK(&proc0);
			stopguprof(gp);
		} else if (state == GMON_PROF_ON) {
			gp->state = GMON_PROF_OFF;
			stopguprof(gp);
			gp->profrate = profhz;
			PROC_LOCK(&proc0);
			startprofclock(&proc0);
			PROC_UNLOCK(&proc0);
			gp->state = state;
#ifdef GUPROF
		} else if (state == GMON_PROF_HIRES) {
			gp->state = GMON_PROF_OFF;
			PROC_LOCK(&proc0);
			stopprofclock(&proc0);
			PROC_UNLOCK(&proc0);
			startguprof(gp);
			gp->state = state;
#endif
		} else if (state != gp->state)
			return (EINVAL);
		return (0);
	case GPROF_COUNT:
		return (sysctl_handle_opaque(oidp,
			gp->kcount, gp->kcountsize, req));
	case GPROF_FROMS:
		return (sysctl_handle_opaque(oidp,
			gp->froms, gp->fromssize, req));
	case GPROF_TOS:
		return (sysctl_handle_opaque(oidp,
			gp->tos, gp->tossize, req));
	case GPROF_GMONPARAM:
		return (sysctl_handle_opaque(oidp, gp, sizeof *gp, req));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

static SYSCTL_NODE(_kern, KERN_PROF, prof, CTLFLAG_RW, sysctl_kern_prof, "");
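
/*
 * For illustration only (a sketch, not an interface defined in this file):
 * a userland tool such as kgmon(8) would toggle kernel profiling through
 * this node using the CTL_KERN / KERN_PROF / GPROF_STATE MIB names from
 * <sys/sysctl.h> and <sys/gmon.h>, roughly as follows:
 *
 *	int mib[3] = { CTL_KERN, KERN_PROF, GPROF_STATE };
 *	int state = GMON_PROF_ON;
 *
 *	if (sysctl(mib, 3, NULL, NULL, &state, sizeof(state)) == -1)
 *		err(1, "sysctl");
 */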
#endif /* GPROF */

/*
 * Profiling system call.
 *
 * The scale factor is a fixed point number with 16 bits of fraction, so that
 * 1.0 is represented as 0x10000.  A scale factor of 0 turns off profiling.
 */
#ifndef _SYS_SYSPROTO_H_
struct profil_args {
	caddr_t	samples;
	size_t	size;
	size_t	offset;
	u_int	scale;
};
#endif
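
/*
 * A hedged sketch of the userland view (not part of this file): to profile
 * 64K of text starting at a hypothetical address `base' with one u_short
 * counter per 2 bytes of text, a process would pass the 1.0 scale factor:
 *
 *	u_short counters[32768];
 *
 *	profil((char *)counters, sizeof(counters), (size_t)base, 0x10000);
 */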
/* ARGSUSED */
int
sys_profil(struct thread *td, struct profil_args *uap)
{
	struct uprof *upp;
	struct proc *p;

	if (uap->scale > (1 << 16))
		return (EINVAL);

	p = td->td_proc;
	if (uap->scale == 0) {
		PROC_LOCK(p);
		stopprofclock(p);
		PROC_UNLOCK(p);
		return (0);
	}
	PROC_LOCK(p);
	upp = &td->td_proc->p_stats->p_prof;
	PROC_PROFLOCK(p);
	upp->pr_off = uap->offset;
	upp->pr_scale = uap->scale;
	upp->pr_base = uap->samples;
	upp->pr_size = uap->size;
	PROC_PROFUNLOCK(p);
	startprofclock(p);
	PROC_UNLOCK(p);

	return (0);
}

/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 */
#define	PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
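
/*
 * A worked example with hypothetical values: pr_off = 0x1000,
 * pr_scale = 0x8000 (i.e. 0.5) and pc = 0x1234 give
 * (0x234 * 0x8000) >> 16 = 0x11a, and the ~1 mask keeps the index
 * aligned to a u_short counter, so the nearby pcs 0x1234..0x1237 all
 * bump the counter at byte offset 0x11a in the sample buffer.
 */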

/*
 * Collect user-level profiling statistics; called on a profiling tick,
 * when a process is running in user-mode.  This routine may be called
 * from an interrupt context.  We try to update the user profiling buffers
 * cheaply with fuswintr() and suswintr().  If that fails, we revert to
 * an AST that will vector us to trap() with a context in which copyin
 * and copyout will work.  Trap will then call addupc_task().
 *
 * Note that we may (rarely) not get around to the AST soon enough, and
 * lose profile ticks when the next tick overwrites this one, but in this
 * case the system is overloaded and the profile is probably already
 * inaccurate.
 */
void
addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks)
{
	struct uprof *prof;
	caddr_t addr;
	u_int i;
	int v;

	if (ticks == 0)
		return;
	prof = &td->td_proc->p_stats->p_prof;
	PROC_PROFLOCK(td->td_proc);
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
		PROC_PROFUNLOCK(td->td_proc);
		return;			/* out of range; ignore */
	}

	addr = prof->pr_base + i;
	PROC_PROFUNLOCK(td->td_proc);
	if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
		td->td_profil_addr = pc;
		td->td_profil_ticks = ticks;
		td->td_pflags |= TDP_OWEUPC;
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}
}
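
/*
 * The slow path above is completed outside this file; as a rough sketch of
 * the consumer (not the authoritative code), the AST handler does something
 * like:
 *
 *	if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
 *		addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
 *		td->td_pflags &= ~TDP_OWEUPC;
 *	}
 */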

/*
 * Much like before, but we can afford to take faults here.  If the
 * update fails, we simply turn off profiling.
 */
void
addupc_task(struct thread *td, uintfptr_t pc, u_int ticks)
{
	struct proc *p = td->td_proc;
	struct uprof *prof;
	caddr_t addr;
	u_int i;
	u_short v;
	int stop = 0;

	if (ticks == 0)
		return;

	PROC_LOCK(p);
	if (!(p->p_flag & P_PROFIL)) {
		PROC_UNLOCK(p);
		return;
	}
	p->p_profthreads++;
	prof = &p->p_stats->p_prof;
	PROC_PROFLOCK(p);
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
		PROC_PROFUNLOCK(p);
		goto out;
	}

	addr = prof->pr_base + i;
	PROC_PROFUNLOCK(p);
	PROC_UNLOCK(p);
	if (copyin(addr, &v, sizeof(v)) == 0) {
		v += ticks;
		if (copyout(&v, addr, sizeof(v)) == 0) {
			PROC_LOCK(p);
			goto out;
		}
	}
	stop = 1;
	PROC_LOCK(p);

out:
	if (--p->p_profthreads == 0) {
		if (p->p_flag & P_STOPPROF) {
			wakeup(&p->p_profthreads);
			p->p_flag &= ~P_STOPPROF;
			stop = 0;
		}
	}
	if (stop)
		stopprofclock(p);
	PROC_UNLOCK(p);
}