1/*
2 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*
30 * Called from a trigger. Actually takes the data from the different
31 * modules and puts them in a buffer
32 */
33
34#include <mach/mach_types.h>
35#include <machine/machine_routines.h>
36// #include <libkern/libkern.h>
37#include <kern/kalloc.h>
38#include <kern/debug.h> /* panic */
39#include <kern/thread.h>
40#include <sys/errno.h>
41
42#include <chud/chud_xnu.h>
43#include <kperf/kperf.h>
44
45#include <kperf/buffer.h>
46#include <kperf/timetrigger.h>
47#include <kperf/threadinfo.h>
48#include <kperf/callstack.h>
49#include <kperf/sample.h>
50#include <kperf/filter.h>
51#include <kperf/action.h>
52#include <kperf/context.h>
53#include <kperf/ast.h>
54
55#define ACTION_MAX 32
56
57/* XXX: callback handler from chudxnu */
58/* FIXME: hook this up to something */
59//void (*kperf_thread_ast_handler)(thread_t);
60
/* the list of different actions to take */
struct action
{
	/* bitmask of SAMPLER_* flags: which samplers to run when this
	 * action fires (see kperf_sample_internal)
	 */
	unsigned sample;
};
66
/* the list of actions: actionc entries in actionv. Grown (never
 * shrunk) by kperf_action_set_count(); indexed by actionid.
 */
static unsigned actionc = 0;
static struct action *actionv = NULL;
70
71
/* Do the real work! */
/* this can be called in any context ... right? */
/*
 * Collect the samplers selected by sample_what into sbuf, then log the
 * results to the trace buffer with interrupts disabled so the records
 * are not split.
 *
 * sbuf        - scratch buffer the sampled data is written into
 * context     - current thread/pid the sample describes
 * sample_what - bitmask of SAMPLER_* flags selecting what to collect
 * pend_user   - if TRUE, user-space samplers (ustack, tinfo-extra) are
 *               not run inline; they are pended to run at AST time via
 *               kperf_*_pend(), and only begin/end + pend markers are
 *               logged here
 *
 * Returns SAMPLE_CONTINUE in all current paths.
 */
static kern_return_t
kperf_sample_internal( struct kperf_sample *sbuf,
              struct kperf_context *context,
              unsigned sample_what, boolean_t pend_user )
{
	boolean_t enabled;
	int did_ucallstack = 0, did_tinfo_extra = 0;

	/* not much point continuing here, but what to do ? return
	 * Shutdown? cut a tracepoint and continue?
	 */
	if( sample_what == 0 )
		return SAMPLE_CONTINUE;

	/* pid 0 is the kernel; user-space samplers are skipped for it */
	int is_kernel = (context->cur_pid == 0);

	/*  an event occurred. Sample everything and dump it in a
	 *  buffer.
	 */

	/* collect data from samplers */
	if( sample_what & SAMPLER_TINFO ) {
		kperf_threadinfo_sample( &sbuf->threadinfo, context );

		/* XXX FIXME This drops events when the thread is idle.
		 * This should be configurable. */
		if (sbuf->threadinfo.runmode & 0x40)
			return SAMPLE_CONTINUE;
	}

	if( sample_what & SAMPLER_KSTACK )
		kperf_kcallstack_sample( &sbuf->kcallstack, context );

	/* sensitive ones */
	if ( !is_kernel ) {
		if( pend_user )
		{
			/* defer the user-space work to AST context; the
			 * *_pend() calls report whether a pend was newly set
			 */
			if( sample_what & SAMPLER_USTACK )
				did_ucallstack = kperf_ucallstack_pend( context );

			if( sample_what & SAMPLER_TINFOEX )
				did_tinfo_extra = kperf_threadinfo_extra_pend( context );
		}
		else
		{
			/* safe to sample user state inline */
			if( sample_what & SAMPLER_USTACK )
				kperf_ucallstack_sample( &sbuf->ucallstack, context );

			if( sample_what & SAMPLER_TINFOEX )
				kperf_threadinfo_extra_sample( &sbuf->tinfo_ex,
							       context );
		}
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	enabled = ml_set_interrupts_enabled(FALSE);

	if ( pend_user )
		BUF_DATA1( PERF_GEN_EVENT | DBG_FUNC_START, sample_what );

	/* dump threadinfo */
	if( sample_what & SAMPLER_TINFO )
		kperf_threadinfo_log( &sbuf->threadinfo );

	/* dump kcallstack */
	if( sample_what & SAMPLER_KSTACK )
		kperf_kcallstack_log( &sbuf->kcallstack );


	/* dump user stuff */
	if ( !is_kernel ) {
		if ( pend_user )
		{
			/* only markers here; real data is logged at AST time */
			if ( did_ucallstack )
				BUF_INFO1( PERF_CS_UPEND, 0 );

			if ( did_tinfo_extra )
				BUF_INFO1( PERF_TI_XPEND, 0 );
		}
		else
		{
			if( sample_what & SAMPLER_USTACK )
				kperf_ucallstack_log( &sbuf->ucallstack );

			if( sample_what & SAMPLER_TINFOEX )
				kperf_threadinfo_extra_log( &sbuf->tinfo_ex );
		}
	}

	if ( pend_user )
		BUF_DATA1( PERF_GEN_EVENT | DBG_FUNC_END, sample_what );

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}
173
174/* Translate actionid into sample bits and take a sample */
175kern_return_t
176kperf_sample( struct kperf_sample *sbuf,
177	      struct kperf_context *context,
178              unsigned actionid, boolean_t pend_user )
179{
180	unsigned sample_what = 0;
181
182	/* check samppling is on, or panic */
183	if( kperf_sampling_status() == KPERF_SAMPLING_OFF )
184		panic("trigger fired while sampling off");
185	else if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN )
186		return SAMPLE_SHUTDOWN;
187
188	/* work out what to sample, if anything */
189	if( actionid >= actionc )
190		return SAMPLE_SHUTDOWN;
191
192	sample_what = actionv[actionid].sample;
193
194	return kperf_sample_internal( sbuf, context, sample_what, pend_user );
195}
196
197/* ast callback on a thread */
198void
199kperf_thread_ast_handler( thread_t thread )
200{
201	int r;
202	uint32_t t_chud;
203	unsigned sample_what = 0;
204	/* we know we're on a thread, so let's do stuff */
205	task_t task = NULL;
206
207	/* Don't sample if we are shutting down or off */
208	if( kperf_sampling_status() != KPERF_SAMPLING_ON )
209		return;
210
211	BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_START, thread);
212
213	/* FIXME: probably want a faster allocator here... :P */
214	struct kperf_sample *sbuf = kalloc( sizeof(*sbuf) );
215	if( sbuf == NULL )
216	{
217		/* FIXME: error code */
218		BUF_INFO1( PERF_AST_ERROR, 0 );
219		goto error;
220	}
221
222	/* make a context, take a sample */
223	struct kperf_context ctx;
224	ctx.cur_thread = thread;
225	ctx.cur_pid = -1;
226
227	task = chudxnu_task_for_thread(thread);
228	if(task)
229		ctx.cur_pid = chudxnu_pid_for_task(task);
230
231	/* decode the chud bits so we know what to sample */
232	t_chud = kperf_get_thread_bits(thread);
233
234	if (t_chud & T_AST_NAME)
235		sample_what |= SAMPLER_TINFOEX;
236
237	if (t_chud & T_AST_CALLSTACK)
238		sample_what |= SAMPLER_USTACK;
239
240	/* do the sample, just of the user stuff */
241	r = kperf_sample_internal( sbuf, &ctx, sample_what, FALSE );
242
243	/* free it again */
244	kfree( sbuf, sizeof(*sbuf) );
245
246error:
247	BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_END, r);
248
249}
250
251/* register AST bits */
252int
253kperf_ast_pend( thread_t cur_thread, uint32_t check_bits,
254		uint32_t set_bits )
255{
256	/* pend on the thread */
257	uint32_t t_chud, set_done = 0;
258
259	/* can only pend on the current thread */
260	if( cur_thread != chudxnu_current_thread() )
261		panic("pending to non-current thread");
262
263	/* get our current bits */
264	t_chud = kperf_get_thread_bits(cur_thread);
265
266	/* see if it's already been done or pended */
267	if( !(t_chud & check_bits ) )
268	{
269		/* set the bit on the thread */
270		t_chud |= set_bits;
271		kperf_set_thread_bits(cur_thread, t_chud);
272
273		/* set the actual AST */
274		kperf_set_thread_ast( cur_thread );
275
276		set_done = 1;
277	}
278
279	return set_done;
280
281//	BUF_INFO3( dbg_code, (uintptr_t)cur_thread, t_chud, set_done );
282}
283
/* Return the current number of configured actions. */
unsigned
kperf_action_get_count(void)
{
	return actionc;
}
289
290int
291kperf_action_set_samplers( unsigned actionid, uint32_t samplers )
292{
293	if( actionid >= actionc )
294		return EINVAL;
295
296	actionv[actionid].sample = samplers;
297
298	return 0;
299}
300
301int
302kperf_action_get_samplers( unsigned actionid, uint32_t *samplers_out )
303{
304	if( actionid >= actionc )
305		return EINVAL;
306
307	*samplers_out = actionv[actionid].sample;
308
309	return 0;
310}
311
312int
313kperf_action_set_count(unsigned count)
314{
315	struct action *new_actionv = NULL, *old_actionv = NULL;
316	unsigned old_count;
317
318	/* easy no-op */
319	if( count == actionc )
320		return 0;
321
322	/* TODO: allow shrinking? */
323	if( count < actionc )
324		return EINVAL;
325
326	/* cap it for good measure */
327	if( count > ACTION_MAX )
328		return EINVAL;
329
330	/* creating the action arror for the first time. create a few
331	 * more things, too.
332	 */
333       	if( actionc == 0 )
334	{
335		int r;
336		r = kperf_init();
337
338		if( r != 0 )
339			return r;
340	}
341
342	/* create a new array */
343	new_actionv = kalloc( count * sizeof(*new_actionv) );
344	if( new_actionv == NULL )
345		return ENOMEM;
346
347	old_actionv = actionv;
348	old_count = actionc;
349
350	if( old_actionv != NULL )
351		bcopy( actionv, new_actionv, actionc * sizeof(*actionv) );
352
353	bzero( &new_actionv[actionc], (count - old_count) * sizeof(*actionv) );
354
355	actionv = new_actionv;
356	actionc = count;
357
358	if( old_actionv != NULL )
359		kfree( old_actionv, old_count * sizeof(*actionv) );
360
361	printf( "kperf: done the alloc\n" );
362
363	return 0;
364}
365