/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* all thread states code */
#include <mach/mach_types.h>
#include <IOKit/IOTypes.h>
#include <IOKit/IOLocks.h>
#include <sys/errno.h>

#include <chud/chud_xnu.h>

#include <kperf/buffer.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/pet.h>
#include <kperf/timetrigger.h>

extern kern_return_t task_resume_internal(task_t);
extern kern_return_t task_suspend_internal(task_t);
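
/*
 * PET: "profile every thread".  A periodic timer (pet_timerid) wakes the
 * PET thread, which sweeps every task in the system: the task is briefly
 * suspended, each of its threads that is not currently on a CPU is sampled
 * with the configured action (pet_actionid), and the task is resumed.  The
 * sweep time is reported back to the timer code before the thread goes
 * back to sleep.
 */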

/* timer id to call back on */
static unsigned pet_timerid = 0;

/* action ID to call
 * We also use this as the sync point for waiting, for no good reason
 */
static unsigned pet_actionid = 0;

/* the actual thread pointer */
static thread_t pet_thread = NULL;

/* Lock on which to synchronise */
static IOLock *pet_lock = NULL;

/* where to sample data to */
static struct kperf_sample pet_sample_buf;

/* a clean (not-dirty) thread only gets a full callstack every pet_idle_rate samples */
static int pet_idle_rate = 15;

/* sample an actual, honest to god thread! */
static void
pet_sample_thread( thread_t thread )
{
	struct kperf_context ctx;
	task_t task;
	unsigned skip_callstack;

	/* work out the context */
	ctx.cur_thread = thread;
	ctx.cur_pid = 0;

	task = chudxnu_task_for_thread(thread);
	if(task)
		ctx.cur_pid = chudxnu_pid_for_task(task);

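	/* Take a full callstack if the thread has run since its last sample
	 * (the dirty bit is set) or on every pet_idle_rate-th pass; otherwise
	 * emit the sample with an empty callstack to keep idle threads cheap.
	 */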
	skip_callstack = ((chudxnu_thread_get_dirty(thread) == TRUE) ||
	                  ((thread->kperf_pet_cnt % (uint64_t)pet_idle_rate) == 0))
	                 ? 0 : SAMPLE_FLAG_EMPTY_CALLSTACK;

	/* do the actual sample */
	kperf_sample( &pet_sample_buf, &ctx, pet_actionid,
	              SAMPLE_FLAG_IDLE_THREADS | skip_callstack );

	if (!skip_callstack)
		chudxnu_thread_set_dirty(thread, FALSE);

	thread->kperf_pet_cnt++;
}

/* given a list of threads, preferably stopped, sample 'em! */
static void
pet_sample_thread_list( mach_msg_type_number_t threadc, thread_array_t threadv )
{
	unsigned int i;
	int ncpu;

	for( i = 0; i < threadc; i++ )
	{
		thread_t thread = threadv[i];

		if( !thread )
			/* XXX? */
			continue;

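		/* Scan the per-CPU thread array to see whether this thread is
		 * currently on a CPU.  On-CPU threads are skipped here, since the
		 * timer path samples them in context.
		 */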
		for (ncpu = 0; ncpu < machine_info.logical_cpu_max; ++ncpu)
		{
			thread_t candidate = kperf_thread_on_cpus[ncpu];
			if (candidate && candidate->thread_id == thread->thread_id)
				break;
		}

		/* the thread was not on a CPU */
		if (ncpu == machine_info.logical_cpu_max)
			pet_sample_thread( thread );
	}
}

/* given a task (preferably stopped), sample all the threads in it */
static void
pet_sample_task( task_t task )
{
	mach_msg_type_number_t threadc;
	thread_array_t threadv;
	kern_return_t kr;

	kr = chudxnu_task_threads(task, &threadv, &threadc);
	if( kr != KERN_SUCCESS )
	{
		BUF_INFO2(PERF_PET_ERROR, ERR_THREAD, kr);
		return;
	}

	pet_sample_thread_list( threadc, threadv );

	chudxnu_free_thread_list(&threadv, &threadc);
}

/* given a list of tasks, sample all the threads in 'em */
static void
pet_sample_task_list( int taskc, task_array_t taskv )
{
	int i;

	for( i = 0; i < taskc; i++ )
	{
		kern_return_t kr;
		task_t task = taskv[i];

		/* FIXME: necessary? old code did this, our hacky
		 * filtering code does, too
		 */
		if(!task) {
			continue;
		}

		/* try and stop any task other than the kernel task */
		if( task != kernel_task )
		{
			kr = task_suspend_internal( task );

			/* try the next task */
			if( kr != KERN_SUCCESS )
				continue;
		}

		/* sample it */
		pet_sample_task( task );

		/* if it wasn't the kernel, resume it */
		if( task != kernel_task )
			(void) task_resume_internal(task);
	}
}

static void
pet_sample_all_tasks(void)
{
	task_array_t taskv = NULL;
	mach_msg_type_number_t taskc = 0;
	kern_return_t kr;

	kr = chudxnu_all_tasks(&taskv, &taskc);

	if( kr != KERN_SUCCESS )
	{
		BUF_INFO2(PERF_PET_ERROR, ERR_TASK, kr);
		return;
	}

	pet_sample_task_list( taskc, taskv );
	chudxnu_free_task_list(&taskv, &taskc);
}

#if 0
static void
pet_sample_pid_filter(void)
{
	task_t *taskv = NULL;
	int *pidv, pidc, i;
	vm_size_t asize;

	kperf_filter_pid_list( &pidc, &pidv );
	if( pidc == 0 )
	{
		BUF_INFO2(PERF_PET_ERROR, ERR_PID, 0);
		return;
	}

	asize = pidc * sizeof(task_t);
	taskv = kalloc( asize );

	if( taskv == NULL )
		goto out;

	/* convert the pid list into a task list */
	for( i = 0; i < pidc; i++ )
	{
		int pid = pidv[i];
		if( pid == -1 )
			taskv[i] = NULL;
		else
			taskv[i] = chudxnu_task_for_pid(pid);
	}

	/* now sample the task list */
	pet_sample_task_list( pidc, taskv );

	kfree(taskv, asize);

out:
	kperf_filter_free_pid_list( &pidc, &pidv );
}
#endif

/* do the pet sample */
static void
pet_work_unit(void)
{
	int pid_filter;

	/* check if we're filtering on pid */
	// pid_filter = kperf_filter_on_pid();
	pid_filter = 0;  // FIXME

#if 0
	if( pid_filter )
	{
		BUF_INFO1(PERF_PET_SAMPLE | DBG_FUNC_START, 1);
		pet_sample_pid_filter();
	}
	else
#endif
	{
		/* otherwise sample everything */
		BUF_INFO1(PERF_PET_SAMPLE | DBG_FUNC_START, 0);
		pet_sample_all_tasks();
	}

	BUF_INFO1(PERF_PET_SAMPLE | DBG_FUNC_END, 0);
}

/* sleep indefinitely */
static void
pet_idle(void)
{
	IOLockSleep(pet_lock, &pet_actionid, THREAD_UNINT);
}

/* loop between sampling and waiting */
static void
pet_thread_loop( __unused void *param, __unused wait_result_t wr )
{
	uint64_t work_unit_ticks;

	BUF_INFO1(PERF_PET_THREAD, 1);

	IOLockLock(pet_lock);
	while(1)
	{
		BUF_INFO1(PERF_PET_IDLE, 0);
		pet_idle();

		BUF_INFO1(PERF_PET_RUN, 0);

		/* measure how long the work unit takes */
		work_unit_ticks = mach_absolute_time();
		pet_work_unit();
		work_unit_ticks = mach_absolute_time() - work_unit_ticks;

		/* re-program the timer, passing along how long the sweep took */
		kperf_timer_pet_set( pet_timerid, work_unit_ticks );

		/* FIXME: break here on a condition? */
	}
}

/* make sure the thread takes a new period value */
void
kperf_pet_timer_config( unsigned timerid, unsigned actionid )
{
	if( !pet_lock )
		return;

	/* hold the lock so pet thread doesn't run while we do this */
	IOLockLock(pet_lock);

	BUF_INFO1(PERF_PET_THREAD, 3);

	/* set values */
	pet_timerid = timerid;
	pet_actionid = actionid;

	/* done */
	IOLockUnlock(pet_lock);
}

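/*
 * The PET thread holds pet_lock whenever it is running and drops it only
 * inside IOLockSleep() in pet_idle().  kperf_pet_thread_go() wakes it on
 * the pet_actionid event, and kperf_pet_thread_wait() simply takes and
 * releases pet_lock, which cannot succeed until the thread has finished
 * its sweep and parked itself back in IOLockSleep().
 */
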
/* make the thread run! */
void
kperf_pet_thread_go(void)
{
	if( !pet_lock )
		return;

	/* Make the thread go */
	IOLockWakeup(pet_lock, &pet_actionid, FALSE);
}

/* wait for the pet thread to finish a run */
void
kperf_pet_thread_wait(void)
{
	if( !pet_lock )
		return;

	/* acquire the lock to ensure the thread is parked. */
	IOLockLock(pet_lock);
	IOLockUnlock(pet_lock);
}

/* keep the pet thread around while we run */
int
kperf_pet_init(void)
{
	kern_return_t rc;
	thread_t t;

	if( pet_thread != NULL )
		return 0;

	/* make the sync point */
	pet_lock = IOLockAlloc();
	if( pet_lock == NULL )
		return ENOMEM;

	/* create the thread */
	BUF_INFO1(PERF_PET_THREAD, 0);
	rc = kernel_thread_start( pet_thread_loop, NULL, &t );
	if( rc != KERN_SUCCESS )
	{
		IOLockFree( pet_lock );
		pet_lock = NULL;
		return ENOMEM;
	}

	/* remember the thread so a second init call is a no-op */
	pet_thread = t;

	/* OK! */
	return 0;
}

int
kperf_get_pet_idle_rate( void )
{
	return pet_idle_rate;
}

void
kperf_set_pet_idle_rate( int val )
{
	pet_idle_rate = val;
}