1/*
2 * Copyright (c) 2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach/mach_types.h>
30#include <machine/machine_routines.h>
31#include <kern/processor.h>
32#include <kern/kalloc.h>
33#include <sys/errno.h>
34#include <kperf/buffer.h>
35#include <kern/thread.h>
36
37#include <kern/kpc.h>
38
39#include <kperf/kperf.h>
40#include <kperf/sample.h>
41#include <kperf/context.h>
42#include <kperf/action.h>
43
44#include <chud/chud_xnu.h>
45
/* Per-counter kperf action IDs, indexed by counter number; consulted when a
 * PMI fires. NOTE(review): an ID of 0 appears to mean "no action" per kperf
 * convention — confirm against kperf/action.c. */
uint32_t kpc_actionid[KPC_MAX_COUNTERS];

/* locks */
/* Mutex serializing all kpc configuration changes (config, period, actionid). */
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t      *kpc_config_lckgrp = NULL;
static lck_mtx_t       kpc_config_lock;
52
void kpc_arch_init(void);
/*
 * One-time bootstrap: set up the lock group and the mutex that serializes
 * kpc configuration changes. Must run before any kpc_set_* entry point.
 */
void
kpc_arch_init(void)
{
	/* group attributes and group must exist before the mutex is initialized */
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}
61
62uint32_t
63kpc_get_running(void)
64{
65	uint32_t cur_state = 0;
66
67	if( kpc_is_running_fixed() )
68		cur_state |= KPC_CLASS_FIXED_MASK;
69
70	if( kpc_is_running_configurable() )
71		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;
72
73	return cur_state;
74}
75
76/* generic counter reading function */
77int
78kpc_get_cpu_counters( boolean_t all_cpus, uint32_t classes,
79                      int *curcpu, uint64_t *buf  )
80{
81	int r, enabled, offset = 0;
82
83	(void) all_cpus;
84
85	/* grab counters and CPU number as close as possible */
86	enabled = ml_set_interrupts_enabled(FALSE);
87
88	/* and the CPU ID */
89	if( curcpu )
90		*curcpu = current_processor()->cpu_id;
91
92	if( classes & KPC_CLASS_FIXED_MASK )
93	{
94		kpc_get_fixed_counters( &buf[offset] );
95
96		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
97	}
98
99	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
100	{
101		r = kpc_get_configurable_counters(  &buf[offset] );
102
103		offset += kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
104	}
105
106	ml_set_interrupts_enabled(enabled);
107
108	return offset;
109}
110
111int
112kpc_get_shadow_counters( boolean_t all_cpus, uint32_t classes,
113                         int *curcpu, uint64_t *buf )
114{
115	int enabled, count, offset = 0;
116
117	(void)all_cpus;
118
119	enabled = ml_set_interrupts_enabled(FALSE);
120
121	if( curcpu )
122		*curcpu = current_processor()->cpu_id;
123
124	if( classes & KPC_CLASS_FIXED_MASK )
125	{
126		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
127
128		memcpy( &buf[offset], &FIXED_SHADOW(0), count*sizeof(uint64_t) );
129
130		offset += count;
131	}
132
133	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
134	{
135		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
136
137		memcpy( &buf[offset], &CONFIGURABLE_SHADOW(0), count*sizeof(uint64_t) );
138
139		offset += count;
140	}
141
142	ml_set_interrupts_enabled(enabled);
143
144	return offset;
145}
146
147uint32_t
148kpc_get_counter_count(uint32_t classes)
149{
150	int count = 0;
151
152	if( classes & KPC_CLASS_FIXED_MASK )
153		count += kpc_fixed_count();
154
155	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
156		count += kpc_configurable_count() ;
157
158	return count;
159}
160
161uint32_t
162kpc_get_config_count(uint32_t classes)
163{
164	int count = 0;
165
166	if( classes & KPC_CLASS_FIXED_MASK )
167		count += kpc_fixed_config_count();
168
169	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
170		count += kpc_configurable_config_count();
171
172	return count;
173}
174
175int
176kpc_get_config(uint32_t classes, kpc_config_t *current_config)
177{
178	int count = 0;
179
180	if( classes & KPC_CLASS_FIXED_MASK )
181	{
182		kpc_get_fixed_config(&current_config[count]);
183		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
184	}
185
186	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
187	{
188		kpc_get_configurable_config(&current_config[count]);
189		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
190	}
191
192	return 0;
193}
194
195int
196kpc_set_config(uint32_t classes, kpc_config_t *configv)
197{
198	struct kpc_config_remote mp_config;
199
200	lck_mtx_lock(&kpc_config_lock);
201
202	mp_config.classes = classes;
203	mp_config.configv = configv;
204
205	kpc_set_config_arch( &mp_config );
206
207	lck_mtx_unlock(&kpc_config_lock);
208
209	return 0;
210}
211
212/* allocate a buffer big enough for all the counters */
213uint64_t *
214kpc_counterbuf_alloc(void)
215{
216	uint64_t *buf;
217
218	buf = kalloc(KPC_MAX_COUNTERS * sizeof(uint64_t));
219	if(buf)
220		bzero( buf, KPC_MAX_COUNTERS * sizeof(uint64_t) );
221
222	return buf;
223}
224
225void
226kpc_counterbuf_free(uint64_t *buf)
227{
228	if( buf )
229		kfree(buf, KPC_MAX_COUNTERS * sizeof(uint64_t));
230}
231
232void kpc_sample_kperf(uint32_t actionid)
233{
234	struct kperf_sample sbuf;
235	struct kperf_context ctx;
236	task_t task = NULL;
237	int r;
238
239	BUF_DATA1(PERF_KPC_HNDLR | DBG_FUNC_START, 0);
240
241	ctx.cur_pid = 0;
242	ctx.cur_thread = current_thread();
243
244	task = chudxnu_task_for_thread(ctx.cur_thread);
245	if (task)
246		ctx.cur_pid = chudxnu_pid_for_task(task);
247
248	ctx.trigger_type = TRIGGER_TYPE_PMI;
249	ctx.trigger_id = 0;
250
251	r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);
252
253	BUF_INFO1(PERF_KPC_HNDLR | DBG_FUNC_END, r);
254}
255
256
257int kpc_set_period(uint32_t classes, uint64_t *val)
258{
259	struct kpc_config_remote mp_config;
260
261	lck_mtx_lock(&kpc_config_lock);
262
263#ifndef FIXED_COUNTER_SHADOW
264	if (classes & KPC_CLASS_FIXED_MASK) {
265		lck_mtx_unlock(&kpc_config_lock);
266		return -1;
267	}
268#endif
269
270	kprintf("setting period %u\n", classes);
271
272	mp_config.classes = classes;
273	mp_config.configv = val;
274
275	kpc_set_period_arch( &mp_config );
276
277	lck_mtx_unlock(&kpc_config_lock);
278
279	return 0;
280}
281
282
283int kpc_get_period(uint32_t classes, uint64_t *val)
284{
285	uint32_t i, count, offset = 0;
286
287	lck_mtx_lock(&kpc_config_lock);
288
289	if (classes & KPC_CLASS_FIXED_MASK) {
290		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
291
292		/* convert reload values to periods */
293		for (i = 0; i < count; i++)
294			val[i] = kpc_fixed_max() - FIXED_RELOAD(i);
295
296		offset += count;
297	}
298
299	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
300		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
301
302		/* convert reload values to periods */
303		for (i = 0; i < count; i++)
304			val[i + offset] = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
305	}
306
307	lck_mtx_unlock(&kpc_config_lock);
308
309	return 0;
310}
311
312int kpc_set_actionid(uint32_t classes, uint32_t *val)
313{
314	uint32_t count, offset = 0;
315
316	/* NOTE: what happens if a pmi occurs while actionids are being
317	 * set is undefined. */
318	lck_mtx_lock(&kpc_config_lock);
319
320	if (classes & KPC_CLASS_FIXED_MASK) {
321		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
322
323		memcpy(&FIXED_ACTIONID(0), val, count*sizeof(uint32_t));
324
325		offset += count;
326	}
327
328	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
329		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
330
331		memcpy(&CONFIGURABLE_ACTIONID(0), &val[offset], count*sizeof(uint32_t));
332	}
333
334	lck_mtx_unlock(&kpc_config_lock);
335
336	return 0;
337}
338
339int kpc_get_actionid(uint32_t classes, uint32_t *val)
340{
341	uint32_t count, offset = 0;
342
343	lck_mtx_lock(&kpc_config_lock);
344
345	if (classes & KPC_CLASS_FIXED_MASK) {
346		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
347
348		memcpy(val, &FIXED_ACTIONID(0), count*sizeof(uint32_t));
349
350		offset += count;
351	}
352
353	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
354		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
355
356		memcpy(&val[offset], &CONFIGURABLE_ACTIONID(0), count*sizeof(uint32_t));
357	}
358
359	lck_mtx_unlock(&kpc_config_lock);
360
361	return 0;
362
363}
364
365