cpc_subr.c revision 3434:5142e1d7d0bc
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * x86-specific routines used by the CPU Performance counter driver.
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/atomic.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/x86_archext.h>
#include <sys/cpuvar.h>
#include <sys/machcpuvar.h>
#include <sys/archsystm.h>
#include <sys/cpc_pcbe.h>
#include <sys/cpc_impl.h>
#include <sys/x_call.h>
#include <sys/cmn_err.h>
#include <sys/cmt.h>
#include <sys/spl.h>
#include <io/pcplusmp/apic.h>

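/*
 * Overflow interrupt plumbing.  The flag and function pointer marked
 * "set by APIC code" below are filled in by the pcplusmp APIC code (see
 * io/pcplusmp/apic.h above); the framework's own overflow handler is
 * registered through kcpc_hw_add_ovf_intr().
 */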
static const uint64_t allstopped = 0;
static kcpc_ctx_t *(*overflow_intr_handler)(caddr_t);

int kcpc_hw_overflow_intr_installed;		/* set by APIC code */
extern kcpc_ctx_t *kcpc_overflow_intr(caddr_t arg, uint64_t bitmap);

extern int kcpc_counts_include_idle; /* Project Private /etc/system variable */

void (*kcpc_hw_enable_cpc_intr)(void);		/* set by APIC code */

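/*
 * Register the framework's counter overflow handler.  Overflow interrupts
 * are only hooked up for P6-family processors here; for other CPU types
 * this is a no-op that returns 0.  On P6 the return value is the SPL
 * corresponding to the APIC performance counter interrupt level.
 */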
int
kcpc_hw_add_ovf_intr(kcpc_ctx_t *(*handler)(caddr_t))
{
	if (x86_type != X86_TYPE_P6)
		return (0);
	overflow_intr_handler = handler;
	return (ipltospl(APIC_PCINT_IPL));
}

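/*
 * Unregister the overflow handler installed by kcpc_hw_add_ovf_intr().
 */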
void
kcpc_hw_rem_ovf_intr(void)
{
	overflow_intr_handler = NULL;
}

/*
 * Hook used on P4 systems to catch online/offline events.
 */
/*ARGSUSED*/
static int
kcpc_cpu_setup(cpu_setup_t what, int cpuid, void *arg)
{
	pg_cmt_t	*chip_pg;
	int		active_cpus_cnt;

	if (what != CPU_ON)
		return (0);

	/*
	 * If any CPU-bound contexts exist, we don't need to invalidate
	 * anything, as no per-LWP contexts can coexist.
	 */
	if (kcpc_cpuctx)
		return (0);

	/*
	 * If this chip now has more than 1 active cpu, we must invalidate all
	 * contexts in the system.
	 */
	chip_pg = (pg_cmt_t *)pghw_find_pg(cpu[cpuid], PGHW_CHIP);
	if (chip_pg != NULL) {
		active_cpus_cnt = GROUP_SIZE(&chip_pg->cmt_cpus_actv);
		if (active_cpus_cnt > 1)
			kcpc_invalidate_all();
	}

	return (0);
}

static kmutex_t cpu_setup_lock;	/* protects setup_registered */
static int setup_registered;

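/*
 * Prepare a CPU for use by the CPC framework.  On HyperThreaded systems the
 * online/offline hook above is registered (once) so that existing contexts
 * can be invalidated when a sibling CPU comes online.  Unless the /etc/system
 * variable kcpc_counts_include_idle is set, save/restore context ops are
 * installed on the CPU's idle thread so that idle time is excluded from the
 * counts.
 */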
void
kcpc_hw_init(cpu_t *cp)
{
	kthread_t		*t = cp->cpu_idle_thread;

	if (x86_feature & X86_HTT) {
		mutex_enter(&cpu_setup_lock);
		if (setup_registered == 0) {
			mutex_enter(&cpu_lock);
			register_cpu_setup_func(kcpc_cpu_setup, NULL);
			mutex_exit(&cpu_lock);
			setup_registered = 1;
		}
		mutex_exit(&cpu_setup_lock);
	}

	mutex_init(&cp->cpu_cpc_ctxlock, "cpu_cpc_ctxlock", MUTEX_DEFAULT, 0);

	if (kcpc_counts_include_idle)
		return;

	installctx(t, cp, kcpc_idle_save, kcpc_idle_restore,
	    NULL, NULL, NULL, NULL);
}

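/*
 * BITS(v, u, l) extracts the bit field of v between bit positions u and l
 * (inclusive), shifted down so that bit l becomes bit 0.  For example,
 * BITS(0xABCD, 15, 8) evaluates to 0xAB.
 */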
#define	BITS(v, u, l)	\
	(((v) >> (l)) & ((1 << (1 + (u) - (l))) - 1))

#define	PCBE_NAMELEN 30	/* Enough Room for pcbe.manuf.model.family.stepping */

/*
 * Examine the processor and load an appropriate PCBE.
 */
int
kcpc_hw_load_pcbe(void)
{
	return (kcpc_pcbe_tryload(cpuid_getvendorstr(CPU), cpuid_getfamily(CPU),
	    cpuid_getmodel(CPU), cpuid_getstep(CPU)));
}

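/*
 * Cross-call target run on the remote CPU: stop all of its counters and
 * flag its context as invalid and stopped.
 */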
static int
kcpc_remotestop_func(void)
{
	ASSERT(CPU->cpu_cpc_ctx != NULL);
	pcbe_ops->pcbe_allstop();
	atomic_or_uint(&CPU->cpu_cpc_ctx->kc_flags, KCPC_CTX_INVALID_STOPPED);

	return (0);
}

/*
 * Ensure the counters are stopped on the given processor.
 *
 * Callers must ensure kernel preemption is disabled.
 */
void
kcpc_remote_stop(cpu_t *cp)
{
	cpuset_t set;

	CPUSET_ZERO(set);

	CPUSET_ADD(set, cp->cpu_id);

	xc_sync(0, 0, 0, X_CALL_HIPRI, set, (xc_func_t)kcpc_remotestop_func);
}

/*
 * Called by the generic framework to check if it's OK to bind a set to a CPU.
 */
int
kcpc_hw_cpu_hook(processorid_t cpuid, ulong_t *kcpc_cpumap)
{
	cpu_t		*cpu, *p;
	pg_t		*chip_pg;
	pg_cpu_itr_t	itr;

	if ((x86_feature & X86_HTT) == 0)
		return (0);

	/*
	 * Only one logical CPU on each Pentium 4 HT chip may have counters
	 * bound to it at any one time.
	 *
	 * cpu_lock is held across this loop so that the cpu_t of the desired
	 * cpu can be accessed safely.
	 */
	mutex_enter(&cpu_lock);
	if ((cpu = cpu_get(cpuid)) == NULL) {
		mutex_exit(&cpu_lock);
		return (-1);
	}

	chip_pg = (pg_t *)pghw_find_pg(cpu, PGHW_CHIP);

	PG_CPU_ITR_INIT(chip_pg, itr);
	while ((p = pg_cpu_next(&itr)) != NULL) {
		if (p == cpu)
			continue;
		if (BT_TEST(kcpc_cpumap, p->cpu_id)) {
			mutex_exit(&cpu_lock);
			return (-1);
		}
	}

	mutex_exit(&cpu_lock);
	return (0);
}

/*
 * Called by the generic framework to check if it's OK to bind a set to an LWP.
 */
int
kcpc_hw_lwp_hook(void)
{
	pg_cmt_t	*chip;
	group_t		*chips;
	group_iter_t	i;

	if ((x86_feature & X86_HTT) == 0)
		return (0);

	/*
	 * Only one CPU per chip may be online.
	 */
	mutex_enter(&cpu_lock);

	chips = pghw_set_lookup(PGHW_CHIP);
	if (chips == NULL) {
		mutex_exit(&cpu_lock);
		return (0);
	}

	group_iter_init(&i);
	while ((chip = group_iterate(chips, &i)) != NULL) {
		if (GROUP_SIZE(&chip->cmt_cpus_actv) > 1) {
			mutex_exit(&cpu_lock);
			return (-1);
		}
	}

	mutex_exit(&cpu_lock);
	return (0);
}