hwpmc_mod.c (r185363) vs. hwpmc_mod.c (r186037)
1/*-
2 * Copyright (c) 2003-2008 Joseph Koshy
3 * Copyright (c) 2007 The FreeBSD Foundation
4 * All rights reserved.
5 *
6 * Portions of this software were developed by A. Joseph Koshy under
7 * sponsorship from the FreeBSD Foundation and Google, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/dev/hwpmc/hwpmc_mod.c 185363 2008-11-27 09:00:47Z jkoshy $");
33__FBSDID("$FreeBSD: head/sys/dev/hwpmc/hwpmc_mod.c 186037 2008-12-13 13:07:12Z jkoshy $");
34
35#include <sys/param.h>
36#include <sys/eventhandler.h>
37#include <sys/jail.h>
38#include <sys/kernel.h>
39#include <sys/kthread.h>
40#include <sys/limits.h>
41#include <sys/lock.h>
42#include <sys/malloc.h>
43#include <sys/module.h>
44#include <sys/mutex.h>
45#include <sys/pmc.h>
46#include <sys/pmckern.h>
47#include <sys/pmclog.h>
48#include <sys/priv.h>
49#include <sys/proc.h>
50#include <sys/queue.h>
51#include <sys/resourcevar.h>
52#include <sys/sched.h>
53#include <sys/signalvar.h>
54#include <sys/smp.h>
55#include <sys/sx.h>
56#include <sys/sysctl.h>
57#include <sys/sysent.h>
58#include <sys/systm.h>
59#include <sys/vnode.h>
60
61#include <sys/linker.h> /* needs to be after <sys/malloc.h> */
62
63#include <machine/atomic.h>
64#include <machine/md_var.h>
65
66/*
67 * Types
68 */
69
70enum pmc_flags {
71 PMC_FLAG_NONE = 0x00, /* do nothing */
72 PMC_FLAG_REMOVE = 0x01, /* atomically remove entry from hash */
73 PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
74};
75
76/*
77 * The offset in sysent where the syscall is allocated.
78 */
79
80static int pmc_syscall_num = NO_SYSCALL;
81struct pmc_cpu **pmc_pcpu; /* per-cpu state */
82pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */
83
84#define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
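/*
 * Illustrative aside: PMC_PCPU_SAVED flattens a (cpu, row-index) pair
 * into the one-dimensional pmc_pcpu_saved[] array.  Assuming, purely
 * for illustration, that md->pmd_npmc is 4, the saved value for row
 * index 2 on CPU 3 lives at pmc_pcpu_saved[2 + 4*3], i.e. slot 14.
 */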
85
86struct mtx_pool *pmc_mtxpool;
87static int *pmc_pmcdisp; /* PMC row dispositions */
88
89#define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0)
90#define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0)
91#define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0)
92
93#define PMC_MARK_ROW_FREE(R) do { \
94 pmc_pmcdisp[(R)] = 0; \
95} while (0)
96
97#define PMC_MARK_ROW_STANDALONE(R) do { \
98 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
99 __LINE__)); \
100 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
101 KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()), \
102 ("[pmc,%d] row disposition error", __LINE__)); \
103} while (0)
104
105#define PMC_UNMARK_ROW_STANDALONE(R) do { \
106 atomic_add_int(&pmc_pmcdisp[(R)], 1); \
107 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
108 __LINE__)); \
109} while (0)
110
111#define PMC_MARK_ROW_THREAD(R) do { \
112 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
113 __LINE__)); \
114 atomic_add_int(&pmc_pmcdisp[(R)], 1); \
115} while (0)
116
117#define PMC_UNMARK_ROW_THREAD(R) do { \
118 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
119 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
120 __LINE__)); \
121} while (0)
122
123
124/* various event handlers */
125static eventhandler_tag pmc_exit_tag, pmc_fork_tag;
126
127/* Module statistics */
128struct pmc_op_getdriverstats pmc_stats;
129
130/* Machine/processor dependent operations */
131static struct pmc_mdep *md;
132
133/*
134 * Hash tables mapping owner processes and target threads to PMCs.
135 */
136
137struct mtx pmc_processhash_mtx; /* spin mutex */
138static u_long pmc_processhashmask;
139static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;
140
141/*
142 * Hash table of PMC owner descriptors. This table is protected by
143 * the shared PMC "sx" lock.
144 */
145
146static u_long pmc_ownerhashmask;
147static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;
148
149/*
150 * List of PMC owners with system-wide sampling PMCs.
151 */
152
153static LIST_HEAD(, pmc_owner) pmc_ss_owners;
154
155
156/*
157 * A map of row indices to classdep structures.
158 */
159static struct pmc_classdep **pmc_rowindex_to_classdep;
160
161/*
162 * Prototypes
163 */
164
165#ifdef DEBUG
166static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
167static int pmc_debugflags_parse(char *newstr, char *fence);
168#endif
169
170static int load(struct module *module, int cmd, void *arg);
171static int pmc_attach_process(struct proc *p, struct pmc *pm);
172static struct pmc *pmc_allocate_pmc_descriptor(void);
173static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
174static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
175static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
176 int cpu);
177static int pmc_can_attach(struct pmc *pm, struct proc *p);
178static void pmc_capture_user_callchain(int cpu, struct trapframe *tf);
179static void pmc_cleanup(void);
180static int pmc_detach_process(struct proc *p, struct pmc *pm);
181static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
182 int flags);
183static void pmc_destroy_owner_descriptor(struct pmc_owner *po);
184static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
185static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
186static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
187 pmc_id_t pmc);
188static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
189 uint32_t mode);
190static void pmc_force_context_switch(void);
191static void pmc_link_target_process(struct pmc *pm,
192 struct pmc_process *pp);
193static void pmc_log_all_process_mappings(struct pmc_owner *po);
194static void pmc_log_kernel_mappings(struct pmc *pm);
195static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p);
196static void pmc_maybe_remove_owner(struct pmc_owner *po);
197static void pmc_process_csw_in(struct thread *td);
198static void pmc_process_csw_out(struct thread *td);
199static void pmc_process_exit(void *arg, struct proc *p);
200static void pmc_process_fork(void *arg, struct proc *p1,
201 struct proc *p2, int n);
202static void pmc_process_samples(int cpu);
203static void pmc_release_pmc_descriptor(struct pmc *pmc);
204static void pmc_remove_owner(struct pmc_owner *po);
205static void pmc_remove_process_descriptor(struct pmc_process *pp);
206static void pmc_restore_cpu_binding(struct pmc_binding *pb);
207static void pmc_save_cpu_binding(struct pmc_binding *pb);
208static void pmc_select_cpu(int cpu);
209static int pmc_start(struct pmc *pm);
210static int pmc_stop(struct pmc *pm);
211static int pmc_syscall_handler(struct thread *td, void *syscall_args);
212static void pmc_unlink_target_process(struct pmc *pmc,
213 struct pmc_process *pp);
214
215/*
216 * Kernel tunables and sysctl(8) interface.
217 */
218
219SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");
220
221static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
222TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "callchaindepth", &pmc_callchaindepth);
223SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_TUN|CTLFLAG_RD,
224 &pmc_callchaindepth, 0, "depth of call chain records");
225
226#ifdef DEBUG
227struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
228char pmc_debugstr[PMC_DEBUG_STRSIZE];
229TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
230 sizeof(pmc_debugstr));
231SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
232 CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
233 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
234#endif
235
236/*
 237 * kern.hwpmc.hashsize -- determines the number of rows in the
 238 * hash table used to look up threads
239 */
240
241static int pmc_hashsize = PMC_HASH_SIZE;
242TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
243SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
244 &pmc_hashsize, 0, "rows in hash tables");
245
246/*
247 * kern.hwpmc.nsamples --- number of PC samples/callchain stacks per CPU
248 */
249
250static int pmc_nsamples = PMC_NSAMPLES;
251TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
252SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
253 &pmc_nsamples, 0, "number of PC samples per CPU");
254
255
256/*
257 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
258 */
259
260static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
261TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
262SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
263 &pmc_mtxpool_size, 0, "size of spin mutex pool");
264
265
266/*
267 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
268 * allocate system-wide PMCs.
269 *
270 * Allowing unprivileged processes to allocate system PMCs is convenient
271 * if system-wide measurements need to be taken concurrently with other
272 * per-process measurements. This feature is turned off by default.
273 */
274
275static int pmc_unprivileged_syspmcs = 0;
276TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
277SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
278 &pmc_unprivileged_syspmcs, 0,
279 "allow unprivileged process to allocate system PMCs");
280
281/*
282 * Hash function. Discard the lower 2 bits of the pointer since
283 * these are always zero for our uses. The hash multiplier is
284 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
285 */
286
287#if LONG_BIT == 64
288#define _PMC_HM 11400714819323198486u
289#elif LONG_BIT == 32
290#define _PMC_HM 2654435769u
291#else
292#error Must know the size of 'long' to compile
293#endif
294
295#define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
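/*
 * Illustrative aside (not driver code): a minimal user-space sketch of
 * the multiplicative hash above, assuming LONG_BIT == 64 and a
 * power-of-two bucket count supplied as 'mask'.  The constant and the
 * ">> 2" shift mirror the definitions above; everything else is made
 * up for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define HM64	11400714819323198486ull	/* round((2^64) * ((sqrt(5)-1)/2)) */

static unsigned long
hash_ptr(const void *p, unsigned long mask)
{
	/* the low 2 bits of these pointers are always zero, so drop them */
	return ((((unsigned long)(uintptr_t)p >> 2) * HM64) & mask);
}

int
main(void)
{
	int x;

	/* hash the address of 'x' into a 64-bucket table (mask = 63) */
	printf("bucket = %lu\n", hash_ptr(&x, 63));
	return (0);
}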
296
297/*
298 * Syscall structures
299 */
300
301/* The `sysent' for the new syscall */
302static struct sysent pmc_sysent = {
303 2, /* sy_narg */
304 pmc_syscall_handler /* sy_call */
305};
306
307static struct syscall_module_data pmc_syscall_mod = {
308 load,
309 NULL,
310 &pmc_syscall_num,
311 &pmc_sysent,
312 { 0, NULL }
313};
314
315static moduledata_t pmc_mod = {
316 PMC_MODULE_NAME,
317 syscall_module_handler,
318 &pmc_syscall_mod
319};
320
321DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
322MODULE_VERSION(pmc, PMC_VERSION);
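/*
 * Illustrative aside (user-space, not driver code): a syscall that is
 * registered dynamically, as done above, has no fixed number.  The
 * usual FreeBSD idiom is to look it up with modfind(2)/modstat(2) and
 * then invoke it through syscall(2).  A hedged sketch, assuming the
 * module name "pmc" (the actual name is whatever PMC_MODULE_NAME
 * expands to):
 */
#include <sys/param.h>
#include <sys/module.h>

#include <err.h>

static int
find_pmc_syscall_num(void)
{
	struct module_stat ms;
	int modid;

	if ((modid = modfind("pmc")) < 0)	/* module name assumed here */
		err(1, "modfind");

	ms.version = sizeof(ms);
	if (modstat(modid, &ms) < 0)
		err(1, "modstat");

	return (ms.data.intval);	/* syscall number assigned at load time */
}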
323
324#ifdef DEBUG
325enum pmc_dbgparse_state {
326 PMCDS_WS, /* in whitespace */
327 PMCDS_MAJOR, /* seen a major keyword */
328 PMCDS_MINOR
329};
330
331static int
332pmc_debugflags_parse(char *newstr, char *fence)
333{
334 char c, *p, *q;
335 struct pmc_debugflags *tmpflags;
336 int error, found, *newbits, tmp;
337 size_t kwlen;
338
339 tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK|M_ZERO);
340
341 p = newstr;
342 error = 0;
343
344 for (; p < fence && (c = *p); p++) {
345
346 /* skip white space */
347 if (c == ' ' || c == '\t')
348 continue;
349
350 /* look for a keyword followed by "=" */
351 for (q = p; p < fence && (c = *p) && c != '='; p++)
352 ;
353 if (c != '=') {
354 error = EINVAL;
355 goto done;
356 }
357
358 kwlen = p - q;
359 newbits = NULL;
360
361 /* lookup flag group name */
362#define DBG_SET_FLAG_MAJ(S,F) \
363 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
364 newbits = &tmpflags->pdb_ ## F;
365
366 DBG_SET_FLAG_MAJ("cpu", CPU);
367 DBG_SET_FLAG_MAJ("csw", CSW);
368 DBG_SET_FLAG_MAJ("logging", LOG);
369 DBG_SET_FLAG_MAJ("module", MOD);
370 DBG_SET_FLAG_MAJ("md", MDP);
371 DBG_SET_FLAG_MAJ("owner", OWN);
372 DBG_SET_FLAG_MAJ("pmc", PMC);
373 DBG_SET_FLAG_MAJ("process", PRC);
374 DBG_SET_FLAG_MAJ("sampling", SAM);
375
376 if (newbits == NULL) {
377 error = EINVAL;
378 goto done;
379 }
380
381 p++; /* skip the '=' */
382
383 /* Now parse the individual flags */
384 tmp = 0;
385 newflag:
386 for (q = p; p < fence && (c = *p); p++)
387 if (c == ' ' || c == '\t' || c == ',')
388 break;
389
390 /* p == fence or c == ws or c == "," or c == 0 */
391
392 if ((kwlen = p - q) == 0) {
393 *newbits = tmp;
394 continue;
395 }
396
397 found = 0;
398#define DBG_SET_FLAG_MIN(S,F) \
399 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
400 tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)
401
402 /* a '*' denotes all possible flags in the group */
403 if (kwlen == 1 && *q == '*')
404 tmp = found = ~0;
405 /* look for individual flag names */
406 DBG_SET_FLAG_MIN("allocaterow", ALR);
407 DBG_SET_FLAG_MIN("allocate", ALL);
408 DBG_SET_FLAG_MIN("attach", ATT);
409 DBG_SET_FLAG_MIN("bind", BND);
410 DBG_SET_FLAG_MIN("config", CFG);
411 DBG_SET_FLAG_MIN("exec", EXC);
412 DBG_SET_FLAG_MIN("exit", EXT);
413 DBG_SET_FLAG_MIN("find", FND);
414 DBG_SET_FLAG_MIN("flush", FLS);
415 DBG_SET_FLAG_MIN("fork", FRK);
416 DBG_SET_FLAG_MIN("getbuf", GTB);
417 DBG_SET_FLAG_MIN("hook", PMH);
418 DBG_SET_FLAG_MIN("init", INI);
419 DBG_SET_FLAG_MIN("intr", INT);
420 DBG_SET_FLAG_MIN("linktarget", TLK);
421 DBG_SET_FLAG_MIN("mayberemove", OMR);
422 DBG_SET_FLAG_MIN("ops", OPS);
423 DBG_SET_FLAG_MIN("read", REA);
424 DBG_SET_FLAG_MIN("register", REG);
425 DBG_SET_FLAG_MIN("release", REL);
426 DBG_SET_FLAG_MIN("remove", ORM);
427 DBG_SET_FLAG_MIN("sample", SAM);
428 DBG_SET_FLAG_MIN("scheduleio", SIO);
429 DBG_SET_FLAG_MIN("select", SEL);
430 DBG_SET_FLAG_MIN("signal", SIG);
431 DBG_SET_FLAG_MIN("swi", SWI);
432 DBG_SET_FLAG_MIN("swo", SWO);
433 DBG_SET_FLAG_MIN("start", STA);
434 DBG_SET_FLAG_MIN("stop", STO);
435 DBG_SET_FLAG_MIN("syscall", PMS);
436 DBG_SET_FLAG_MIN("unlinktarget", TUL);
437 DBG_SET_FLAG_MIN("write", WRI);
438 if (found == 0) {
439 /* unrecognized flag name */
440 error = EINVAL;
441 goto done;
442 }
443
444 if (c == 0 || c == ' ' || c == '\t') { /* end of flag group */
445 *newbits = tmp;
446 continue;
447 }
448
449 p++;
450 goto newflag;
451 }
452
453 /* save the new flag set */
454 bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));
455
456 done:
457 free(tmpflags, M_PMC);
458 return error;
459}
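/*
 * Illustrative aside: with the grammar parsed above, a debug-flag
 * string of the form
 *
 *	"cpu=bind,select process=fork,exit pmc=*"
 *
 * enables the "bind" and "select" minor flags in the "cpu" group,
 * "fork" and "exit" in the "process" group, and every flag in the
 * "pmc" group.  Groups are separated by whitespace and minor flags
 * by commas.
 */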
460
461static int
462pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
463{
464 char *fence, *newstr;
465 int error;
466 unsigned int n;
467
468 (void) arg1; (void) arg2; /* unused parameters */
469
470 n = sizeof(pmc_debugstr);
471 newstr = malloc(n, M_PMC, M_WAITOK|M_ZERO);
472 (void) strlcpy(newstr, pmc_debugstr, n);
473
474 error = sysctl_handle_string(oidp, newstr, n, req);
475
476 /* if there is a new string, parse and copy it */
477 if (error == 0 && req->newptr != NULL) {
478 fence = newstr + (n < req->newlen ? n : req->newlen + 1);
479 if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
480 (void) strlcpy(pmc_debugstr, newstr,
481 sizeof(pmc_debugstr));
482 }
483
484 free(newstr, M_PMC);
485
486 return error;
487}
488#endif
489
490/*
491 * Map a row index to a classdep structure and return the adjusted row
492 * index for the PMC class index.
493 */
494static struct pmc_classdep *
495pmc_ri_to_classdep(struct pmc_mdep *md, int ri, int *adjri)
496{
497 struct pmc_classdep *pcd;
498
499 (void) md;
500
501 KASSERT(ri >= 0 && ri < md->pmd_npmc,
502 ("[pmc,%d] illegal row-index %d", __LINE__, ri));
503
504 pcd = pmc_rowindex_to_classdep[ri];
505
506 KASSERT(pcd != NULL,
 507	    ("[pmc,%d] ri %d null pcd", __LINE__, ri));
508
509 *adjri = ri - pcd->pcd_ri;
510
511 KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num,
512 ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri));
513
514 return (pcd);
515}
516
517/*
518 * Concurrency Control
519 *
520 * The driver manages the following data structures:
521 *
522 * - target process descriptors, one per target process
523 * - owner process descriptors (and attached lists), one per owner process
524 * - lookup hash tables for owner and target processes
525 * - PMC descriptors (and attached lists)
526 * - per-cpu hardware state
527 * - the 'hook' variable through which the kernel calls into
528 * this module
529 * - the machine hardware state (managed by the MD layer)
530 *
531 * These data structures are accessed from:
532 *
533 * - thread context-switch code
534 * - interrupt handlers (possibly on multiple cpus)
535 * - kernel threads on multiple cpus running on behalf of user
536 * processes doing system calls
537 * - this driver's private kernel threads
538 *
539 * = Locks and Locking strategy =
540 *
541 * The driver uses four locking strategies for its operation:
542 *
543 * - The global SX lock "pmc_sx" is used to protect internal
544 * data structures.
545 *
546 * Calls into the module by syscall() start with this lock being
547 * held in exclusive mode. Depending on the requested operation,
548 * the lock may be downgraded to 'shared' mode to allow more
549 * concurrent readers into the module. Calls into the module from
550 * other parts of the kernel acquire the lock in shared mode.
551 *
552 * This SX lock is held in exclusive mode for any operations that
553 * modify the linkages between the driver's internal data structures.
554 *
555 * The 'pmc_hook' function pointer is also protected by this lock.
556 * It is only examined with the sx lock held in exclusive mode. The
557 * kernel module is allowed to be unloaded only with the sx lock held
558 * in exclusive mode. In normal syscall handling, after acquiring the
559 * pmc_sx lock we first check that 'pmc_hook' is non-null before
560 * proceeding. This prevents races between the thread unloading the module
561 * and other threads seeking to use the module.
562 *
563 * - Lookups of target process structures and owner process structures
564 * cannot use the global "pmc_sx" SX lock because these lookups need
565 * to happen during context switches and in other critical sections
566 * where sleeping is not allowed. We protect these lookup tables
567 * with their own private spin-mutexes, "pmc_processhash_mtx" and
568 * "pmc_ownerhash_mtx".
569 *
570 * - Interrupt handlers work in a lock free manner. At interrupt
571 * time, handlers look at the PMC pointer (phw->phw_pmc) configured
572 * when the PMC was started. If this pointer is NULL, the interrupt
573 * is ignored after updating driver statistics. We ensure that this
574 * pointer is set (using an atomic operation if necessary) before the
575 * PMC hardware is started. Conversely, this pointer is unset atomically
576 * only after the PMC hardware is stopped.
577 *
578 * We ensure that everything needed for the operation of an
579 * interrupt handler is available without it needing to acquire any
580 * locks. We also ensure that a PMC's software state is destroyed only
581 * after the PMC is taken off hardware (on all CPUs).
582 *
583 * - Context-switch handling with process-private PMCs needs more
584 * care.
585 *
586 * A given process may be the target of multiple PMCs. For example,
587 * PMCATTACH and PMCDETACH may be requested by a process on one CPU
588 * while the target process is running on another. A PMC could also
589 * be getting released because its owner is exiting. We tackle
590 * these situations in the following manner:
591 *
592 * - each target process structure 'pmc_process' has an array
593 * of 'struct pmc *' pointers, one for each hardware PMC.
594 *
595 * - At context switch IN time, each "target" PMC in RUNNING state
596 * gets started on hardware and a pointer to each PMC is copied into
597 * the per-cpu phw array. The 'runcount' for the PMC is
598 * incremented.
599 *
600 * - At context switch OUT time, all process-virtual PMCs are stopped
601 * on hardware. The saved value is added to the PMCs value field
602 * only if the PMC is in a non-deleted state (the PMCs state could
603 * have changed during the current time slice).
604 *
605 * Note that since in-between a switch IN on a processor and a switch
606 * OUT, the PMC could have been released on another CPU. Therefore
607 * context switch OUT always looks at the hardware state to turn
608 * OFF PMCs and will update a PMC's saved value only if reachable
609 * from the target process record.
610 *
611 * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
612 * be attached to many processes at the time of the call and could
613 * be active on multiple CPUs).
614 *
615 * We prevent further scheduling of the PMC by marking it as in
616 * state 'DELETED'. If the runcount of the PMC is non-zero then
617 * this PMC is currently running on a CPU somewhere. The thread
618 * doing the PMCRELEASE operation waits by repeatedly doing a
619 * pause() till the runcount comes to zero.
620 *
621 * The contents of a PMC descriptor (struct pmc) are protected using
622 * a spin-mutex. In order to save space, we use a mutex pool.
623 *
624 * In terms of lock types used by witness(4), we use:
625 * - Type "pmc-sx", used by the global SX lock.
626 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
627 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
628 * - Type "pmc-leaf", used for all other spin mutexes.
629 */
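/*
 * Illustrative aside (user-space analogue, not driver code): the
 * "mark DELETED, then wait for the run count to drain" discipline
 * described above can be sketched with C11 atomics, using a short
 * sleep in place of the kernel's pause(9).  All names below are made
 * up for the example.
 */
#include <stdatomic.h>
#include <unistd.h>

struct toy_pmc {
	_Atomic int	state;		/* e.g. RUNNING -> DELETED */
	_Atomic int	runcount;	/* CPUs still running this PMC */
};

#define	TOY_STATE_DELETED	3

static void
toy_pmc_release_wait(struct toy_pmc *pm)
{
	/* prevent further scheduling of the PMC */
	atomic_store(&pm->state, TOY_STATE_DELETED);

	/* then wait until no CPU is still running it */
	while (atomic_load(&pm->runcount) > 0)
		usleep(1000);	/* stands in for the kernel's pause(9) */
}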
630
631/*
632 * save the cpu binding of the current kthread
633 */
634
635static void
636pmc_save_cpu_binding(struct pmc_binding *pb)
637{
638 PMCDBG(CPU,BND,2, "%s", "save-cpu");
639 thread_lock(curthread);
640 pb->pb_bound = sched_is_bound(curthread);
641 pb->pb_cpu = curthread->td_oncpu;
642 thread_unlock(curthread);
643 PMCDBG(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
644}
645
646/*
647 * restore the cpu binding of the current thread
648 */
649
650static void
651pmc_restore_cpu_binding(struct pmc_binding *pb)
652{
653 PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
654 curthread->td_oncpu, pb->pb_cpu);
655 thread_lock(curthread);
656 if (pb->pb_bound)
657 sched_bind(curthread, pb->pb_cpu);
658 else
659 sched_unbind(curthread);
660 thread_unlock(curthread);
661 PMCDBG(CPU,BND,2, "%s", "restore-cpu done");
662}
663
664/*
 665 * move execution over to the specified cpu and bind it there.
666 */
667
668static void
669pmc_select_cpu(int cpu)
670{
671 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
672 ("[pmc,%d] bad cpu number %d", __LINE__, cpu));
673
674 /* Never move to an inactive CPU. */
675 KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive "
676 "CPU %d", __LINE__, cpu));
677
678 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
679 thread_lock(curthread);
680 sched_bind(curthread, cpu);
681 thread_unlock(curthread);
682
683 KASSERT(curthread->td_oncpu == cpu,
684 ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
685 cpu, curthread->td_oncpu));
686
687 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
688}
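/*
 * Illustrative aside: the three helpers above are meant to be used as
 * a bracket around code that must run on a particular CPU, roughly:
 *
 *	struct pmc_binding pb;
 *
 *	pmc_save_cpu_binding(&pb);
 *	pmc_select_cpu(cpu);
 *	... touch that CPU's PMC hardware ...
 *	pmc_restore_cpu_binding(&pb);
 */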
689
690/*
691 * Force a context switch.
692 *
693 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
694 * guaranteed to force a context switch.
695 */
696
697static void
698pmc_force_context_switch(void)
699{
700
701 pause("pmcctx", 1);
702}
703
704/*
705 * Get the file name for an executable. This is a simple wrapper
706 * around vn_fullpath(9).
707 */
708
709static void
710pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
711{
712
713 *fullpath = "unknown";
714 *freepath = NULL;
715 vn_fullpath(curthread, v, fullpath, freepath);
716}
717
718/*
 719 * remove a process owning PMCs
720 */
721
722void
723pmc_remove_owner(struct pmc_owner *po)
724{
725 struct pmc *pm, *tmp;
726
727 sx_assert(&pmc_sx, SX_XLOCKED);
728
729 PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);
730
731 /* Remove descriptor from the owner hash table */
732 LIST_REMOVE(po, po_next);
733
734 /* release all owned PMC descriptors */
735 LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
736 PMCDBG(OWN,ORM,2, "pmc=%p", pm);
737 KASSERT(pm->pm_owner == po,
738 ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));
739
740 pmc_release_pmc_descriptor(pm); /* will unlink from the list */
741 }
742
743 KASSERT(po->po_sscount == 0,
744 ("[pmc,%d] SS count not zero", __LINE__));
745 KASSERT(LIST_EMPTY(&po->po_pmcs),
746 ("[pmc,%d] PMC list not empty", __LINE__));
747
748 /* de-configure the log file if present */
749 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
750 pmclog_deconfigure_log(po);
751}
752
753/*
754 * remove an owner process record if all conditions are met.
755 */
756
757static void
758pmc_maybe_remove_owner(struct pmc_owner *po)
759{
760
761 PMCDBG(OWN,OMR,1, "maybe-remove-owner po=%p", po);
762
763 /*
764 * Remove owner record if
765 * - this process does not own any PMCs
766 * - this process has not allocated a system-wide sampling buffer
767 */
768
769 if (LIST_EMPTY(&po->po_pmcs) &&
770 ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
771 pmc_remove_owner(po);
772 pmc_destroy_owner_descriptor(po);
773 }
774}
775
776/*
777 * Add an association between a target process and a PMC.
778 */
779
780static void
781pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
782{
783 int ri;
784 struct pmc_target *pt;
785
786 sx_assert(&pmc_sx, SX_XLOCKED);
787
788 KASSERT(pm != NULL && pp != NULL,
789 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
790 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
791 ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
792 __LINE__, pm, pp->pp_proc->p_pid));
793 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < ((int) md->pmd_npmc - 1),
794 ("[pmc,%d] Illegal reference count %d for process record %p",
795 __LINE__, pp->pp_refcnt, (void *) pp));
796
797 ri = PMC_TO_ROWINDEX(pm);
798
799 PMCDBG(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
800 pm, ri, pp);
801
802#ifdef DEBUG
803 LIST_FOREACH(pt, &pm->pm_targets, pt_next)
804 if (pt->pt_process == pp)
805 KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
806 __LINE__, pp, pm));
807#endif
808
809 pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK|M_ZERO);
810 pt->pt_process = pp;
811
812 LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
813
814 atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
815 (uintptr_t)pm);
816
817 if (pm->pm_owner->po_owner == pp->pp_proc)
818 pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;
819
820 /*
821 * Initialize the per-process values at this row index.
822 */
823 pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
824 pm->pm_sc.pm_reloadcount : 0;
825
826 pp->pp_refcnt++;
827
828}
829
830/*
831 * Removes the association between a target process and a PMC.
832 */
833
834static void
835pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
836{
837 int ri;
838 struct proc *p;
839 struct pmc_target *ptgt;
840
841 sx_assert(&pmc_sx, SX_XLOCKED);
842
843 KASSERT(pm != NULL && pp != NULL,
844 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
845
846 KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt < (int) md->pmd_npmc,
847 ("[pmc,%d] Illegal ref count %d on process record %p",
848 __LINE__, pp->pp_refcnt, (void *) pp));
849
850 ri = PMC_TO_ROWINDEX(pm);
851
852 PMCDBG(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
853 pm, ri, pp);
854
855 KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
856 ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
857 ri, pm, pp->pp_pmcs[ri].pp_pmc));
858
859 pp->pp_pmcs[ri].pp_pmc = NULL;
860 pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;
861
862 /* Remove owner-specific flags */
863 if (pm->pm_owner->po_owner == pp->pp_proc) {
864 pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
865 pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
866 }
867
868 pp->pp_refcnt--;
869
870 /* Remove the target process from the PMC structure */
871 LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
872 if (ptgt->pt_process == pp)
873 break;
874
875 KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
876 "in pmc %p", __LINE__, pp->pp_proc, pp, pm));
877
878 LIST_REMOVE(ptgt, pt_next);
879 free(ptgt, M_PMC);
880
881 /* if the PMC now lacks targets, send the owner a SIGIO */
882 if (LIST_EMPTY(&pm->pm_targets)) {
883 p = pm->pm_owner->po_owner;
884 PROC_LOCK(p);
885 psignal(p, SIGIO);
886 PROC_UNLOCK(p);
887
888 PMCDBG(PRC,SIG,2, "signalling proc=%p signal=%d", p,
889 SIGIO);
890 }
891}
892
893/*
894 * Check if PMC 'pm' may be attached to target process 't'.
895 */
896
897static int
898pmc_can_attach(struct pmc *pm, struct proc *t)
899{
900 struct proc *o; /* pmc owner */
901 struct ucred *oc, *tc; /* owner, target credentials */
902 int decline_attach, i;
903
904 /*
905 * A PMC's owner can always attach that PMC to itself.
906 */
907
908 if ((o = pm->pm_owner->po_owner) == t)
909 return 0;
910
911 PROC_LOCK(o);
912 oc = o->p_ucred;
913 crhold(oc);
914 PROC_UNLOCK(o);
915
916 PROC_LOCK(t);
917 tc = t->p_ucred;
918 crhold(tc);
919 PROC_UNLOCK(t);
920
921 /*
922 * The effective uid of the PMC owner should match at least one
923 * of the {effective,real,saved} uids of the target process.
924 */
925
926 decline_attach = oc->cr_uid != tc->cr_uid &&
927 oc->cr_uid != tc->cr_svuid &&
928 oc->cr_uid != tc->cr_ruid;
929
930 /*
 931	 * Every one of the target's group ids must be in the owner's
932 * group list.
933 */
934 for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
935 decline_attach = !groupmember(tc->cr_groups[i], oc);
936
 937	/* check the real and saved gids too */
938 if (decline_attach == 0)
939 decline_attach = !groupmember(tc->cr_rgid, oc) ||
940 !groupmember(tc->cr_svgid, oc);
941
942 crfree(tc);
943 crfree(oc);
944
945 return !decline_attach;
946}
947
948/*
949 * Attach a process to a PMC.
950 */
951
952static int
953pmc_attach_one_process(struct proc *p, struct pmc *pm)
954{
955 int ri;
956 char *fullpath, *freepath;
957 struct pmc_process *pp;
958
959 sx_assert(&pmc_sx, SX_XLOCKED);
960
961 PMCDBG(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
962 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
963
964 /*
965 * Locate the process descriptor corresponding to process 'p',
966 * allocating space as needed.
967 *
968 * Verify that rowindex 'pm_rowindex' is free in the process
969 * descriptor.
970 *
 971	 * If it is, link the process descriptor and the
 972	 * PMC.
973 */
974 ri = PMC_TO_ROWINDEX(pm);
975
976 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL)
977 return ENOMEM;
978
979 if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */
980 return EEXIST;
981
982 if (pp->pp_pmcs[ri].pp_pmc != NULL)
983 return EBUSY;
984
985 pmc_link_target_process(pm, pp);
986
987 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
988 (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
989 pm->pm_flags |= PMC_F_NEEDS_LOGFILE;
990
991 pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */
992
993 /* issue an attach event to a configured log file */
994 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
995 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
996 if (p->p_flag & P_KTHREAD) {
997 fullpath = kernelname;
998 freepath = NULL;
999 } else
1000 pmclog_process_pmcattach(pm, p->p_pid, fullpath);
1001 if (freepath)
1002 free(freepath, M_TEMP);
1003 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1004 pmc_log_process_mappings(pm->pm_owner, p);
1005 }
1006 /* mark process as using HWPMCs */
1007 PROC_LOCK(p);
1008 p->p_flag |= P_HWPMC;
1009 PROC_UNLOCK(p);
1010
1011 return 0;
1012}
1013
1014/*
1015 * Attach a process and optionally its children
1016 */
1017
1018static int
1019pmc_attach_process(struct proc *p, struct pmc *pm)
1020{
1021 int error;
1022 struct proc *top;
1023
1024 sx_assert(&pmc_sx, SX_XLOCKED);
1025
1026 PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
1027 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1028
1029
1030 /*
1031 * If this PMC successfully allowed a GETMSR operation
1032 * in the past, disallow further ATTACHes.
1033 */
1034
1035 if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
1036 return EPERM;
1037
1038 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1039 return pmc_attach_one_process(p, pm);
1040
1041 /*
1042 * Traverse all child processes, attaching them to
1043 * this PMC.
1044 */
1045
1046 sx_slock(&proctree_lock);
1047
1048 top = p;
1049
1050 for (;;) {
1051 if ((error = pmc_attach_one_process(p, pm)) != 0)
1052 break;
1053 if (!LIST_EMPTY(&p->p_children))
1054 p = LIST_FIRST(&p->p_children);
1055 else for (;;) {
1056 if (p == top)
1057 goto done;
1058 if (LIST_NEXT(p, p_sibling)) {
1059 p = LIST_NEXT(p, p_sibling);
1060 break;
1061 }
1062 p = p->p_pptr;
1063 }
1064 }
1065
1066 if (error)
1067 (void) pmc_detach_process(top, pm);
1068
1069 done:
1070 sx_sunlock(&proctree_lock);
1071 return error;
1072}
1073
1074/*
1075 * Detach a process from a PMC. If there are no other PMCs tracking
1076 * this process, remove the process structure from its hash table. If
1077 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
1078 */
1079
1080static int
1081pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
1082{
1083 int ri;
1084 struct pmc_process *pp;
1085
1086 sx_assert(&pmc_sx, SX_XLOCKED);
1087
1088 KASSERT(pm != NULL,
1089 ("[pmc,%d] null pm pointer", __LINE__));
1090
1091 ri = PMC_TO_ROWINDEX(pm);
1092
1093 PMCDBG(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
1094 pm, ri, p, p->p_pid, p->p_comm, flags);
1095
1096 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
1097 return ESRCH;
1098
1099 if (pp->pp_pmcs[ri].pp_pmc != pm)
1100 return EINVAL;
1101
1102 pmc_unlink_target_process(pm, pp);
1103
1104 /* Issue a detach entry if a log file is configured */
1105 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
1106 pmclog_process_pmcdetach(pm, p->p_pid);
1107
1108 /*
 1109	 * If there are no PMCs targeting this process, we remove its
1110 * descriptor from the target hash table and unset the P_HWPMC
1111 * flag in the struct proc.
1112 */
1113 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
1114 ("[pmc,%d] Illegal refcnt %d for process struct %p",
1115 __LINE__, pp->pp_refcnt, pp));
1116
1117 if (pp->pp_refcnt != 0) /* still a target of some PMC */
1118 return 0;
1119
1120 pmc_remove_process_descriptor(pp);
1121
1122 if (flags & PMC_FLAG_REMOVE)
1123 free(pp, M_PMC);
1124
1125 PROC_LOCK(p);
1126 p->p_flag &= ~P_HWPMC;
1127 PROC_UNLOCK(p);
1128
1129 return 0;
1130}
1131
1132/*
1133 * Detach a process and optionally its descendants from a PMC.
1134 */
1135
1136static int
1137pmc_detach_process(struct proc *p, struct pmc *pm)
1138{
1139 struct proc *top;
1140
1141 sx_assert(&pmc_sx, SX_XLOCKED);
1142
1143 PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
1144 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1145
1146 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1147 return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1148
1149 /*
1150 * Traverse all children, detaching them from this PMC. We
1151 * ignore errors since we could be detaching a PMC from a
1152 * partially attached proc tree.
1153 */
1154
1155 sx_slock(&proctree_lock);
1156
1157 top = p;
1158
1159 for (;;) {
1160 (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1161
1162 if (!LIST_EMPTY(&p->p_children))
1163 p = LIST_FIRST(&p->p_children);
1164 else for (;;) {
1165 if (p == top)
1166 goto done;
1167 if (LIST_NEXT(p, p_sibling)) {
1168 p = LIST_NEXT(p, p_sibling);
1169 break;
1170 }
1171 p = p->p_pptr;
1172 }
1173 }
1174
1175 done:
1176 sx_sunlock(&proctree_lock);
1177
1178 if (LIST_EMPTY(&pm->pm_targets))
1179 pm->pm_flags &= ~PMC_F_ATTACH_DONE;
1180
1181 return 0;
1182}
1183
1184
1185/*
1186 * Thread context switch IN
1187 */
1188
1189static void
1190pmc_process_csw_in(struct thread *td)
1191{
1192 int cpu;
1193 unsigned int adjri, ri;
1194 struct pmc *pm;
1195 struct proc *p;
1196 struct pmc_cpu *pc;
1197 struct pmc_hw *phw;
1198 pmc_value_t newvalue;
1199 struct pmc_process *pp;
1200 struct pmc_classdep *pcd;
1201
1202 p = td->td_proc;
1203
1204 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
1205 return;
1206
1207 KASSERT(pp->pp_proc == td->td_proc,
1208 ("[pmc,%d] not my thread state", __LINE__));
1209
1210 critical_enter(); /* no preemption from this point */
1211
1212 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1213
1214 PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1215 p->p_pid, p->p_comm, pp);
1216
1217 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
 1218	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));
1219
1220 pc = pmc_pcpu[cpu];
1221
1222 for (ri = 0; ri < md->pmd_npmc; ri++) {
1223
1224 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
1225 continue;
1226
1227 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
1228 ("[pmc,%d] Target PMC in non-virtual mode (%d)",
1229 __LINE__, PMC_TO_MODE(pm)));
1230
1231 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1232 ("[pmc,%d] Row index mismatch pmc %d != ri %d",
1233 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1234
1235 /*
1236 * Only PMCs that are marked as 'RUNNING' need
1237 * be placed on hardware.
1238 */
1239
1240 if (pm->pm_state != PMC_STATE_RUNNING)
1241 continue;
1242
1243 /* increment PMC runcount */
1244 atomic_add_rel_32(&pm->pm_runcount, 1);
1245
1246 /* configure the HWPMC we are going to use. */
1247 pcd = pmc_ri_to_classdep(md, ri, &adjri);
1248 pcd->pcd_config_pmc(cpu, adjri, pm);
1249
1250 phw = pc->pc_hwpmcs[ri];
1251
1252 KASSERT(phw != NULL,
1253 ("[pmc,%d] null hw pointer", __LINE__));
1254
1255 KASSERT(phw->phw_pmc == pm,
1256 ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
1257 phw->phw_pmc, pm));
1258
1259 /*
1260 * Write out saved value and start the PMC.
1261 *
1262 * Sampling PMCs use a per-process value, while
1263 * counting mode PMCs use a per-pmc value that is
1264 * inherited across descendants.
1265 */
1266 if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
1267 mtx_pool_lock_spin(pmc_mtxpool, pm);
1268 newvalue = PMC_PCPU_SAVED(cpu,ri) =
1269 pp->pp_pmcs[ri].pp_pmcval;
1270 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1271 } else {
1272 KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
1273 ("[pmc,%d] illegal mode=%d", __LINE__,
1274 PMC_TO_MODE(pm)));
1275 mtx_pool_lock_spin(pmc_mtxpool, pm);
1276 newvalue = PMC_PCPU_SAVED(cpu, ri) =
1277 pm->pm_gv.pm_savedvalue;
1278 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1279 }
1280
1281 PMCDBG(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);
1282
1283 pcd->pcd_write_pmc(cpu, adjri, newvalue);
1284 pcd->pcd_start_pmc(cpu, adjri);
1285 }
1286
1287 /*
1288 * perform any other architecture/cpu dependent thread
1289 * switch-in actions.
1290 */
1291
1292 (void) (*md->pmd_switch_in)(pc, pp);
1293
1294 critical_exit();
1295
1296}
1297
1298/*
1299 * Thread context switch OUT.
1300 */
1301
1302static void
1303pmc_process_csw_out(struct thread *td)
1304{
1305 int cpu;
1306 int64_t tmp;
1307 struct pmc *pm;
1308 struct proc *p;
1309 enum pmc_mode mode;
1310 struct pmc_cpu *pc;
1311 pmc_value_t newvalue;
1312 unsigned int adjri, ri;
1313 struct pmc_process *pp;
1314 struct pmc_classdep *pcd;
1315
1316
1317 /*
1318 * Locate our process descriptor; this may be NULL if
1319 * this process is exiting and we have already removed
1320 * the process from the target process table.
1321 *
1322 * Note that due to kernel preemption, multiple
1323 * context switches may happen while the process is
1324 * exiting.
1325 *
1326 * Note also that if the target process cannot be
1327 * found we still need to deconfigure any PMCs that
1328 * are currently running on hardware.
1329 */
1330
1331 p = td->td_proc;
1332 pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
1333
1334 /*
1335 * save PMCs
1336 */
1337
1338 critical_enter();
1339
1340 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1341
1342 PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1343 p->p_pid, p->p_comm, pp);
1344
1345 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
 1346	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));
1347
1348 pc = pmc_pcpu[cpu];
1349
1350 /*
 1351	 * When a PMC gets unlinked from a target process, it will
 1352	 * be removed from that process's pp_pmcs[] array.
1353 *
1354 * However, on a MP system, the target could have been
1355 * executing on another CPU at the time of the unlink.
1356 * So, at context switch OUT time, we need to look at
1357 * the hardware to determine if a PMC is scheduled on
1358 * it.
1359 */
1360
1361 for (ri = 0; ri < md->pmd_npmc; ri++) {
1362
1363 pcd = pmc_ri_to_classdep(md, ri, &adjri);
1364 pm = NULL;
1365 (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
1366
1367 if (pm == NULL) /* nothing at this row index */
1368 continue;
1369
1370 mode = PMC_TO_MODE(pm);
1371 if (!PMC_IS_VIRTUAL_MODE(mode))
1372 continue; /* not a process virtual PMC */
1373
1374 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1375 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
1376 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1377
1378 /* Stop hardware if not already stopped */
1379 if (pm->pm_stalled == 0)
1380 pcd->pcd_stop_pmc(cpu, adjri);
1381
1382 /* reduce this PMC's runcount */
1383 atomic_subtract_rel_32(&pm->pm_runcount, 1);
1384
1385 /*
1386 * If this PMC is associated with this process,
1387 * save the reading.
1388 */
1389
1390 if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) {
1391
1392 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
1393 ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
1394 pm, ri, pp->pp_pmcs[ri].pp_pmc));
1395
1396 KASSERT(pp->pp_refcnt > 0,
1397 ("[pmc,%d] pp refcnt = %d", __LINE__,
1398 pp->pp_refcnt));
1399
1400 pcd->pcd_read_pmc(cpu, adjri, &newvalue);
1401
1402 tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);
1403
1404 PMCDBG(CSW,SWI,1,"cpu=%d ri=%d tmp=%jd", cpu, ri,
1405 tmp);
1406
1407 if (mode == PMC_MODE_TS) {
1408
1409 /*
1410 * For sampling process-virtual PMCs,
1411 * we expect the count to be
1412 * decreasing as the 'value'
1413 * programmed into the PMC is the
1414 * number of events to be seen till
1415 * the next sampling interrupt.
1416 */
1417 if (tmp < 0)
1418 tmp += pm->pm_sc.pm_reloadcount;
1419 mtx_pool_lock_spin(pmc_mtxpool, pm);
1420 pp->pp_pmcs[ri].pp_pmcval -= tmp;
1421 if ((int64_t) pp->pp_pmcs[ri].pp_pmcval < 0)
1422 pp->pp_pmcs[ri].pp_pmcval +=
1423 pm->pm_sc.pm_reloadcount;
1424 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1425
1426 } else {
1427
1428 /*
1429 * For counting process-virtual PMCs,
1430 * we expect the count to be
1431 * increasing monotonically, modulo a 64
1432 * bit wraparound.
1433 */
1434 KASSERT((int64_t) tmp >= 0,
1435 ("[pmc,%d] negative increment cpu=%d "
1436 "ri=%d newvalue=%jx saved=%jx "
1437 "incr=%jx", __LINE__, cpu, ri,
1438 newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));
1439
1440 mtx_pool_lock_spin(pmc_mtxpool, pm);
1441 pm->pm_gv.pm_savedvalue += tmp;
1442 pp->pp_pmcs[ri].pp_pmcval += tmp;
1443 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1444
1445 if (pm->pm_flags & PMC_F_LOG_PROCCSW)
1446 pmclog_process_proccsw(pm, pp, tmp);
1447 }
1448 }
1449
1450 /* mark hardware as free */
1451 pcd->pcd_config_pmc(cpu, adjri, NULL);
1452 }
1453
1454 /*
1455 * perform any other architecture/cpu dependent thread
1456 * switch out functions.
1457 */
1458
1459 (void) (*md->pmd_switch_out)(pc, pp);
1460
1461 critical_exit();
1462}
1463
1464/*
1465 * Log a KLD operation.
1466 */
1467
1468static void
1469pmc_process_kld_load(struct pmckern_map_in *pkm)
1470{
1471 struct pmc_owner *po;
1472
1473 sx_assert(&pmc_sx, SX_LOCKED);
1474
1475 /*
1476 * Notify owners of system sampling PMCs about KLD operations.
1477 */
1478
1479 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1480 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1481 pmclog_process_map_in(po, (pid_t) -1, pkm->pm_address,
1482 (char *) pkm->pm_file);
1483
1484 /*
1485 * TODO: Notify owners of (all) process-sampling PMCs too.
1486 */
1487
1488 return;
1489}
1490
1491static void
1492pmc_process_kld_unload(struct pmckern_map_out *pkm)
1493{
1494 struct pmc_owner *po;
1495
1496 sx_assert(&pmc_sx, SX_LOCKED);
1497
1498 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1499 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1500 pmclog_process_map_out(po, (pid_t) -1,
1501 pkm->pm_address, pkm->pm_address + pkm->pm_size);
1502
1503 /*
1504 * TODO: Notify owners of process-sampling PMCs.
1505 */
1506}
1507
1508/*
1509 * A mapping change for a process.
1510 */
1511
1512static void
1513pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
1514{
1515 int ri;
1516 pid_t pid;
1517 char *fullpath, *freepath;
1518 const struct pmc *pm;
1519 struct pmc_owner *po;
1520 const struct pmc_process *pp;
1521
1522 freepath = fullpath = NULL;
1523 pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath);
1524
1525 pid = td->td_proc->p_pid;
1526
1527 /* Inform owners of all system-wide sampling PMCs. */
1528 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1529 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1530 pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);
1531
1532 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1533 goto done;
1534
1535 /*
1536 * Inform sampling PMC owners tracking this process.
1537 */
1538 for (ri = 0; ri < md->pmd_npmc; ri++)
1539 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1540 PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1541 pmclog_process_map_in(pm->pm_owner,
1542 pid, pkm->pm_address, fullpath);
1543
1544 done:
1545 if (freepath)
1546 free(freepath, M_TEMP);
1547}
1548
1549
1550/*
1551 * Log an munmap request.
1552 */
1553
1554static void
1555pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
1556{
1557 int ri;
1558 pid_t pid;
1559 struct pmc_owner *po;
1560 const struct pmc *pm;
1561 const struct pmc_process *pp;
1562
1563 pid = td->td_proc->p_pid;
1564
1565 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1566 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1567 pmclog_process_map_out(po, pid, pkm->pm_address,
1568 pkm->pm_address + pkm->pm_size);
1569
1570 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1571 return;
1572
1573 for (ri = 0; ri < md->pmd_npmc; ri++)
1574 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1575 PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1576 pmclog_process_map_out(pm->pm_owner, pid,
1577 pkm->pm_address, pkm->pm_address + pkm->pm_size);
1578}
1579
1580/*
1581 * Log mapping information about the kernel.
1582 */
1583
1584static void
1585pmc_log_kernel_mappings(struct pmc *pm)
1586{
1587 struct pmc_owner *po;
1588 struct pmckern_map_in *km, *kmbase;
1589
1590 sx_assert(&pmc_sx, SX_LOCKED);
1591 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
1592 ("[pmc,%d] non-sampling PMC (%p) desires mapping information",
1593 __LINE__, (void *) pm));
1594
1595 po = pm->pm_owner;
1596
1597 if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE)
1598 return;
1599
1600 /*
1601 * Log the current set of kernel modules.
1602 */
1603 kmbase = linker_hwpmc_list_objects();
1604 for (km = kmbase; km->pm_file != NULL; km++) {
1605 PMCDBG(LOG,REG,1,"%s %p", (char *) km->pm_file,
1606 (void *) km->pm_address);
1607 pmclog_process_map_in(po, (pid_t) -1, km->pm_address,
1608 km->pm_file);
1609 }
1610 free(kmbase, M_LINKER);
1611
1612 po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE;
1613}
1614
1615/*
1616 * Log the mappings for a single process.
1617 */
1618
1619static void
1620pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
1621{
1622}
1623
1624/*
1625 * Log mappings for all processes in the system.
1626 */
1627
1628static void
1629pmc_log_all_process_mappings(struct pmc_owner *po)
1630{
1631 struct proc *p, *top;
1632
1633 sx_assert(&pmc_sx, SX_XLOCKED);
1634
1635 if ((p = pfind(1)) == NULL)
1636 panic("[pmc,%d] Cannot find init", __LINE__);
1637
1638 PROC_UNLOCK(p);
1639
1640 sx_slock(&proctree_lock);
1641
1642 top = p;
1643
1644 for (;;) {
1645 pmc_log_process_mappings(po, p);
1646 if (!LIST_EMPTY(&p->p_children))
1647 p = LIST_FIRST(&p->p_children);
1648 else for (;;) {
1649 if (p == top)
1650 goto done;
1651 if (LIST_NEXT(p, p_sibling)) {
1652 p = LIST_NEXT(p, p_sibling);
1653 break;
1654 }
1655 p = p->p_pptr;
1656 }
1657 }
1658 done:
1659 sx_sunlock(&proctree_lock);
1660}
1661
1662/*
1663 * The 'hook' invoked from the kernel proper
1664 */
1665
1666
1667#ifdef DEBUG
1668const char *pmc_hooknames[] = {
1669 /* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
1670 "",
1671 "EXEC",
1672 "CSW-IN",
1673 "CSW-OUT",
1674 "SAMPLE",
1675 "KLDLOAD",
1676 "KLDUNLOAD",
1677 "MMAP",
1678 "MUNMAP",
1679 "CALLCHAIN"
1680};
1681#endif
1682
1683static int
1684pmc_hook_handler(struct thread *td, int function, void *arg)
1685{
1686
1687 PMCDBG(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
1688 pmc_hooknames[function], arg);
1689
1690 switch (function)
1691 {
1692
1693 /*
1694 * Process exec()
1695 */
1696
1697 case PMC_FN_PROCESS_EXEC:
1698 {
1699 char *fullpath, *freepath;
1700 unsigned int ri;
1701 int is_using_hwpmcs;
1702 struct pmc *pm;
1703 struct proc *p;
1704 struct pmc_owner *po;
1705 struct pmc_process *pp;
1706 struct pmckern_procexec *pk;
1707
1708 sx_assert(&pmc_sx, SX_XLOCKED);
1709
1710 p = td->td_proc;
1711 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
1712
1713 pk = (struct pmckern_procexec *) arg;
1714
1715 /* Inform owners of SS mode PMCs of the exec event. */
1716 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1717 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1718 pmclog_process_procexec(po, PMC_ID_INVALID,
1719 p->p_pid, pk->pm_entryaddr, fullpath);
1720
1721 PROC_LOCK(p);
1722 is_using_hwpmcs = p->p_flag & P_HWPMC;
1723 PROC_UNLOCK(p);
1724
1725 if (!is_using_hwpmcs) {
1726 if (freepath)
1727 free(freepath, M_TEMP);
1728 break;
1729 }
1730
1731 /*
1732 * PMCs are not inherited across an exec(): remove any
1733 * PMCs that this process is the owner of.
1734 */
1735
1736 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
1737 pmc_remove_owner(po);
1738 pmc_destroy_owner_descriptor(po);
1739 }
1740
1741 /*
1742 * If the process being exec'ed is not the target of any
1743 * PMC, we are done.
1744 */
1745 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
1746 if (freepath)
1747 free(freepath, M_TEMP);
1748 break;
1749 }
1750
1751 /*
1752 * Log the exec event to all monitoring owners. Skip
 1753		 * owners who have already received the event because
1754 * they had system sampling PMCs active.
1755 */
1756 for (ri = 0; ri < md->pmd_npmc; ri++)
1757 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
1758 po = pm->pm_owner;
1759 if (po->po_sscount == 0 &&
1760 po->po_flags & PMC_PO_OWNS_LOGFILE)
1761 pmclog_process_procexec(po, pm->pm_id,
1762 p->p_pid, pk->pm_entryaddr,
1763 fullpath);
1764 }
1765
1766 if (freepath)
1767 free(freepath, M_TEMP);
1768
1769
1770 PMCDBG(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
1771 p, p->p_pid, p->p_comm, pk->pm_credentialschanged);
1772
1773 if (pk->pm_credentialschanged == 0) /* no change */
1774 break;
1775
1776 /*
1777 * If the newly exec()'ed process has a different credential
1778 * than before, allow it to be the target of a PMC only if
 1779		 * the PMC's owner has sufficient privilege.
1780 */
1781
1782 for (ri = 0; ri < md->pmd_npmc; ri++)
1783 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
1784 if (pmc_can_attach(pm, td->td_proc) != 0)
1785 pmc_detach_one_process(td->td_proc,
1786 pm, PMC_FLAG_NONE);
1787
1788 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
1789 ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
1790 pp->pp_refcnt, pp));
1791
1792 /*
1793 * If this process is no longer the target of any
1794 * PMCs, we can remove the process entry and free
1795 * up space.
1796 */
1797
1798 if (pp->pp_refcnt == 0) {
1799 pmc_remove_process_descriptor(pp);
1800 free(pp, M_PMC);
1801 break;
1802 }
1803
1804 }
1805 break;
1806
1807 case PMC_FN_CSW_IN:
1808 pmc_process_csw_in(td);
1809 break;
1810
1811 case PMC_FN_CSW_OUT:
1812 pmc_process_csw_out(td);
1813 break;
1814
1815 /*
1816 * Process accumulated PC samples.
1817 *
1818 * This function is expected to be called by hardclock() for
1819 * each CPU that has accumulated PC samples.
1820 *
1821 * This function is to be executed on the CPU whose samples
1822 * are being processed.
1823 */
1824 case PMC_FN_DO_SAMPLES:
1825
1826 /*
 1827		 * Clear the cpu specific bit in the CPU mask before
 1828		 * doing the rest of the processing.  If the NMI handler
1829 * gets invoked after the "atomic_clear_int()" call
1830 * below but before "pmc_process_samples()" gets
1831 * around to processing the interrupt, then we will
1832 * come back here at the next hardclock() tick (and
1833 * may find nothing to do if "pmc_process_samples()"
1834 * had already processed the interrupt). We don't
1835 * lose the interrupt sample.
1836 */
1837 atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
1838 pmc_process_samples(PCPU_GET(cpuid));
1839 break;
1840
1841
1842 case PMC_FN_KLD_LOAD:
1843 sx_assert(&pmc_sx, SX_LOCKED);
1844 pmc_process_kld_load((struct pmckern_map_in *) arg);
1845 break;
1846
1847 case PMC_FN_KLD_UNLOAD:
1848 sx_assert(&pmc_sx, SX_LOCKED);
1849 pmc_process_kld_unload((struct pmckern_map_out *) arg);
1850 break;
1851
1852 case PMC_FN_MMAP:
1853 sx_assert(&pmc_sx, SX_LOCKED);
1854 pmc_process_mmap(td, (struct pmckern_map_in *) arg);
1855 break;
1856
1857 case PMC_FN_MUNMAP:
1858 sx_assert(&pmc_sx, SX_LOCKED);
1859 pmc_process_munmap(td, (struct pmckern_map_out *) arg);
1860 break;
1861
1862 case PMC_FN_USER_CALLCHAIN:
1863 /*
1864 * Record a call chain.
1865 */
34
35#include <sys/param.h>
36#include <sys/eventhandler.h>
37#include <sys/jail.h>
38#include <sys/kernel.h>
39#include <sys/kthread.h>
40#include <sys/limits.h>
41#include <sys/lock.h>
42#include <sys/malloc.h>
43#include <sys/module.h>
44#include <sys/mutex.h>
45#include <sys/pmc.h>
46#include <sys/pmckern.h>
47#include <sys/pmclog.h>
48#include <sys/priv.h>
49#include <sys/proc.h>
50#include <sys/queue.h>
51#include <sys/resourcevar.h>
52#include <sys/sched.h>
53#include <sys/signalvar.h>
54#include <sys/smp.h>
55#include <sys/sx.h>
56#include <sys/sysctl.h>
57#include <sys/sysent.h>
58#include <sys/systm.h>
59#include <sys/vnode.h>
60
61#include <sys/linker.h> /* needs to be after <sys/malloc.h> */
62
63#include <machine/atomic.h>
64#include <machine/md_var.h>
65
66/*
67 * Types
68 */
69
70enum pmc_flags {
71 PMC_FLAG_NONE = 0x00, /* do nothing */
72 PMC_FLAG_REMOVE = 0x01, /* atomically remove entry from hash */
73 PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
74};
75
76/*
77 * The offset in sysent where the syscall is allocated.
78 */
79
80static int pmc_syscall_num = NO_SYSCALL;
81struct pmc_cpu **pmc_pcpu; /* per-cpu state */
82pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */
83
84#define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
85
86struct mtx_pool *pmc_mtxpool;
87static int *pmc_pmcdisp; /* PMC row dispositions */
88
89#define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0)
90#define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0)
91#define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0)
92
93#define PMC_MARK_ROW_FREE(R) do { \
94 pmc_pmcdisp[(R)] = 0; \
95} while (0)
96
97#define PMC_MARK_ROW_STANDALONE(R) do { \
98 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
99 __LINE__)); \
100 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
101 KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()), \
102 ("[pmc,%d] row disposition error", __LINE__)); \
103} while (0)
104
105#define PMC_UNMARK_ROW_STANDALONE(R) do { \
106 atomic_add_int(&pmc_pmcdisp[(R)], 1); \
107 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
108 __LINE__)); \
109} while (0)
110
111#define PMC_MARK_ROW_THREAD(R) do { \
112 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
113 __LINE__)); \
114 atomic_add_int(&pmc_pmcdisp[(R)], 1); \
115} while (0)
116
117#define PMC_UNMARK_ROW_THREAD(R) do { \
118 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
119 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
120 __LINE__)); \
121} while (0)
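
/*
 * To summarize the convention encoded above: pmc_pmcdisp[R] == 0 means
 * row R is free, a positive value counts thread-mode (process-virtual)
 * allocations using the row, and a negative value counts system-wide
 * ("standalone") allocations, bounded by the number of active CPUs.
 */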
122
123
124/* various event handlers */
125static eventhandler_tag pmc_exit_tag, pmc_fork_tag;
126
127/* Module statistics */
128struct pmc_op_getdriverstats pmc_stats;
129
130/* Machine/processor dependent operations */
131static struct pmc_mdep *md;
132
133/*
134 * Hash tables mapping owner and target processes to PMCs.
135 */
136
137struct mtx pmc_processhash_mtx; /* spin mutex */
138static u_long pmc_processhashmask;
139static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;
140
141/*
142 * Hash table of PMC owner descriptors. This table is protected by
143 * the shared PMC "sx" lock.
144 */
145
146static u_long pmc_ownerhashmask;
147static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;
148
149/*
150 * List of PMC owners with system-wide sampling PMCs.
151 */
152
153static LIST_HEAD(, pmc_owner) pmc_ss_owners;
154
155
156/*
157 * A map of row indices to classdep structures.
158 */
159static struct pmc_classdep **pmc_rowindex_to_classdep;
160
161/*
162 * Prototypes
163 */
164
165#ifdef DEBUG
166static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
167static int pmc_debugflags_parse(char *newstr, char *fence);
168#endif
169
170static int load(struct module *module, int cmd, void *arg);
171static int pmc_attach_process(struct proc *p, struct pmc *pm);
172static struct pmc *pmc_allocate_pmc_descriptor(void);
173static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
174static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
175static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
176 int cpu);
177static int pmc_can_attach(struct pmc *pm, struct proc *p);
178static void pmc_capture_user_callchain(int cpu, struct trapframe *tf);
179static void pmc_cleanup(void);
180static int pmc_detach_process(struct proc *p, struct pmc *pm);
181static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
182 int flags);
183static void pmc_destroy_owner_descriptor(struct pmc_owner *po);
184static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
185static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
186static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
187 pmc_id_t pmc);
188static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
189 uint32_t mode);
190static void pmc_force_context_switch(void);
191static void pmc_link_target_process(struct pmc *pm,
192 struct pmc_process *pp);
193static void pmc_log_all_process_mappings(struct pmc_owner *po);
194static void pmc_log_kernel_mappings(struct pmc *pm);
195static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p);
196static void pmc_maybe_remove_owner(struct pmc_owner *po);
197static void pmc_process_csw_in(struct thread *td);
198static void pmc_process_csw_out(struct thread *td);
199static void pmc_process_exit(void *arg, struct proc *p);
200static void pmc_process_fork(void *arg, struct proc *p1,
201 struct proc *p2, int n);
202static void pmc_process_samples(int cpu);
203static void pmc_release_pmc_descriptor(struct pmc *pmc);
204static void pmc_remove_owner(struct pmc_owner *po);
205static void pmc_remove_process_descriptor(struct pmc_process *pp);
206static void pmc_restore_cpu_binding(struct pmc_binding *pb);
207static void pmc_save_cpu_binding(struct pmc_binding *pb);
208static void pmc_select_cpu(int cpu);
209static int pmc_start(struct pmc *pm);
210static int pmc_stop(struct pmc *pm);
211static int pmc_syscall_handler(struct thread *td, void *syscall_args);
212static void pmc_unlink_target_process(struct pmc *pmc,
213 struct pmc_process *pp);
214
215/*
216 * Kernel tunables and sysctl(8) interface.
217 */
218
219SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");
220
221static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
222TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "callchaindepth", &pmc_callchaindepth);
223SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_TUN|CTLFLAG_RD,
224 &pmc_callchaindepth, 0, "depth of call chain records");
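
/*
 * Being marked CTLFLAG_TUN|CTLFLAG_RD, this value can only be set from
 * the boot-time environment, e.g. with a loader.conf(5) line such as
 * (assuming PMC_SYSCTL_NAME_PREFIX expands to "kern.hwpmc."):
 *
 *	kern.hwpmc.callchaindepth="32"
 */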
225
226#ifdef DEBUG
227struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
228char pmc_debugstr[PMC_DEBUG_STRSIZE];
229TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
230 sizeof(pmc_debugstr));
231SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
232 CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
233 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
234#endif
235
236/*
237 * kern.hwpmc.hashsize -- determines the number of rows in the
238 * hash tables used to look up target processes and owners
239 */
240
241static int pmc_hashsize = PMC_HASH_SIZE;
242TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
243SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
244 &pmc_hashsize, 0, "rows in hash tables");
245
246/*
247 * kern.hwpmc.nsamples --- number of PC samples/callchain stacks per CPU
248 */
249
250static int pmc_nsamples = PMC_NSAMPLES;
251TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
252SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
253 &pmc_nsamples, 0, "number of PC samples per CPU");
254
255
256/*
257 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
258 */
259
260static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
261TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
262SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
263 &pmc_mtxpool_size, 0, "size of spin mutex pool");
264
265
266/*
267 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
268 * allocate system-wide PMCs.
269 *
270 * Allowing unprivileged processes to allocate system PMCs is convenient
271 * if system-wide measurements need to be taken concurrently with other
272 * per-process measurements. This feature is turned off by default.
273 */
274
275static int pmc_unprivileged_syspmcs = 0;
276TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
277SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
278 &pmc_unprivileged_syspmcs, 0,
279 "allow unprivileged process to allocate system PMCs");
280
281/*
282 * Hash function. Discard the lower 2 bits of the pointer since
283 * these are always zero for our uses. The hash multiplier is
284 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
285 */
286
287#if LONG_BIT == 64
288#define _PMC_HM 11400714819323198486u
289#elif LONG_BIT == 32
290#define _PMC_HM 2654435769u
291#else
292#error Must know the size of 'long' to compile
293#endif
294
295#define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
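
/*
 * A minimal usage sketch of the hash above (the helper name below is
 * hypothetical; the real lookups appear in pmc_find_owner_descriptor()
 * and pmc_find_process_descriptor() further down):
 */
#if 0
static struct pmc_ownerhash *
example_owner_bucket(struct proc *p)
{
	uint32_t hindex;

	/* Multiplicative (Fibonacci) hash of the proc pointer. */
	hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
	return (&pmc_ownerhash[hindex]);
}
#endif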
296
297/*
298 * Syscall structures
299 */
300
301/* The `sysent' for the new syscall */
302static struct sysent pmc_sysent = {
303 2, /* sy_narg */
304 pmc_syscall_handler /* sy_call */
305};
306
307static struct syscall_module_data pmc_syscall_mod = {
308 load,
309 NULL,
310 &pmc_syscall_num,
311 &pmc_sysent,
312 { 0, NULL }
313};
314
315static moduledata_t pmc_mod = {
316 PMC_MODULE_NAME,
317 syscall_module_handler,
318 &pmc_syscall_mod
319};
320
321DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
322MODULE_VERSION(pmc, PMC_VERSION);
323
324#ifdef DEBUG
325enum pmc_dbgparse_state {
326 PMCDS_WS, /* in whitespace */
327 PMCDS_MAJOR, /* seen a major keyword */
328 PMCDS_MINOR
329};
330
331static int
332pmc_debugflags_parse(char *newstr, char *fence)
333{
334 char c, *p, *q;
335 struct pmc_debugflags *tmpflags;
336 int error, found, *newbits, tmp;
337 size_t kwlen;
338
339 tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK|M_ZERO);
340
341 p = newstr;
342 error = 0;
343
344 for (; p < fence && (c = *p); p++) {
345
346 /* skip white space */
347 if (c == ' ' || c == '\t')
348 continue;
349
350 /* look for a keyword followed by "=" */
351 for (q = p; p < fence && (c = *p) && c != '='; p++)
352 ;
353 if (c != '=') {
354 error = EINVAL;
355 goto done;
356 }
357
358 kwlen = p - q;
359 newbits = NULL;
360
361 /* lookup flag group name */
362#define DBG_SET_FLAG_MAJ(S,F) \
363 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
364 newbits = &tmpflags->pdb_ ## F;
365
366 DBG_SET_FLAG_MAJ("cpu", CPU);
367 DBG_SET_FLAG_MAJ("csw", CSW);
368 DBG_SET_FLAG_MAJ("logging", LOG);
369 DBG_SET_FLAG_MAJ("module", MOD);
370 DBG_SET_FLAG_MAJ("md", MDP);
371 DBG_SET_FLAG_MAJ("owner", OWN);
372 DBG_SET_FLAG_MAJ("pmc", PMC);
373 DBG_SET_FLAG_MAJ("process", PRC);
374 DBG_SET_FLAG_MAJ("sampling", SAM);
375
376 if (newbits == NULL) {
377 error = EINVAL;
378 goto done;
379 }
380
381 p++; /* skip the '=' */
382
383 /* Now parse the individual flags */
384 tmp = 0;
385 newflag:
386 for (q = p; p < fence && (c = *p); p++)
387 if (c == ' ' || c == '\t' || c == ',')
388 break;
389
390 /* p == fence or c == ws or c == "," or c == 0 */
391
392 if ((kwlen = p - q) == 0) {
393 *newbits = tmp;
394 continue;
395 }
396
397 found = 0;
398#define DBG_SET_FLAG_MIN(S,F) \
399 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
400 tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)
401
402 /* a '*' denotes all possible flags in the group */
403 if (kwlen == 1 && *q == '*')
404 tmp = found = ~0;
405 /* look for individual flag names */
406 DBG_SET_FLAG_MIN("allocaterow", ALR);
407 DBG_SET_FLAG_MIN("allocate", ALL);
408 DBG_SET_FLAG_MIN("attach", ATT);
409 DBG_SET_FLAG_MIN("bind", BND);
410 DBG_SET_FLAG_MIN("config", CFG);
411 DBG_SET_FLAG_MIN("exec", EXC);
412 DBG_SET_FLAG_MIN("exit", EXT);
413 DBG_SET_FLAG_MIN("find", FND);
414 DBG_SET_FLAG_MIN("flush", FLS);
415 DBG_SET_FLAG_MIN("fork", FRK);
416 DBG_SET_FLAG_MIN("getbuf", GTB);
417 DBG_SET_FLAG_MIN("hook", PMH);
418 DBG_SET_FLAG_MIN("init", INI);
419 DBG_SET_FLAG_MIN("intr", INT);
420 DBG_SET_FLAG_MIN("linktarget", TLK);
421 DBG_SET_FLAG_MIN("mayberemove", OMR);
422 DBG_SET_FLAG_MIN("ops", OPS);
423 DBG_SET_FLAG_MIN("read", REA);
424 DBG_SET_FLAG_MIN("register", REG);
425 DBG_SET_FLAG_MIN("release", REL);
426 DBG_SET_FLAG_MIN("remove", ORM);
427 DBG_SET_FLAG_MIN("sample", SAM);
428 DBG_SET_FLAG_MIN("scheduleio", SIO);
429 DBG_SET_FLAG_MIN("select", SEL);
430 DBG_SET_FLAG_MIN("signal", SIG);
431 DBG_SET_FLAG_MIN("swi", SWI);
432 DBG_SET_FLAG_MIN("swo", SWO);
433 DBG_SET_FLAG_MIN("start", STA);
434 DBG_SET_FLAG_MIN("stop", STO);
435 DBG_SET_FLAG_MIN("syscall", PMS);
436 DBG_SET_FLAG_MIN("unlinktarget", TUL);
437 DBG_SET_FLAG_MIN("write", WRI);
438 if (found == 0) {
439 /* unrecognized flag name */
440 error = EINVAL;
441 goto done;
442 }
443
444 if (c == 0 || c == ' ' || c == '\t') { /* end of flag group */
445 *newbits = tmp;
446 continue;
447 }
448
449 p++;
450 goto newflag;
451 }
452
453 /* save the new flag set */
454 bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));
455
456 done:
457 free(tmpflags, M_PMC);
458 return error;
459}
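
/*
 * Example flag strings accepted by the parser above, as set through the
 * kern.hwpmc.debugflags tunable/sysctl: flag groups are separated by
 * whitespace, individual flags by commas, and a '*' selects every flag
 * in a group.  For instance:
 *
 *	"pmc=allocate,attach process=exec,exit"
 *	"sampling=*"
 */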
460
461static int
462pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
463{
464 char *fence, *newstr;
465 int error;
466 unsigned int n;
467
468 (void) arg1; (void) arg2; /* unused parameters */
469
470 n = sizeof(pmc_debugstr);
471 newstr = malloc(n, M_PMC, M_WAITOK|M_ZERO);
472 (void) strlcpy(newstr, pmc_debugstr, n);
473
474 error = sysctl_handle_string(oidp, newstr, n, req);
475
476 /* if there is a new string, parse and copy it */
477 if (error == 0 && req->newptr != NULL) {
478 fence = newstr + (n < req->newlen ? n : req->newlen + 1);
479 if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
480 (void) strlcpy(pmc_debugstr, newstr,
481 sizeof(pmc_debugstr));
482 }
483
484 free(newstr, M_PMC);
485
486 return error;
487}
488#endif
489
490/*
491 * Map a row index to a classdep structure and return the adjusted row
492 * index for the PMC class index.
493 */
494static struct pmc_classdep *
495pmc_ri_to_classdep(struct pmc_mdep *md, int ri, int *adjri)
496{
497 struct pmc_classdep *pcd;
498
499 (void) md;
500
501 KASSERT(ri >= 0 && ri < md->pmd_npmc,
502 ("[pmc,%d] illegal row-index %d", __LINE__, ri));
503
504 pcd = pmc_rowindex_to_classdep[ri];
505
506 KASSERT(pcd != NULL,
507 ("[amd,%d] ri %d null pcd", __LINE__, ri));
508
509 *adjri = ri - pcd->pcd_ri;
510
511 KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num,
512 ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri));
513
514 return (pcd);
515}
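
/*
 * For example, on a hypothetical machine where the first class exports
 * pcd_num == 4 PMCs starting at pcd_ri == 0 and the next class starts
 * at pcd_ri == 4, a global row index of 5 maps to the second class with
 * an adjusted row index of 1.
 */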
516
517/*
518 * Concurrency Control
519 *
520 * The driver manages the following data structures:
521 *
522 * - target process descriptors, one per target process
523 * - owner process descriptors (and attached lists), one per owner process
524 * - lookup hash tables for owner and target processes
525 * - PMC descriptors (and attached lists)
526 * - per-cpu hardware state
527 * - the 'hook' variable through which the kernel calls into
528 * this module
529 * - the machine hardware state (managed by the MD layer)
530 *
531 * These data structures are accessed from:
532 *
533 * - thread context-switch code
534 * - interrupt handlers (possibly on multiple cpus)
535 * - kernel threads on multiple cpus running on behalf of user
536 * processes doing system calls
537 * - this driver's private kernel threads
538 *
539 * = Locks and Locking strategy =
540 *
541 * The driver uses four locking strategies for its operation:
542 *
543 * - The global SX lock "pmc_sx" is used to protect internal
544 * data structures.
545 *
546 * Calls into the module by syscall() start with this lock being
547 * held in exclusive mode. Depending on the requested operation,
548 * the lock may be downgraded to 'shared' mode to allow more
549 * concurrent readers into the module. Calls into the module from
550 * other parts of the kernel acquire the lock in shared mode.
551 *
552 * This SX lock is held in exclusive mode for any operations that
553 * modify the linkages between the driver's internal data structures.
554 *
555 * The 'pmc_hook' function pointer is also protected by this lock.
556 * It is only examined with the sx lock held in exclusive mode. The
557 * kernel module is allowed to be unloaded only with the sx lock held
558 * in exclusive mode. In normal syscall handling, after acquiring the
559 * pmc_sx lock we first check that 'pmc_hook' is non-null before
560 * proceeding. This prevents races between the thread unloading the module
561 * and other threads seeking to use the module.
562 *
563 * - Lookups of target process structures cannot use the global
564 * "pmc_sx" SX lock because these lookups need to happen during
565 * context switches and in other critical sections where sleeping
566 * is not allowed. The process hash table is therefore protected
567 * by its own private spin mutex, "pmc_processhash_mtx"; the owner
568 * hash table is only accessed with the "pmc_sx" lock held.
569 *
570 * - Interrupt handlers work in a lock free manner. At interrupt
571 * time, handlers look at the PMC pointer (phw->phw_pmc) configured
572 * when the PMC was started. If this pointer is NULL, the interrupt
573 * is ignored after updating driver statistics. We ensure that this
574 * pointer is set (using an atomic operation if necessary) before the
575 * PMC hardware is started. Conversely, this pointer is unset atomically
576 * only after the PMC hardware is stopped.
577 *
578 * We ensure that everything needed for the operation of an
579 * interrupt handler is available without it needing to acquire any
580 * locks. We also ensure that a PMC's software state is destroyed only
581 * after the PMC is taken off hardware (on all CPUs).
582 *
583 * - Context-switch handling with process-private PMCs needs more
584 * care.
585 *
586 * A given process may be the target of multiple PMCs. For example,
587 * PMCATTACH and PMCDETACH may be requested by a process on one CPU
588 * while the target process is running on another. A PMC could also
589 * be getting released because its owner is exiting. We tackle
590 * these situations in the following manner:
591 *
592 * - each target process structure 'pmc_process' has an array
593 * of 'struct pmc *' pointers, one for each hardware PMC.
594 *
595 * - At context switch IN time, each "target" PMC in RUNNING state
596 * gets started on hardware and a pointer to each PMC is copied into
597 * the per-cpu phw array. The 'runcount' for the PMC is
598 * incremented.
599 *
600 * - At context switch OUT time, all process-virtual PMCs are stopped
601 * on hardware. The saved value is added to the PMCs value field
602 * only if the PMC is in a non-deleted state (the PMCs state could
603 * have changed during the current time slice).
604 *
605 * Note that in between a switch IN on a processor and a switch
606 * OUT, the PMC could have been released on another CPU. Therefore,
607 * context switch OUT always looks at the hardware state to turn
608 * OFF PMCs and will update a PMC's saved value only if reachable
609 * from the target process record.
610 *
611 * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
612 * be attached to many processes at the time of the call and could
613 * be active on multiple CPUs).
614 *
615 * We prevent further scheduling of the PMC by marking it as in
616 * state 'DELETED'. If the runcount of the PMC is non-zero then
617 * this PMC is currently running on a CPU somewhere. The thread
618 * doing the PMCRELEASE operation waits by repeatedly doing a
619 * pause() till the runcount comes to zero.
620 *
621 * The contents of a PMC descriptor (struct pmc) are protected using
622 * a spin-mutex. In order to save space, we use a mutex pool.
623 *
624 * In terms of lock types used by witness(4), we use:
625 * - Type "pmc-sx", used by the global SX lock.
626 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
627 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
628 * - Type "pmc-leaf", used for all other spin mutexes.
629 */
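
/*
 * As a rough sketch of the lock-free interrupt-time check described
 * above (the helper name is hypothetical; the real handlers live in
 * the machine-dependent interrupt code):
 */
#if 0
static int
example_intr_should_sample(int cpu, int ri)
{
	struct pmc *pm;

	/* phw_pmc was set before the hardware was started. */
	pm = pmc_pcpu[cpu]->pc_hwpmcs[ri]->phw_pmc;
	if (pm == NULL)
		return (0);	/* ignore, after updating driver statistics */
	return (1);		/* take a sample on behalf of 'pm' */
}
#endif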
630
631/*
632 * save the cpu binding of the current kthread
633 */
634
635static void
636pmc_save_cpu_binding(struct pmc_binding *pb)
637{
638 PMCDBG(CPU,BND,2, "%s", "save-cpu");
639 thread_lock(curthread);
640 pb->pb_bound = sched_is_bound(curthread);
641 pb->pb_cpu = curthread->td_oncpu;
642 thread_unlock(curthread);
643 PMCDBG(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
644}
645
646/*
647 * restore the cpu binding of the current thread
648 */
649
650static void
651pmc_restore_cpu_binding(struct pmc_binding *pb)
652{
653 PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
654 curthread->td_oncpu, pb->pb_cpu);
655 thread_lock(curthread);
656 if (pb->pb_bound)
657 sched_bind(curthread, pb->pb_cpu);
658 else
659 sched_unbind(curthread);
660 thread_unlock(curthread);
661 PMCDBG(CPU,BND,2, "%s", "restore-cpu done");
662}
663
664/*
665 * move execution over to the specified cpu and bind it there.
666 */
667
668static void
669pmc_select_cpu(int cpu)
670{
671 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
672 ("[pmc,%d] bad cpu number %d", __LINE__, cpu));
673
674 /* Never move to an inactive CPU. */
675 KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive "
676 "CPU %d", __LINE__, cpu));
677
678 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
679 thread_lock(curthread);
680 sched_bind(curthread, cpu);
681 thread_unlock(curthread);
682
683 KASSERT(curthread->td_oncpu == cpu,
684 ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
685 cpu, curthread->td_oncpu));
686
687 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
688}
689
690/*
691 * Force a context switch.
692 *
693 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
694 * guaranteed to force a context switch.
695 */
696
697static void
698pmc_force_context_switch(void)
699{
700
701 pause("pmcctx", 1);
702}
703
704/*
705 * Get the file name for an executable. This is a simple wrapper
706 * around vn_fullpath(9).
707 */
708
709static void
710pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
711{
712
713 *fullpath = "unknown";
714 *freepath = NULL;
715 vn_fullpath(curthread, v, fullpath, freepath);
716}
717
718/*
719 * remove a process owning PMCs
720 */
721
722void
723pmc_remove_owner(struct pmc_owner *po)
724{
725 struct pmc *pm, *tmp;
726
727 sx_assert(&pmc_sx, SX_XLOCKED);
728
729 PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);
730
731 /* Remove descriptor from the owner hash table */
732 LIST_REMOVE(po, po_next);
733
734 /* release all owned PMC descriptors */
735 LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
736 PMCDBG(OWN,ORM,2, "pmc=%p", pm);
737 KASSERT(pm->pm_owner == po,
738 ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));
739
740 pmc_release_pmc_descriptor(pm); /* will unlink from the list */
741 }
742
743 KASSERT(po->po_sscount == 0,
744 ("[pmc,%d] SS count not zero", __LINE__));
745 KASSERT(LIST_EMPTY(&po->po_pmcs),
746 ("[pmc,%d] PMC list not empty", __LINE__));
747
748 /* de-configure the log file if present */
749 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
750 pmclog_deconfigure_log(po);
751}
752
753/*
754 * remove an owner process record if all conditions are met.
755 */
756
757static void
758pmc_maybe_remove_owner(struct pmc_owner *po)
759{
760
761 PMCDBG(OWN,OMR,1, "maybe-remove-owner po=%p", po);
762
763 /*
764 * Remove owner record if
765 * - this process does not own any PMCs
766 * - this process has not configured a log file
767 */
768
769 if (LIST_EMPTY(&po->po_pmcs) &&
770 ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
771 pmc_remove_owner(po);
772 pmc_destroy_owner_descriptor(po);
773 }
774}
775
776/*
777 * Add an association between a target process and a PMC.
778 */
779
780static void
781pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
782{
783 int ri;
784 struct pmc_target *pt;
785
786 sx_assert(&pmc_sx, SX_XLOCKED);
787
788 KASSERT(pm != NULL && pp != NULL,
789 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
790 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
791 ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
792 __LINE__, pm, pp->pp_proc->p_pid));
793 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < ((int) md->pmd_npmc - 1),
794 ("[pmc,%d] Illegal reference count %d for process record %p",
795 __LINE__, pp->pp_refcnt, (void *) pp));
796
797 ri = PMC_TO_ROWINDEX(pm);
798
799 PMCDBG(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
800 pm, ri, pp);
801
802#ifdef DEBUG
803 LIST_FOREACH(pt, &pm->pm_targets, pt_next)
804 if (pt->pt_process == pp)
805 KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
806 __LINE__, pp, pm));
807#endif
808
809 pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK|M_ZERO);
810 pt->pt_process = pp;
811
812 LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
813
814 atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
815 (uintptr_t)pm);
816
817 if (pm->pm_owner->po_owner == pp->pp_proc)
818 pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;
819
820 /*
821 * Initialize the per-process values at this row index.
822 */
823 pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
824 pm->pm_sc.pm_reloadcount : 0;
825
826 pp->pp_refcnt++;
827
828}
829
830/*
831 * Removes the association between a target process and a PMC.
832 */
833
834static void
835pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
836{
837 int ri;
838 struct proc *p;
839 struct pmc_target *ptgt;
840
841 sx_assert(&pmc_sx, SX_XLOCKED);
842
843 KASSERT(pm != NULL && pp != NULL,
844 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
845
846 KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt < (int) md->pmd_npmc,
847 ("[pmc,%d] Illegal ref count %d on process record %p",
848 __LINE__, pp->pp_refcnt, (void *) pp));
849
850 ri = PMC_TO_ROWINDEX(pm);
851
852 PMCDBG(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
853 pm, ri, pp);
854
855 KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
856 ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
857 ri, pm, pp->pp_pmcs[ri].pp_pmc));
858
859 pp->pp_pmcs[ri].pp_pmc = NULL;
860 pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;
861
862 /* Remove owner-specific flags */
863 if (pm->pm_owner->po_owner == pp->pp_proc) {
864 pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
865 pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
866 }
867
868 pp->pp_refcnt--;
869
870 /* Remove the target process from the PMC structure */
871 LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
872 if (ptgt->pt_process == pp)
873 break;
874
875 KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
876 "in pmc %p", __LINE__, pp->pp_proc, pp, pm));
877
878 LIST_REMOVE(ptgt, pt_next);
879 free(ptgt, M_PMC);
880
881 /* if the PMC now lacks targets, send the owner a SIGIO */
882 if (LIST_EMPTY(&pm->pm_targets)) {
883 p = pm->pm_owner->po_owner;
884 PROC_LOCK(p);
885 psignal(p, SIGIO);
886 PROC_UNLOCK(p);
887
888 PMCDBG(PRC,SIG,2, "signalling proc=%p signal=%d", p,
889 SIGIO);
890 }
891}
892
893/*
894 * Check if PMC 'pm' may be attached to target process 't'.
895 */
896
897static int
898pmc_can_attach(struct pmc *pm, struct proc *t)
899{
900 struct proc *o; /* pmc owner */
901 struct ucred *oc, *tc; /* owner, target credentials */
902 int decline_attach, i;
903
904 /*
905 * A PMC's owner can always attach that PMC to itself.
906 */
907
908 if ((o = pm->pm_owner->po_owner) == t)
909 return 0;
910
911 PROC_LOCK(o);
912 oc = o->p_ucred;
913 crhold(oc);
914 PROC_UNLOCK(o);
915
916 PROC_LOCK(t);
917 tc = t->p_ucred;
918 crhold(tc);
919 PROC_UNLOCK(t);
920
921 /*
922 * The effective uid of the PMC owner should match at least one
923 * of the {effective,real,saved} uids of the target process.
924 */
925
926 decline_attach = oc->cr_uid != tc->cr_uid &&
927 oc->cr_uid != tc->cr_svuid &&
928 oc->cr_uid != tc->cr_ruid;
929
930 /*
931 * Every one of the target's group ids must be in the owner's
932 * group list.
933 */
934 for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
935 decline_attach = !groupmember(tc->cr_groups[i], oc);
936
937 /* check the real and saved gids too */
938 if (decline_attach == 0)
939 decline_attach = !groupmember(tc->cr_rgid, oc) ||
940 !groupmember(tc->cr_svgid, oc);
941
942 crfree(tc);
943 crfree(oc);
944
945 return !decline_attach;
946}
947
948/*
949 * Attach a process to a PMC.
950 */
951
952static int
953pmc_attach_one_process(struct proc *p, struct pmc *pm)
954{
955 int ri;
956 char *fullpath, *freepath;
957 struct pmc_process *pp;
958
959 sx_assert(&pmc_sx, SX_XLOCKED);
960
961 PMCDBG(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
962 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
963
964 /*
965 * Locate the process descriptor corresponding to process 'p',
966 * allocating space as needed.
967 *
968 * Verify that rowindex 'pm_rowindex' is free in the process
969 * descriptor.
970 *
971 * If it is free, link the process descriptor and the
972 * PMC.
973 */
974 ri = PMC_TO_ROWINDEX(pm);
975
976 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL)
977 return ENOMEM;
978
979 if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */
980 return EEXIST;
981
982 if (pp->pp_pmcs[ri].pp_pmc != NULL)
983 return EBUSY;
984
985 pmc_link_target_process(pm, pp);
986
987 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
988 (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
989 pm->pm_flags |= PMC_F_NEEDS_LOGFILE;
990
991 pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */
992
993 /* issue an attach event to a configured log file */
994 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
995 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
996 if (p->p_flag & P_KTHREAD) {
997 fullpath = kernelname;
998 freepath = NULL;
999 } else
1000 pmclog_process_pmcattach(pm, p->p_pid, fullpath);
1001 if (freepath)
1002 free(freepath, M_TEMP);
1003 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1004 pmc_log_process_mappings(pm->pm_owner, p);
1005 }
1006 /* mark process as using HWPMCs */
1007 PROC_LOCK(p);
1008 p->p_flag |= P_HWPMC;
1009 PROC_UNLOCK(p);
1010
1011 return 0;
1012}
1013
1014/*
1015 * Attach a process and optionally its children
1016 */
1017
1018static int
1019pmc_attach_process(struct proc *p, struct pmc *pm)
1020{
1021 int error;
1022 struct proc *top;
1023
1024 sx_assert(&pmc_sx, SX_XLOCKED);
1025
1026 PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
1027 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1028
1029
1030 /*
1031 * If this PMC successfully allowed a GETMSR operation
1032 * in the past, disallow further ATTACHes.
1033 */
1034
1035 if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
1036 return EPERM;
1037
1038 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1039 return pmc_attach_one_process(p, pm);
1040
1041 /*
1042 * Traverse all child processes, attaching them to
1043 * this PMC.
1044 */
1045
1046 sx_slock(&proctree_lock);
1047
1048 top = p;
1049
1050 for (;;) {
1051 if ((error = pmc_attach_one_process(p, pm)) != 0)
1052 break;
1053 if (!LIST_EMPTY(&p->p_children))
1054 p = LIST_FIRST(&p->p_children);
1055 else for (;;) {
1056 if (p == top)
1057 goto done;
1058 if (LIST_NEXT(p, p_sibling)) {
1059 p = LIST_NEXT(p, p_sibling);
1060 break;
1061 }
1062 p = p->p_pptr;
1063 }
1064 }
1065
1066 if (error)
1067 (void) pmc_detach_process(top, pm);
1068
1069 done:
1070 sx_sunlock(&proctree_lock);
1071 return error;
1072}
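
/*
 * The loop above walks the process tree iteratively in preorder using
 * the p_children/p_sibling links while holding proctree_lock shared;
 * pmc_detach_process() and pmc_log_all_process_mappings() below use
 * the same traversal.
 */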
1073
1074/*
1075 * Detach a process from a PMC. If there are no other PMCs tracking
1076 * this process, remove the process structure from its hash table. If
1077 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
1078 */
1079
1080static int
1081pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
1082{
1083 int ri;
1084 struct pmc_process *pp;
1085
1086 sx_assert(&pmc_sx, SX_XLOCKED);
1087
1088 KASSERT(pm != NULL,
1089 ("[pmc,%d] null pm pointer", __LINE__));
1090
1091 ri = PMC_TO_ROWINDEX(pm);
1092
1093 PMCDBG(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
1094 pm, ri, p, p->p_pid, p->p_comm, flags);
1095
1096 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
1097 return ESRCH;
1098
1099 if (pp->pp_pmcs[ri].pp_pmc != pm)
1100 return EINVAL;
1101
1102 pmc_unlink_target_process(pm, pp);
1103
1104 /* Issue a detach entry if a log file is configured */
1105 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
1106 pmclog_process_pmcdetach(pm, p->p_pid);
1107
1108 /*
1109 * If there are no PMCs targeting this process, we remove its
1110 * descriptor from the target hash table and unset the P_HWPMC
1111 * flag in the struct proc.
1112 */
1113 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
1114 ("[pmc,%d] Illegal refcnt %d for process struct %p",
1115 __LINE__, pp->pp_refcnt, pp));
1116
1117 if (pp->pp_refcnt != 0) /* still a target of some PMC */
1118 return 0;
1119
1120 pmc_remove_process_descriptor(pp);
1121
1122 if (flags & PMC_FLAG_REMOVE)
1123 free(pp, M_PMC);
1124
1125 PROC_LOCK(p);
1126 p->p_flag &= ~P_HWPMC;
1127 PROC_UNLOCK(p);
1128
1129 return 0;
1130}
1131
1132/*
1133 * Detach a process and optionally its descendants from a PMC.
1134 */
1135
1136static int
1137pmc_detach_process(struct proc *p, struct pmc *pm)
1138{
1139 struct proc *top;
1140
1141 sx_assert(&pmc_sx, SX_XLOCKED);
1142
1143 PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
1144 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1145
1146 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1147 return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1148
1149 /*
1150 * Traverse all children, detaching them from this PMC. We
1151 * ignore errors since we could be detaching a PMC from a
1152 * partially attached proc tree.
1153 */
1154
1155 sx_slock(&proctree_lock);
1156
1157 top = p;
1158
1159 for (;;) {
1160 (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1161
1162 if (!LIST_EMPTY(&p->p_children))
1163 p = LIST_FIRST(&p->p_children);
1164 else for (;;) {
1165 if (p == top)
1166 goto done;
1167 if (LIST_NEXT(p, p_sibling)) {
1168 p = LIST_NEXT(p, p_sibling);
1169 break;
1170 }
1171 p = p->p_pptr;
1172 }
1173 }
1174
1175 done:
1176 sx_sunlock(&proctree_lock);
1177
1178 if (LIST_EMPTY(&pm->pm_targets))
1179 pm->pm_flags &= ~PMC_F_ATTACH_DONE;
1180
1181 return 0;
1182}
1183
1184
1185/*
1186 * Thread context switch IN
1187 */
1188
1189static void
1190pmc_process_csw_in(struct thread *td)
1191{
1192 int cpu;
1193 unsigned int adjri, ri;
1194 struct pmc *pm;
1195 struct proc *p;
1196 struct pmc_cpu *pc;
1197 struct pmc_hw *phw;
1198 pmc_value_t newvalue;
1199 struct pmc_process *pp;
1200 struct pmc_classdep *pcd;
1201
1202 p = td->td_proc;
1203
1204 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
1205 return;
1206
1207 KASSERT(pp->pp_proc == td->td_proc,
1208 ("[pmc,%d] not my thread state", __LINE__));
1209
1210 critical_enter(); /* no preemption from this point */
1211
1212 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1213
1214 PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1215 p->p_pid, p->p_comm, pp);
1216
1217 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1218 ("[pmc,%d] wierd CPU id %d", __LINE__, cpu));
1219
1220 pc = pmc_pcpu[cpu];
1221
1222 for (ri = 0; ri < md->pmd_npmc; ri++) {
1223
1224 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
1225 continue;
1226
1227 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
1228 ("[pmc,%d] Target PMC in non-virtual mode (%d)",
1229 __LINE__, PMC_TO_MODE(pm)));
1230
1231 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1232 ("[pmc,%d] Row index mismatch pmc %d != ri %d",
1233 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1234
1235 /*
1236 * Only PMCs that are marked as 'RUNNING' need
1237 * be placed on hardware.
1238 */
1239
1240 if (pm->pm_state != PMC_STATE_RUNNING)
1241 continue;
1242
1243 /* increment PMC runcount */
1244 atomic_add_rel_32(&pm->pm_runcount, 1);
1245
1246 /* configure the HWPMC we are going to use. */
1247 pcd = pmc_ri_to_classdep(md, ri, &adjri);
1248 pcd->pcd_config_pmc(cpu, adjri, pm);
1249
1250 phw = pc->pc_hwpmcs[ri];
1251
1252 KASSERT(phw != NULL,
1253 ("[pmc,%d] null hw pointer", __LINE__));
1254
1255 KASSERT(phw->phw_pmc == pm,
1256 ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
1257 phw->phw_pmc, pm));
1258
1259 /*
1260 * Write out saved value and start the PMC.
1261 *
1262 * Sampling PMCs use a per-process value, while
1263 * counting mode PMCs use a per-pmc value that is
1264 * inherited across descendants.
1265 */
1266 if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
1267 mtx_pool_lock_spin(pmc_mtxpool, pm);
1268 newvalue = PMC_PCPU_SAVED(cpu,ri) =
1269 pp->pp_pmcs[ri].pp_pmcval;
1270 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1271 } else {
1272 KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
1273 ("[pmc,%d] illegal mode=%d", __LINE__,
1274 PMC_TO_MODE(pm)));
1275 mtx_pool_lock_spin(pmc_mtxpool, pm);
1276 newvalue = PMC_PCPU_SAVED(cpu, ri) =
1277 pm->pm_gv.pm_savedvalue;
1278 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1279 }
1280
1281 PMCDBG(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);
1282
1283 pcd->pcd_write_pmc(cpu, adjri, newvalue);
1284 pcd->pcd_start_pmc(cpu, adjri);
1285 }
1286
1287 /*
1288 * perform any other architecture/cpu dependent thread
1289 * switch-in actions.
1290 */
1291
1292 (void) (*md->pmd_switch_in)(pc, pp);
1293
1294 critical_exit();
1295
1296}
1297
1298/*
1299 * Thread context switch OUT.
1300 */
1301
1302static void
1303pmc_process_csw_out(struct thread *td)
1304{
1305 int cpu;
1306 int64_t tmp;
1307 struct pmc *pm;
1308 struct proc *p;
1309 enum pmc_mode mode;
1310 struct pmc_cpu *pc;
1311 pmc_value_t newvalue;
1312 unsigned int adjri, ri;
1313 struct pmc_process *pp;
1314 struct pmc_classdep *pcd;
1315
1316
1317 /*
1318 * Locate our process descriptor; this may be NULL if
1319 * this process is exiting and we have already removed
1320 * the process from the target process table.
1321 *
1322 * Note that due to kernel preemption, multiple
1323 * context switches may happen while the process is
1324 * exiting.
1325 *
1326 * Note also that if the target process cannot be
1327 * found we still need to deconfigure any PMCs that
1328 * are currently running on hardware.
1329 */
1330
1331 p = td->td_proc;
1332 pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
1333
1334 /*
1335 * save PMCs
1336 */
1337
1338 critical_enter();
1339
1340 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1341
1342 PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1343 p->p_pid, p->p_comm, pp);
1344
1345 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1346 ("[pmc,%d wierd CPU id %d", __LINE__, cpu));
1347
1348 pc = pmc_pcpu[cpu];
1349
1350 /*
1351 * When a PMC gets unlinked from a target process, it will
1352 * be removed from the target's pp_pmcs[] array.
1353 *
1354 * However, on a MP system, the target could have been
1355 * executing on another CPU at the time of the unlink.
1356 * So, at context switch OUT time, we need to look at
1357 * the hardware to determine if a PMC is scheduled on
1358 * it.
1359 */
1360
1361 for (ri = 0; ri < md->pmd_npmc; ri++) {
1362
1363 pcd = pmc_ri_to_classdep(md, ri, &adjri);
1364 pm = NULL;
1365 (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
1366
1367 if (pm == NULL) /* nothing at this row index */
1368 continue;
1369
1370 mode = PMC_TO_MODE(pm);
1371 if (!PMC_IS_VIRTUAL_MODE(mode))
1372 continue; /* not a process virtual PMC */
1373
1374 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1375 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
1376 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1377
1378 /* Stop hardware if not already stopped */
1379 if (pm->pm_stalled == 0)
1380 pcd->pcd_stop_pmc(cpu, adjri);
1381
1382 /* reduce this PMC's runcount */
1383 atomic_subtract_rel_32(&pm->pm_runcount, 1);
1384
1385 /*
1386 * If this PMC is associated with this process,
1387 * save the reading.
1388 */
1389
1390 if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) {
1391
1392 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
1393 ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
1394 pm, ri, pp->pp_pmcs[ri].pp_pmc));
1395
1396 KASSERT(pp->pp_refcnt > 0,
1397 ("[pmc,%d] pp refcnt = %d", __LINE__,
1398 pp->pp_refcnt));
1399
1400 pcd->pcd_read_pmc(cpu, adjri, &newvalue);
1401
1402 tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);
1403
1404 PMCDBG(CSW,SWI,1,"cpu=%d ri=%d tmp=%jd", cpu, ri,
1405 tmp);
1406
1407 if (mode == PMC_MODE_TS) {
1408
1409 /*
1410 * For sampling process-virtual PMCs,
1411 * we expect the count to be
1412 * decreasing as the 'value'
1413 * programmed into the PMC is the
1414 * number of events to be seen till
1415 * the next sampling interrupt.
1416 */
1417 if (tmp < 0)
1418 tmp += pm->pm_sc.pm_reloadcount;
1419 mtx_pool_lock_spin(pmc_mtxpool, pm);
1420 pp->pp_pmcs[ri].pp_pmcval -= tmp;
1421 if ((int64_t) pp->pp_pmcs[ri].pp_pmcval < 0)
1422 pp->pp_pmcs[ri].pp_pmcval +=
1423 pm->pm_sc.pm_reloadcount;
1424 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1425
1426 } else {
1427
1428 /*
1429 * For counting process-virtual PMCs,
1430 * we expect the count to be
1431 * increasing monotonically, modulo a 64
1432 * bit wraparound.
1433 */
1434 KASSERT((int64_t) tmp >= 0,
1435 ("[pmc,%d] negative increment cpu=%d "
1436 "ri=%d newvalue=%jx saved=%jx "
1437 "incr=%jx", __LINE__, cpu, ri,
1438 newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));
1439
1440 mtx_pool_lock_spin(pmc_mtxpool, pm);
1441 pm->pm_gv.pm_savedvalue += tmp;
1442 pp->pp_pmcs[ri].pp_pmcval += tmp;
1443 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1444
1445 if (pm->pm_flags & PMC_F_LOG_PROCCSW)
1446 pmclog_process_proccsw(pm, pp, tmp);
1447 }
1448 }
1449
1450 /* mark hardware as free */
1451 pcd->pcd_config_pmc(cpu, adjri, NULL);
1452 }
1453
1454 /*
1455 * perform any other architecture/cpu dependent thread
1456 * switch out functions.
1457 */
1458
1459 (void) (*md->pmd_switch_out)(pc, pp);
1460
1461 critical_exit();
1462}
1463
1464/*
1465 * Log a KLD operation.
1466 */
1467
1468static void
1469pmc_process_kld_load(struct pmckern_map_in *pkm)
1470{
1471 struct pmc_owner *po;
1472
1473 sx_assert(&pmc_sx, SX_LOCKED);
1474
1475 /*
1476 * Notify owners of system sampling PMCs about KLD operations.
1477 */
1478
1479 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1480 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1481 pmclog_process_map_in(po, (pid_t) -1, pkm->pm_address,
1482 (char *) pkm->pm_file);
1483
1484 /*
1485 * TODO: Notify owners of (all) process-sampling PMCs too.
1486 */
1487
1488 return;
1489}
1490
1491static void
1492pmc_process_kld_unload(struct pmckern_map_out *pkm)
1493{
1494 struct pmc_owner *po;
1495
1496 sx_assert(&pmc_sx, SX_LOCKED);
1497
1498 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1499 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1500 pmclog_process_map_out(po, (pid_t) -1,
1501 pkm->pm_address, pkm->pm_address + pkm->pm_size);
1502
1503 /*
1504 * TODO: Notify owners of process-sampling PMCs.
1505 */
1506}
1507
1508/*
1509 * A mapping change for a process.
1510 */
1511
1512static void
1513pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
1514{
1515 int ri;
1516 pid_t pid;
1517 char *fullpath, *freepath;
1518 const struct pmc *pm;
1519 struct pmc_owner *po;
1520 const struct pmc_process *pp;
1521
1522 freepath = fullpath = NULL;
1523 pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath);
1524
1525 pid = td->td_proc->p_pid;
1526
1527 /* Inform owners of all system-wide sampling PMCs. */
1528 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1529 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1530 pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);
1531
1532 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1533 goto done;
1534
1535 /*
1536 * Inform sampling PMC owners tracking this process.
1537 */
1538 for (ri = 0; ri < md->pmd_npmc; ri++)
1539 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1540 PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1541 pmclog_process_map_in(pm->pm_owner,
1542 pid, pkm->pm_address, fullpath);
1543
1544 done:
1545 if (freepath)
1546 free(freepath, M_TEMP);
1547}
1548
1549
1550/*
1551 * Log an munmap request.
1552 */
1553
1554static void
1555pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
1556{
1557 int ri;
1558 pid_t pid;
1559 struct pmc_owner *po;
1560 const struct pmc *pm;
1561 const struct pmc_process *pp;
1562
1563 pid = td->td_proc->p_pid;
1564
1565 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1566 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1567 pmclog_process_map_out(po, pid, pkm->pm_address,
1568 pkm->pm_address + pkm->pm_size);
1569
1570 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1571 return;
1572
1573 for (ri = 0; ri < md->pmd_npmc; ri++)
1574 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1575 PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1576 pmclog_process_map_out(pm->pm_owner, pid,
1577 pkm->pm_address, pkm->pm_address + pkm->pm_size);
1578}
1579
1580/*
1581 * Log mapping information about the kernel.
1582 */
1583
1584static void
1585pmc_log_kernel_mappings(struct pmc *pm)
1586{
1587 struct pmc_owner *po;
1588 struct pmckern_map_in *km, *kmbase;
1589
1590 sx_assert(&pmc_sx, SX_LOCKED);
1591 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
1592 ("[pmc,%d] non-sampling PMC (%p) desires mapping information",
1593 __LINE__, (void *) pm));
1594
1595 po = pm->pm_owner;
1596
1597 if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE)
1598 return;
1599
1600 /*
1601 * Log the current set of kernel modules.
1602 */
1603 kmbase = linker_hwpmc_list_objects();
1604 for (km = kmbase; km->pm_file != NULL; km++) {
1605 PMCDBG(LOG,REG,1,"%s %p", (char *) km->pm_file,
1606 (void *) km->pm_address);
1607 pmclog_process_map_in(po, (pid_t) -1, km->pm_address,
1608 km->pm_file);
1609 }
1610 free(kmbase, M_LINKER);
1611
1612 po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE;
1613}
1614
1615/*
1616 * Log the mappings for a single process.
1617 */
1618
1619static void
1620pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
1621{
1622}
1623
1624/*
1625 * Log mappings for all processes in the system.
1626 */
1627
1628static void
1629pmc_log_all_process_mappings(struct pmc_owner *po)
1630{
1631 struct proc *p, *top;
1632
1633 sx_assert(&pmc_sx, SX_XLOCKED);
1634
1635 if ((p = pfind(1)) == NULL)
1636 panic("[pmc,%d] Cannot find init", __LINE__);
1637
1638 PROC_UNLOCK(p);
1639
1640 sx_slock(&proctree_lock);
1641
1642 top = p;
1643
1644 for (;;) {
1645 pmc_log_process_mappings(po, p);
1646 if (!LIST_EMPTY(&p->p_children))
1647 p = LIST_FIRST(&p->p_children);
1648 else for (;;) {
1649 if (p == top)
1650 goto done;
1651 if (LIST_NEXT(p, p_sibling)) {
1652 p = LIST_NEXT(p, p_sibling);
1653 break;
1654 }
1655 p = p->p_pptr;
1656 }
1657 }
1658 done:
1659 sx_sunlock(&proctree_lock);
1660}
1661
1662/*
1663 * The 'hook' invoked from the kernel proper
1664 */
1665
1666
1667#ifdef DEBUG
1668const char *pmc_hooknames[] = {
1669 /* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
1670 "",
1671 "EXEC",
1672 "CSW-IN",
1673 "CSW-OUT",
1674 "SAMPLE",
1675 "KLDLOAD",
1676 "KLDUNLOAD",
1677 "MMAP",
1678 "MUNMAP",
1679 "CALLCHAIN"
1680};
1681#endif
1682
1683static int
1684pmc_hook_handler(struct thread *td, int function, void *arg)
1685{
1686
1687 PMCDBG(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
1688 pmc_hooknames[function], arg);
1689
1690 switch (function)
1691 {
1692
1693 /*
1694 * Process exec()
1695 */
1696
1697 case PMC_FN_PROCESS_EXEC:
1698 {
1699 char *fullpath, *freepath;
1700 unsigned int ri;
1701 int is_using_hwpmcs;
1702 struct pmc *pm;
1703 struct proc *p;
1704 struct pmc_owner *po;
1705 struct pmc_process *pp;
1706 struct pmckern_procexec *pk;
1707
1708 sx_assert(&pmc_sx, SX_XLOCKED);
1709
1710 p = td->td_proc;
1711 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
1712
1713 pk = (struct pmckern_procexec *) arg;
1714
1715 /* Inform owners of SS mode PMCs of the exec event. */
1716 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1717 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1718 pmclog_process_procexec(po, PMC_ID_INVALID,
1719 p->p_pid, pk->pm_entryaddr, fullpath);
1720
1721 PROC_LOCK(p);
1722 is_using_hwpmcs = p->p_flag & P_HWPMC;
1723 PROC_UNLOCK(p);
1724
1725 if (!is_using_hwpmcs) {
1726 if (freepath)
1727 free(freepath, M_TEMP);
1728 break;
1729 }
1730
1731 /*
1732 * PMCs are not inherited across an exec(): remove any
1733 * PMCs that this process is the owner of.
1734 */
1735
1736 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
1737 pmc_remove_owner(po);
1738 pmc_destroy_owner_descriptor(po);
1739 }
1740
1741 /*
1742 * If the process being exec'ed is not the target of any
1743 * PMC, we are done.
1744 */
1745 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
1746 if (freepath)
1747 free(freepath, M_TEMP);
1748 break;
1749 }
1750
1751 /*
1752 * Log the exec event to all monitoring owners. Skip
1753 * owners who have already received the event because
1754 * they had system sampling PMCs active.
1755 */
1756 for (ri = 0; ri < md->pmd_npmc; ri++)
1757 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
1758 po = pm->pm_owner;
1759 if (po->po_sscount == 0 &&
1760 po->po_flags & PMC_PO_OWNS_LOGFILE)
1761 pmclog_process_procexec(po, pm->pm_id,
1762 p->p_pid, pk->pm_entryaddr,
1763 fullpath);
1764 }
1765
1766 if (freepath)
1767 free(freepath, M_TEMP);
1768
1769
1770 PMCDBG(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
1771 p, p->p_pid, p->p_comm, pk->pm_credentialschanged);
1772
1773 if (pk->pm_credentialschanged == 0) /* no change */
1774 break;
1775
1776 /*
1777 * If the newly exec()'ed process has a different credential
1778 * than before, allow it to be the target of a PMC only if
1779 * the PMC's owner has sufficient privilege.
1780 */
1781
1782 for (ri = 0; ri < md->pmd_npmc; ri++)
1783 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
1784 if (pmc_can_attach(pm, td->td_proc) != 0)
1785 pmc_detach_one_process(td->td_proc,
1786 pm, PMC_FLAG_NONE);
1787
1788 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
1789 ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
1790 pp->pp_refcnt, pp));
1791
1792 /*
1793 * If this process is no longer the target of any
1794 * PMCs, we can remove the process entry and free
1795 * up space.
1796 */
1797
1798 if (pp->pp_refcnt == 0) {
1799 pmc_remove_process_descriptor(pp);
1800 free(pp, M_PMC);
1801 break;
1802 }
1803
1804 }
1805 break;
1806
1807 case PMC_FN_CSW_IN:
1808 pmc_process_csw_in(td);
1809 break;
1810
1811 case PMC_FN_CSW_OUT:
1812 pmc_process_csw_out(td);
1813 break;
1814
1815 /*
1816 * Process accumulated PC samples.
1817 *
1818 * This function is expected to be called by hardclock() for
1819 * each CPU that has accumulated PC samples.
1820 *
1821 * This function is to be executed on the CPU whose samples
1822 * are being processed.
1823 */
1824 case PMC_FN_DO_SAMPLES:
1825
1826 /*
1827 * Clear the CPU-specific bit in the CPU mask before
1828 * doing the rest of the processing. If the NMI handler
1829 * gets invoked after the "atomic_clear_int()" call
1830 * below but before "pmc_process_samples()" gets
1831 * around to processing the interrupt, then we will
1832 * come back here at the next hardclock() tick (and
1833 * may find nothing to do if "pmc_process_samples()"
1834 * had already processed the interrupt). We don't
1835 * lose the interrupt sample.
1836 */
1837 atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
1838 pmc_process_samples(PCPU_GET(cpuid));
1839 break;
1840
1841
1842 case PMC_FN_KLD_LOAD:
1843 sx_assert(&pmc_sx, SX_LOCKED);
1844 pmc_process_kld_load((struct pmckern_map_in *) arg);
1845 break;
1846
1847 case PMC_FN_KLD_UNLOAD:
1848 sx_assert(&pmc_sx, SX_LOCKED);
1849 pmc_process_kld_unload((struct pmckern_map_out *) arg);
1850 break;
1851
1852 case PMC_FN_MMAP:
1853 sx_assert(&pmc_sx, SX_LOCKED);
1854 pmc_process_mmap(td, (struct pmckern_map_in *) arg);
1855 break;
1856
1857 case PMC_FN_MUNMAP:
1858 sx_assert(&pmc_sx, SX_LOCKED);
1859 pmc_process_munmap(td, (struct pmckern_map_out *) arg);
1860 break;
1861
1862 case PMC_FN_USER_CALLCHAIN:
1863 /*
1864 * Record a call chain.
1865 */
1866 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
1867 __LINE__));
1866 pmc_capture_user_callchain(PCPU_GET(cpuid),
1867 (struct trapframe *) arg);
1868 pmc_capture_user_callchain(PCPU_GET(cpuid),
1869 (struct trapframe *) arg);
1870 td->td_pflags &= ~TDP_CALLCHAIN;
1868 break;
1869
1870 default:
1871#ifdef DEBUG
1872 KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
1873#endif
1874 break;
1875
1876 }
1877
1878 return 0;
1879}
1880
1881/*
1882 * allocate a 'struct pmc_owner' descriptor in the owner hash table.
1883 */
1884
1885static struct pmc_owner *
1886pmc_allocate_owner_descriptor(struct proc *p)
1887{
1888 uint32_t hindex;
1889 struct pmc_owner *po;
1890 struct pmc_ownerhash *poh;
1891
1892 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
1893 poh = &pmc_ownerhash[hindex];
1894
1895 /* allocate and initialize an owner descriptor */
1896 po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK|M_ZERO);
1897 po->po_sscount = po->po_error = po->po_flags = 0;
1898 po->po_file = NULL;
1899 po->po_owner = p;
1900 po->po_kthread = NULL;
1901 LIST_INIT(&po->po_pmcs);
1902 LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
1903
1904 TAILQ_INIT(&po->po_logbuffers);
1905 mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);
1906
1907 PMCDBG(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
1908 p, p->p_pid, p->p_comm, po);
1909
1910 return po;
1911}
1912
1913static void
1914pmc_destroy_owner_descriptor(struct pmc_owner *po)
1915{
1916
1917 PMCDBG(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
1918 po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);
1919
1920 mtx_destroy(&po->po_mtx);
1921 free(po, M_PMC);
1922}
1923
1924/*
1925 * find the descriptor corresponding to process 'p', adding or removing it
1926 * as specified by 'mode'.
1927 */
1928
1929static struct pmc_process *
1930pmc_find_process_descriptor(struct proc *p, uint32_t mode)
1931{
1932 uint32_t hindex;
1933 struct pmc_process *pp, *ppnew;
1934 struct pmc_processhash *pph;
1935
1936 hindex = PMC_HASH_PTR(p, pmc_processhashmask);
1937 pph = &pmc_processhash[hindex];
1938
1939 ppnew = NULL;
1940
1941 /*
1942 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we
1943 * cannot call malloc(9) once we hold a spin lock.
1944 */
1945 if (mode & PMC_FLAG_ALLOCATE)
1946 ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc *
1947 sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO);
1948
1949 mtx_lock_spin(&pmc_processhash_mtx);
1950 LIST_FOREACH(pp, pph, pp_next)
1951 if (pp->pp_proc == p)
1952 break;
1953
1954 if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
1955 LIST_REMOVE(pp, pp_next);
1956
1957 if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
1958 ppnew != NULL) {
1959 ppnew->pp_proc = p;
1960 LIST_INSERT_HEAD(pph, ppnew, pp_next);
1961 pp = ppnew;
1962 ppnew = NULL;
1963 }
1964 mtx_unlock_spin(&pmc_processhash_mtx);
1965
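	/* Free the pre-allocated descriptor if it turned out to be unneeded. */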
1966 if (pp != NULL && ppnew != NULL)
1967 free(ppnew, M_PMC);
1968
1969 return pp;
1970}
1971
1972/*
1973 * remove a process descriptor from the process hash table.
1974 */
1975
1976static void
1977pmc_remove_process_descriptor(struct pmc_process *pp)
1978{
1979 KASSERT(pp->pp_refcnt == 0,
1980 ("[pmc,%d] Removing process descriptor %p with count %d",
1981 __LINE__, pp, pp->pp_refcnt));
1982
1983 mtx_lock_spin(&pmc_processhash_mtx);
1984 LIST_REMOVE(pp, pp_next);
1985 mtx_unlock_spin(&pmc_processhash_mtx);
1986}
1987
1988
1989/*
1990 * find an owner descriptor corresponding to proc 'p'
1991 */
1992
1993static struct pmc_owner *
1994pmc_find_owner_descriptor(struct proc *p)
1995{
1996 uint32_t hindex;
1997 struct pmc_owner *po;
1998 struct pmc_ownerhash *poh;
1999
2000 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2001 poh = &pmc_ownerhash[hindex];
2002
2003 po = NULL;
2004 LIST_FOREACH(po, poh, po_next)
2005 if (po->po_owner == p)
2006 break;
2007
2008 PMCDBG(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
2009 "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
2010
2011 return po;
2012}
2013
2014/*
2015 * pmc_allocate_pmc_descriptor
2016 *
2017 * Allocate a pmc descriptor and initialize its
2018 * fields.
2019 */
2020
2021static struct pmc *
2022pmc_allocate_pmc_descriptor(void)
2023{
2024 struct pmc *pmc;
2025
2026 pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO);
2027
2028 if (pmc != NULL) {
2029 pmc->pm_owner = NULL;
2030 LIST_INIT(&pmc->pm_targets);
2031 }
2032
2033 PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
2034
2035 return pmc;
2036}
2037
2038/*
2039 * Destroy a pmc descriptor.
2040 */
2041
2042static void
2043pmc_destroy_pmc_descriptor(struct pmc *pm)
2044{
2045 (void) pm;
2046
2047#ifdef DEBUG
2048 KASSERT(pm->pm_state == PMC_STATE_DELETED ||
2049 pm->pm_state == PMC_STATE_FREE,
2050 ("[pmc,%d] destroying non-deleted PMC", __LINE__));
2051 KASSERT(LIST_EMPTY(&pm->pm_targets),
2052 ("[pmc,%d] destroying pmc with targets", __LINE__));
2053 KASSERT(pm->pm_owner == NULL,
2054 ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
2055 KASSERT(pm->pm_runcount == 0,
2056 ("[pmc,%d] pmc has non-zero run count %d", __LINE__,
2057 pm->pm_runcount));
2058#endif
2059}
2060
2061static void
2062pmc_wait_for_pmc_idle(struct pmc *pm)
2063{
2064#ifdef DEBUG
2065 volatile int maxloop;
2066
2067 maxloop = 100 * pmc_cpu_max();
2068#endif
2069
2070 /*
2071 * Loop (with a forced context switch) till the PMC's runcount
2072 * comes down to zero.
2073 */
2074 while (atomic_load_acq_32(&pm->pm_runcount) > 0) {
2075#ifdef DEBUG
2076 maxloop--;
2077 KASSERT(maxloop > 0,
2078 ("[pmc,%d] (ri%d, rc%d) waiting too long for "
2079 "pmc to be free", __LINE__,
2080 PMC_TO_ROWINDEX(pm), pm->pm_runcount));
2081#endif
2082 pmc_force_context_switch();
2083 }
2084}
2085
2086/*
2087 * This function does the following things:
2088 *
2089 * - detaches the PMC from hardware
2090 * - unlinks all target threads that were attached to it
2091 * - removes the PMC from its owner's list
2092 * - destroys the PMC's private mutex
2093 *
2094 * Once this function completes, the given pmc pointer can be safely
2095 * FREE'd by the caller.
2096 */
2097
2098static void
2099pmc_release_pmc_descriptor(struct pmc *pm)
2100{
2101 enum pmc_mode mode;
2102 struct pmc_hw *phw;
2103 u_int adjri, ri, cpu;
2104 struct pmc_owner *po;
2105 struct pmc_binding pb;
2106 struct pmc_process *pp;
2107 struct pmc_classdep *pcd;
2108 struct pmc_target *ptgt, *tmp;
2109
2110 sx_assert(&pmc_sx, SX_XLOCKED);
2111
2112 KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
2113
2114 ri = PMC_TO_ROWINDEX(pm);
2115 pcd = pmc_ri_to_classdep(md, ri, &adjri);
2116 mode = PMC_TO_MODE(pm);
2117
2118 PMCDBG(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
2119 mode);
2120
2121 /*
2122 * First, we take the PMC off hardware.
2123 */
2124 cpu = 0;
2125 if (PMC_IS_SYSTEM_MODE(mode)) {
2126
2127 /*
2128 * A system mode PMC runs on a specific CPU. Switch
2129 * to this CPU and turn hardware off.
2130 */
2131 pmc_save_cpu_binding(&pb);
2132
2133 cpu = PMC_TO_CPU(pm);
2134
2135 pmc_select_cpu(cpu);
2136
2137 /* switch off non-stalled CPUs */
2138 if (pm->pm_state == PMC_STATE_RUNNING &&
2139 pm->pm_stalled == 0) {
2140
2141 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
2142
2143 KASSERT(phw->phw_pmc == pm,
2144 ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
2145 __LINE__, ri, phw->phw_pmc, pm));
2146 PMCDBG(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
2147
2148 critical_enter();
2149 pcd->pcd_stop_pmc(cpu, adjri);
2150 critical_exit();
2151 }
2152
2153 PMCDBG(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
2154
2155 critical_enter();
2156 pcd->pcd_config_pmc(cpu, adjri, NULL);
2157 critical_exit();
2158
2159 /* adjust the global and process count of SS mode PMCs */
2160 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
2161 po = pm->pm_owner;
2162 po->po_sscount--;
2163 if (po->po_sscount == 0) {
2164 atomic_subtract_rel_int(&pmc_ss_count, 1);
2165 LIST_REMOVE(po, po_ssnext);
2166 }
2167 }
2168
2169 pm->pm_state = PMC_STATE_DELETED;
2170
2171 pmc_restore_cpu_binding(&pb);
2172
2173 /*
2174 * We could have references to this PMC structure in
2175 * the per-cpu sample queues. Wait for the queue to
2176 * drain.
2177 */
2178 pmc_wait_for_pmc_idle(pm);
2179
2180 } else if (PMC_IS_VIRTUAL_MODE(mode)) {
2181
2182 /*
2183 * A virtual PMC could be running on multiple CPUs at
2184 * a given instant.
2185 *
2186 * By marking its state as DELETED, we ensure that
2187 * this PMC is never further scheduled on hardware.
2188 *
2189		 * Then we wait until all CPUs are done with this PMC.
2190 */
2191 pm->pm_state = PMC_STATE_DELETED;
2192
2193
2194		/* Wait for the PMC's runcount to come to zero. */
2195 pmc_wait_for_pmc_idle(pm);
2196
2197 /*
2198 * At this point the PMC is off all CPUs and cannot be
2199 * freshly scheduled onto a CPU. It is now safe to
2200 * unlink all targets from this PMC. If a
2201 * process-record's refcount falls to zero, we remove
2202 * it from the hash table. The module-wide SX lock
2203 * protects us from races.
2204 */
2205 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
2206 pp = ptgt->pt_process;
2207 pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
2208
2209 PMCDBG(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
2210
2211 /*
2212 * If the target process record shows that no
2213 * PMCs are attached to it, reclaim its space.
2214 */
2215
2216 if (pp->pp_refcnt == 0) {
2217 pmc_remove_process_descriptor(pp);
2218 free(pp, M_PMC);
2219 }
2220 }
2221
2222 cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
2223
2224 }
2225
2226 /*
2227 * Release any MD resources
2228 */
2229 (void) pcd->pcd_release_pmc(cpu, adjri, pm);
2230
2231 /*
2232 * Update row disposition
2233 */
2234
2235 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
2236 PMC_UNMARK_ROW_STANDALONE(ri);
2237 else
2238 PMC_UNMARK_ROW_THREAD(ri);
2239
2240 /* unlink from the owner's list */
2241 if (pm->pm_owner) {
2242 LIST_REMOVE(pm, pm_next);
2243 pm->pm_owner = NULL;
2244 }
2245
2246 pmc_destroy_pmc_descriptor(pm);
2247}
2248
2249/*
2250 * Register an owner and a pmc.
2251 */
2252
2253static int
2254pmc_register_owner(struct proc *p, struct pmc *pmc)
2255{
2256 struct pmc_owner *po;
2257
2258 sx_assert(&pmc_sx, SX_XLOCKED);
2259
2260 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2261 if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
2262 return ENOMEM;
2263
2264 KASSERT(pmc->pm_owner == NULL,
2265 ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
2266 pmc->pm_owner = po;
2267
2268 LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
2269
2270 PROC_LOCK(p);
2271 p->p_flag |= P_HWPMC;
2272 PROC_UNLOCK(p);
2273
2274 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
2275 pmclog_process_pmcallocate(pmc);
2276
2277 PMCDBG(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
2278 po, pmc);
2279
2280 return 0;
2281}
2282
2283/*
2284 * Return the current row disposition:
2285 * == 0 => FREE
2286 * > 0 => PROCESS MODE
2287 * < 0 => SYSTEM MODE
2288 */
2289
2290int
2291pmc_getrowdisp(int ri)
2292{
2293 return pmc_pmcdisp[ri];
2294}
2295
2296/*
2297 * Check if a PMC at row index 'ri' can be allocated to the current
2298 * process.
2299 *
2300 * Allocation can fail if:
2301 * - the current process is already being profiled by a PMC at index 'ri',
2302 * attached to it via OP_PMCATTACH.
2303 * - the current process has already allocated a PMC at index 'ri'
2304 * via OP_ALLOCATE.
2305 */
2306
2307static int
2308pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
2309{
2310 enum pmc_mode mode;
2311 struct pmc *pm;
2312 struct pmc_owner *po;
2313 struct pmc_process *pp;
2314
2315 PMCDBG(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
2316 "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
2317
2318 /*
2319 * We shouldn't have already allocated a process-mode PMC at
2320 * row index 'ri'.
2321 *
2322 * We shouldn't have allocated a system-wide PMC on the same
2323 * CPU and same RI.
2324 */
2325 if ((po = pmc_find_owner_descriptor(p)) != NULL)
2326 LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2327 if (PMC_TO_ROWINDEX(pm) == ri) {
2328 mode = PMC_TO_MODE(pm);
2329 if (PMC_IS_VIRTUAL_MODE(mode))
2330 return EEXIST;
2331 if (PMC_IS_SYSTEM_MODE(mode) &&
2332 (int) PMC_TO_CPU(pm) == cpu)
2333 return EEXIST;
2334 }
2335 }
2336
2337 /*
2338 * We also shouldn't be the target of any PMC at this index
2339 * since otherwise a PMC_ATTACH to ourselves will fail.
2340 */
2341 if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
2342 if (pp->pp_pmcs[ri].pp_pmc)
2343 return EEXIST;
2344
2345 PMCDBG(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
2346 p, p->p_pid, p->p_comm, ri);
2347
2348 return 0;
2349}
2350
2351/*
2352 * Check if a given PMC at row index 'ri' can be currently used in
2353 * mode 'mode'.
2354 */
2355
2356static int
2357pmc_can_allocate_row(int ri, enum pmc_mode mode)
2358{
2359 enum pmc_disp disp;
2360
2361 sx_assert(&pmc_sx, SX_XLOCKED);
2362
2363 PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
2364
2365 if (PMC_IS_SYSTEM_MODE(mode))
2366 disp = PMC_DISP_STANDALONE;
2367 else
2368 disp = PMC_DISP_THREAD;
2369
2370 /*
2371 * check disposition for PMC row 'ri':
2372 *
2373 * Expected disposition Row-disposition Result
2374 *
2375 * STANDALONE STANDALONE or FREE proceed
2376 * STANDALONE THREAD fail
2377 * THREAD THREAD or FREE proceed
2378 * THREAD STANDALONE fail
2379 */
2380
2381 if (!PMC_ROW_DISP_IS_FREE(ri) &&
2382 !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
2383 !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
2384 return EBUSY;
2385
2386 /*
2387 * All OK
2388 */
2389
2390 PMCDBG(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
2391
2392 return 0;
2393
2394}
2395
2396/*
2397 * Find a PMC descriptor with user handle 'pmcid' in the owner descriptor 'po'.
2398 */
2399
2400static struct pmc *
2401pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
2402{
2403 struct pmc *pm;
2404
2405 KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
2406 ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
2407 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
2408
2409 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2410 if (pm->pm_id == pmcid)
2411 return pm;
2412
2413 return NULL;
2414}
2415
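/*
 * Find the PMC with user handle 'pmcid' belonging to the current
 * process, i.e. to curthread's owner descriptor.
 */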
2416static int
2417pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
2418{
2419
2420 struct pmc *pm;
2421 struct pmc_owner *po;
2422
2423 PMCDBG(PMC,FND,1, "find-pmc id=%d", pmcid);
2424
2425 if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL)
2426 return ESRCH;
2427
2428 if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
2429 return EINVAL;
2430
2431 PMCDBG(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
2432
2433 *pmc = pm;
2434 return 0;
2435}
2436
2437/*
2438 * Start a PMC.
2439 */
2440
2441static int
2442pmc_start(struct pmc *pm)
2443{
2444 enum pmc_mode mode;
2445 struct pmc_owner *po;
2446 struct pmc_binding pb;
2447 struct pmc_classdep *pcd;
2448 int adjri, error, cpu, ri;
2449
2450 KASSERT(pm != NULL,
2451 ("[pmc,%d] null pm", __LINE__));
2452
2453 mode = PMC_TO_MODE(pm);
2454 ri = PMC_TO_ROWINDEX(pm);
2455 pcd = pmc_ri_to_classdep(md, ri, &adjri);
2456
2457 error = 0;
2458
2459 PMCDBG(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
2460
2461 po = pm->pm_owner;
2462
2463 /*
2464 * Disallow PMCSTART if a logfile is required but has not been
2465 * configured yet.
2466 */
2467 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
2468 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
2469 return (EDOOFUS); /* programming error */
2470
2471 /*
2472 * If this is a sampling mode PMC, log mapping information for
2473 * the kernel modules that are currently loaded.
2474 */
2475 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
2476 pmc_log_kernel_mappings(pm);
2477
2478 if (PMC_IS_VIRTUAL_MODE(mode)) {
2479
2480 /*
2481 * If a PMCATTACH has never been done on this PMC,
2482 * attach it to its owner process.
2483 */
2484
2485 if (LIST_EMPTY(&pm->pm_targets))
2486 error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
2487 pmc_attach_process(po->po_owner, pm);
2488
2489 /*
2490 * If the PMC is attached to its owner, then force a context
2491 * switch to ensure that the MD state gets set correctly.
2492 */
2493
2494 if (error == 0) {
2495 pm->pm_state = PMC_STATE_RUNNING;
2496 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
2497 pmc_force_context_switch();
2498 }
2499
2500 return (error);
2501 }
2502
2503
2504 /*
2505 * A system-wide PMC.
2506 *
2507 * Add the owner to the global list if this is a system-wide
2508 * sampling PMC.
2509 */
2510
2511 if (mode == PMC_MODE_SS) {
2512 if (po->po_sscount == 0) {
2513 LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
2514 atomic_add_rel_int(&pmc_ss_count, 1);
2515 PMCDBG(PMC,OPS,1, "po=%p in global list", po);
2516 }
2517 po->po_sscount++;
2518 }
2519
2520 /* Log mapping information for all processes in the system. */
2521 pmc_log_all_process_mappings(po);
2522
2523 /*
2524 * Move to the CPU associated with this
2525 * PMC, and start the hardware.
2526 */
2527
2528 pmc_save_cpu_binding(&pb);
2529
2530 cpu = PMC_TO_CPU(pm);
2531
2532 if (!pmc_cpu_is_active(cpu))
2533 return (ENXIO);
2534
2535 pmc_select_cpu(cpu);
2536
2537 /*
2538	 * Global PMCs are configured at allocation time, so write out
2539	 * the initial value and start the PMC.
2540 */
2541
2542 pm->pm_state = PMC_STATE_RUNNING;
2543
2544 critical_enter();
2545 if ((error = pcd->pcd_write_pmc(cpu, adjri,
2546 PMC_IS_SAMPLING_MODE(mode) ?
2547 pm->pm_sc.pm_reloadcount :
2548 pm->pm_sc.pm_initial)) == 0)
2549 error = pcd->pcd_start_pmc(cpu, adjri);
2550 critical_exit();
2551
2552 pmc_restore_cpu_binding(&pb);
2553
2554 return (error);
2555}
2556
2557/*
2558 * Stop a PMC.
2559 */
2560
2561static int
2562pmc_stop(struct pmc *pm)
2563{
2564 struct pmc_owner *po;
2565 struct pmc_binding pb;
2566 struct pmc_classdep *pcd;
2567 int adjri, cpu, error, ri;
2568
2569 KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
2570
2571 PMCDBG(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
2572 PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
2573
2574 pm->pm_state = PMC_STATE_STOPPED;
2575
2576 /*
2577 * If the PMC is a virtual mode one, changing the state to
2578 * non-RUNNING is enough to ensure that the PMC never gets
2579 * scheduled.
2580 *
2581	 * If this PMC is currently running on a CPU, then it will be
2582	 * handled correctly at the time its target process is context
2583 * switched out.
2584 */
2585
2586 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
2587 return 0;
2588
2589 /*
2590 * A system-mode PMC. Move to the CPU associated with
2591 * this PMC, and stop the hardware. We update the
2592 * 'initial count' so that a subsequent PMCSTART will
2593 * resume counting from the current hardware count.
2594 */
2595
2596 pmc_save_cpu_binding(&pb);
2597
2598 cpu = PMC_TO_CPU(pm);
2599
2600 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
2601 ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
2602
2603 if (!pmc_cpu_is_active(cpu))
2604 return ENXIO;
2605
2606 pmc_select_cpu(cpu);
2607
2608 ri = PMC_TO_ROWINDEX(pm);
2609 pcd = pmc_ri_to_classdep(md, ri, &adjri);
2610
2611 critical_enter();
2612 if ((error = pcd->pcd_stop_pmc(cpu, adjri)) == 0)
2613 error = pcd->pcd_read_pmc(cpu, adjri, &pm->pm_sc.pm_initial);
2614 critical_exit();
2615
2616 pmc_restore_cpu_binding(&pb);
2617
2618 po = pm->pm_owner;
2619
2620 /* remove this owner from the global list of SS PMC owners */
2621 if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
2622 po->po_sscount--;
2623 if (po->po_sscount == 0) {
2624 atomic_subtract_rel_int(&pmc_ss_count, 1);
2625 LIST_REMOVE(po, po_ssnext);
2626 PMCDBG(PMC,OPS,2,"po=%p removed from global list", po);
2627 }
2628 }
2629
2630 return (error);
2631}
2632
2633
2634#ifdef DEBUG
2635static const char *pmc_op_to_name[] = {
2636#undef __PMC_OP
2637#define __PMC_OP(N, D) #N ,
2638 __PMC_OPS()
2639 NULL
2640};
2641#endif
2642
2643/*
2644 * The syscall interface
2645 */
2646
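/*
 * Userland normally reaches these operations through the libpmc(3)
 * wrappers rather than by issuing the system call directly.  A rough
 * sketch of the usual life cycle (the library names are illustrative;
 * see pmc(3) for the authoritative interface) is:
 *
 *	pmc_allocate()		-> PMC_OP_PMCALLOCATE
 *	pmc_attach()		-> PMC_OP_PMCATTACH (process-virtual PMCs)
 *	pmc_start()		-> PMC_OP_PMCSTART
 *	pmc_read()/pmc_write()	-> PMC_OP_PMCRW
 *	pmc_stop()		-> PMC_OP_PMCSTOP
 *	pmc_release()		-> PMC_OP_PMCRELEASE
 */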
2647#define PMC_GET_SX_XLOCK(...) do { \
2648 sx_xlock(&pmc_sx); \
2649 if (pmc_hook == NULL) { \
2650 sx_xunlock(&pmc_sx); \
2651 return __VA_ARGS__; \
2652 } \
2653} while (0)
2654
2655#define PMC_DOWNGRADE_SX() do { \
2656 sx_downgrade(&pmc_sx); \
2657 is_sx_downgraded = 1; \
2658} while (0)
2659
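/*
 * The handler is entered with the module-wide sx lock held exclusively
 * (see PMC_GET_SX_XLOCK() above); operations that only read global
 * state downgrade to a shared lock via PMC_DOWNGRADE_SX().
 */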
2660static int
2661pmc_syscall_handler(struct thread *td, void *syscall_args)
2662{
2663 int error, is_sx_downgraded, op;
2664 struct pmc_syscall_args *c;
2665 void *arg;
2666
2667 PMC_GET_SX_XLOCK(ENOSYS);
2668
2669 DROP_GIANT();
2670
2671 is_sx_downgraded = 0;
2672
2673 c = (struct pmc_syscall_args *) syscall_args;
2674
2675 op = c->pmop_code;
2676 arg = c->pmop_data;
2677
2678 PMCDBG(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
2679 pmc_op_to_name[op], arg);
2680
2681 error = 0;
2682 atomic_add_int(&pmc_stats.pm_syscalls, 1);
2683
2684 switch(op)
2685 {
2686
2687
2688 /*
2689 * Configure a log file.
2690 *
2691 * XXX This OP will be reworked.
2692 */
2693
2694 case PMC_OP_CONFIGURELOG:
2695 {
2696 struct proc *p;
2697 struct pmc *pm;
2698 struct pmc_owner *po;
2699 struct pmc_op_configurelog cl;
2700
2701 sx_assert(&pmc_sx, SX_XLOCKED);
2702
2703 if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
2704 break;
2705
2706 /* mark this process as owning a log file */
2707 p = td->td_proc;
2708 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2709 if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
2710 error = ENOMEM;
2711 break;
2712 }
2713
2714 /*
2715		 * If a valid fd was passed in, try to configure that;
2716		 * otherwise, if 'fd' was less than zero and a log file
2717		 * was configured, flush its buffers and de-configure
2718		 * it.
2719 */
2720 if (cl.pm_logfd >= 0)
2721 error = pmclog_configure_log(md, po, cl.pm_logfd);
2722 else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
2723 pmclog_process_closelog(po);
2724 error = pmclog_flush(po);
2725 if (error == 0) {
2726 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2727 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
2728 pm->pm_state == PMC_STATE_RUNNING)
2729 pmc_stop(pm);
2730 error = pmclog_deconfigure_log(po);
2731 }
2732 } else
2733 error = EINVAL;
2734
2735 if (error)
2736 break;
2737 }
2738 break;
2739
2740
2741 /*
2742 * Flush a log file.
2743 */
2744
2745 case PMC_OP_FLUSHLOG:
2746 {
2747 struct pmc_owner *po;
2748
2749 sx_assert(&pmc_sx, SX_XLOCKED);
2750
2751 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
2752 error = EINVAL;
2753 break;
2754 }
2755
2756 error = pmclog_flush(po);
2757 }
2758 break;
2759
2760 /*
2761 * Retrieve hardware configuration.
2762 */
2763
2764 case PMC_OP_GETCPUINFO: /* CPU information */
2765 {
2766 struct pmc_op_getcpuinfo gci;
2767 struct pmc_classinfo *pci;
2768 struct pmc_classdep *pcd;
2769 int cl;
2770
2771 gci.pm_cputype = md->pmd_cputype;
2772 gci.pm_ncpu = pmc_cpu_max();
2773 gci.pm_npmc = md->pmd_npmc;
2774 gci.pm_nclass = md->pmd_nclass;
2775 pci = gci.pm_classes;
2776 pcd = md->pmd_classdep;
2777 for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) {
2778 pci->pm_caps = pcd->pcd_caps;
2779 pci->pm_class = pcd->pcd_class;
2780 pci->pm_width = pcd->pcd_width;
2781 pci->pm_num = pcd->pcd_num;
2782 }
2783 error = copyout(&gci, arg, sizeof(gci));
2784 }
2785 break;
2786
2787
2788 /*
2789 * Get module statistics
2790 */
2791
2792 case PMC_OP_GETDRIVERSTATS:
2793 {
2794 struct pmc_op_getdriverstats gms;
2795
2796 bcopy(&pmc_stats, &gms, sizeof(gms));
2797 error = copyout(&gms, arg, sizeof(gms));
2798 }
2799 break;
2800
2801
2802 /*
2803 * Retrieve module version number
2804 */
2805
2806 case PMC_OP_GETMODULEVERSION:
2807 {
2808 uint32_t cv, modv;
2809
2810 /* retrieve the client's idea of the ABI version */
2811 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
2812 break;
2813 /* don't service clients newer than our driver */
2814 modv = PMC_VERSION;
2815 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
2816 error = EPROGMISMATCH;
2817 break;
2818 }
2819 error = copyout(&modv, arg, sizeof(int));
2820 }
2821 break;
2822
2823
2824 /*
2825 * Retrieve the state of all the PMCs on a given
2826 * CPU.
2827 */
2828
2829 case PMC_OP_GETPMCINFO:
2830 {
2831 int ari;
2832 struct pmc *pm;
2833 size_t pmcinfo_size;
2834 uint32_t cpu, n, npmc;
2835 struct pmc_owner *po;
2836 struct pmc_binding pb;
2837 struct pmc_classdep *pcd;
2838 struct pmc_info *p, *pmcinfo;
2839 struct pmc_op_getpmcinfo *gpi;
2840
2841 PMC_DOWNGRADE_SX();
2842
2843 gpi = (struct pmc_op_getpmcinfo *) arg;
2844
2845 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
2846 break;
2847
2848 if (cpu >= pmc_cpu_max()) {
2849 error = EINVAL;
2850 break;
2851 }
2852
2853 if (!pmc_cpu_is_active(cpu)) {
2854 error = ENXIO;
2855 break;
2856 }
2857
2858 /* switch to CPU 'cpu' */
2859 pmc_save_cpu_binding(&pb);
2860 pmc_select_cpu(cpu);
2861
2862 npmc = md->pmd_npmc;
2863
2864 pmcinfo_size = npmc * sizeof(struct pmc_info);
2865 pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK);
2866
2867 p = pmcinfo;
2868
2869 for (n = 0; n < md->pmd_npmc; n++, p++) {
2870
2871 pcd = pmc_ri_to_classdep(md, n, &ari);
2872
2873 KASSERT(pcd != NULL,
2874 ("[pmc,%d] null pcd ri=%d", __LINE__, n));
2875
2876 if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0)
2877 break;
2878
2879 if (PMC_ROW_DISP_IS_STANDALONE(n))
2880 p->pm_rowdisp = PMC_DISP_STANDALONE;
2881 else if (PMC_ROW_DISP_IS_THREAD(n))
2882 p->pm_rowdisp = PMC_DISP_THREAD;
2883 else
2884 p->pm_rowdisp = PMC_DISP_FREE;
2885
2886 p->pm_ownerpid = -1;
2887
2888 if (pm == NULL) /* no PMC associated */
2889 continue;
2890
2891 po = pm->pm_owner;
2892
2893 KASSERT(po->po_owner != NULL,
2894 ("[pmc,%d] pmc_owner had a null proc pointer",
2895 __LINE__));
2896
2897 p->pm_ownerpid = po->po_owner->p_pid;
2898 p->pm_mode = PMC_TO_MODE(pm);
2899 p->pm_event = pm->pm_event;
2900 p->pm_flags = pm->pm_flags;
2901
2902 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
2903 p->pm_reloadcount =
2904 pm->pm_sc.pm_reloadcount;
2905 }
2906
2907 pmc_restore_cpu_binding(&pb);
2908
2909 /* now copy out the PMC info collected */
2910 if (error == 0)
2911 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
2912
2913 free(pmcinfo, M_PMC);
2914 }
2915 break;
2916
2917
2918 /*
2919	 * Set the administrative state of a PMC, i.e., whether
2920	 * the PMC is to be used or not.
2921 */
2922
2923 case PMC_OP_PMCADMIN:
2924 {
2925 int cpu, ri;
2926 enum pmc_state request;
2927 struct pmc_cpu *pc;
2928 struct pmc_hw *phw;
2929 struct pmc_op_pmcadmin pma;
2930 struct pmc_binding pb;
2931
2932 sx_assert(&pmc_sx, SX_XLOCKED);
2933
2934 KASSERT(td == curthread,
2935 ("[pmc,%d] td != curthread", __LINE__));
2936
2937 error = priv_check(td, PRIV_PMC_MANAGE);
2938 if (error)
2939 break;
2940
2941 if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
2942 break;
2943
2944 cpu = pma.pm_cpu;
2945
2946 if (cpu < 0 || cpu >= (int) pmc_cpu_max()) {
2947 error = EINVAL;
2948 break;
2949 }
2950
2951 if (!pmc_cpu_is_active(cpu)) {
2952 error = ENXIO;
2953 break;
2954 }
2955
2956 request = pma.pm_state;
2957
2958 if (request != PMC_STATE_DISABLED &&
2959 request != PMC_STATE_FREE) {
2960 error = EINVAL;
2961 break;
2962 }
2963
2964 ri = pma.pm_pmc; /* pmc id == row index */
2965 if (ri < 0 || ri >= (int) md->pmd_npmc) {
2966 error = EINVAL;
2967 break;
2968 }
2969
2970 /*
2971 * We can't disable a PMC with a row-index allocated
2972 * for process virtual PMCs.
2973 */
2974
2975 if (PMC_ROW_DISP_IS_THREAD(ri) &&
2976 request == PMC_STATE_DISABLED) {
2977 error = EBUSY;
2978 break;
2979 }
2980
2981 /*
2982 * otherwise, this PMC on this CPU is either free or
2983 * in system-wide mode.
2984 */
2985
2986 pmc_save_cpu_binding(&pb);
2987 pmc_select_cpu(cpu);
2988
2989 pc = pmc_pcpu[cpu];
2990 phw = pc->pc_hwpmcs[ri];
2991
2992 /*
2993 * XXX do we need some kind of 'forced' disable?
2994 */
2995
2996 if (phw->phw_pmc == NULL) {
2997 if (request == PMC_STATE_DISABLED &&
2998 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
2999 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
3000 PMC_MARK_ROW_STANDALONE(ri);
3001 } else if (request == PMC_STATE_FREE &&
3002 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
3003 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
3004 PMC_UNMARK_ROW_STANDALONE(ri);
3005 }
3006 /* other cases are a no-op */
3007 } else
3008 error = EBUSY;
3009
3010 pmc_restore_cpu_binding(&pb);
3011 }
3012 break;
3013
3014
3015 /*
3016 * Allocate a PMC.
3017 */
3018
3019 case PMC_OP_PMCALLOCATE:
3020 {
3021 int adjri, n;
3022 u_int cpu;
3023 uint32_t caps;
3024 struct pmc *pmc;
3025 enum pmc_mode mode;
3026 struct pmc_hw *phw;
3027 struct pmc_binding pb;
3028 struct pmc_classdep *pcd;
3029 struct pmc_op_pmcallocate pa;
3030
3031 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
3032 break;
3033
3034 caps = pa.pm_caps;
3035 mode = pa.pm_mode;
3036 cpu = pa.pm_cpu;
3037
3038 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
3039 mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
3040 (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) {
3041 error = EINVAL;
3042 break;
3043 }
3044
3045 /*
3046 * Virtual PMCs should only ask for a default CPU.
3047 * System mode PMCs need to specify a non-default CPU.
3048 */
3049
3050 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
3051 (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
3052 error = EINVAL;
3053 break;
3054 }
3055
3056 /*
3057 * Check that an inactive CPU is not being asked for.
3058 */
3059
3060 if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) {
3061 error = ENXIO;
3062 break;
3063 }
3064
3065 /*
3066 * Refuse an allocation for a system-wide PMC if this
3067 * process has been jailed, or if this process lacks
3068 * super-user credentials and the sysctl tunable
3069 * 'security.bsd.unprivileged_syspmcs' is zero.
3070 */
3071
3072 if (PMC_IS_SYSTEM_MODE(mode)) {
3073 if (jailed(curthread->td_ucred)) {
3074 error = EPERM;
3075 break;
3076 }
3077 if (!pmc_unprivileged_syspmcs) {
3078 error = priv_check(curthread,
3079 PRIV_PMC_SYSTEM);
3080 if (error)
3081 break;
3082 }
3083 }
3084
3085 if (error)
3086 break;
3087
3088 /*
3089 * Look for valid values for 'pm_flags'
3090 */
3091
3092 if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
3093 PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN)) != 0) {
3094 error = EINVAL;
3095 break;
3096 }
3097
3098 /* process logging options are not allowed for system PMCs */
3099 if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
3100 (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
3101 error = EINVAL;
3102 break;
3103 }
3104
3105 /*
3106 * All sampling mode PMCs need to be able to interrupt the
3107 * CPU.
3108 */
3109 if (PMC_IS_SAMPLING_MODE(mode))
3110 caps |= PMC_CAP_INTERRUPT;
3111
3112 /* A valid class specifier should have been passed in. */
3113 for (n = 0; n < md->pmd_nclass; n++)
3114 if (md->pmd_classdep[n].pcd_class == pa.pm_class)
3115 break;
3116 if (n == md->pmd_nclass) {
3117 error = EINVAL;
3118 break;
3119 }
3120
3121 /* The requested PMC capabilities should be feasible. */
3122 if ((md->pmd_classdep[n].pcd_caps & caps) != caps) {
3123 error = EOPNOTSUPP;
3124 break;
3125 }
3126
3127 PMCDBG(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
3128 pa.pm_ev, caps, mode, cpu);
3129
3130 pmc = pmc_allocate_pmc_descriptor();
3131 pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
3132 PMC_ID_INVALID);
3133 pmc->pm_event = pa.pm_ev;
3134 pmc->pm_state = PMC_STATE_FREE;
3135 pmc->pm_caps = caps;
3136 pmc->pm_flags = pa.pm_flags;
3137
3138 /* switch thread to CPU 'cpu' */
3139 pmc_save_cpu_binding(&pb);
3140
3141#define PMC_IS_SHAREABLE_PMC(cpu, n) \
3142 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
3143 PMC_PHW_FLAG_IS_SHAREABLE)
3144#define PMC_IS_UNALLOCATED(cpu, n) \
3145 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
3146
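		/*
		 * Scan the row indices for one that may legally hold a
		 * PMC of this mode, is not already taken by this process
		 * (or by a system PMC on the same CPU), is unallocated or
		 * shareable on the target CPU, and whose hardware accepts
		 * the allocation request.
		 */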
3147 if (PMC_IS_SYSTEM_MODE(mode)) {
3148 pmc_select_cpu(cpu);
3149 for (n = 0; n < (int) md->pmd_npmc; n++) {
3150 pcd = pmc_ri_to_classdep(md, n, &adjri);
3151 if (pmc_can_allocate_row(n, mode) == 0 &&
3152 pmc_can_allocate_rowindex(
3153 curthread->td_proc, n, cpu) == 0 &&
3154 (PMC_IS_UNALLOCATED(cpu, n) ||
3155 PMC_IS_SHAREABLE_PMC(cpu, n)) &&
3156 pcd->pcd_allocate_pmc(cpu, adjri, pmc,
3157 &pa) == 0)
3158 break;
3159 }
3160 } else {
3161 /* Process virtual mode */
3162 for (n = 0; n < (int) md->pmd_npmc; n++) {
3163 pcd = pmc_ri_to_classdep(md, n, &adjri);
3164 if (pmc_can_allocate_row(n, mode) == 0 &&
3165 pmc_can_allocate_rowindex(
3166 curthread->td_proc, n,
3167 PMC_CPU_ANY) == 0 &&
3168 pcd->pcd_allocate_pmc(curthread->td_oncpu,
3169 adjri, pmc, &pa) == 0)
3170 break;
3171 }
3172 }
3173
3174#undef PMC_IS_UNALLOCATED
3175#undef PMC_IS_SHAREABLE_PMC
3176
3177 pmc_restore_cpu_binding(&pb);
3178
3179 if (n == (int) md->pmd_npmc) {
3180 pmc_destroy_pmc_descriptor(pmc);
3181 free(pmc, M_PMC);
3182 pmc = NULL;
3183 error = EINVAL;
3184 break;
3185 }
3186
3187 /* Fill in the correct value in the ID field */
3188 pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
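		/*
		 * The id packs the CPU, mode, class and row index so that
		 * PMC_TO_CPU(), PMC_TO_MODE() and PMC_TO_ROWINDEX() can
		 * recover them later without additional lookups.
		 */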
3189
3190 PMCDBG(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
3191 pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
3192
3193 /* Process mode PMCs with logging enabled need log files */
3194 if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
3195 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3196
3197 /* All system mode sampling PMCs require a log file */
3198 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
3199 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3200
3201 /*
3202		 * Configure global PMCs immediately.
3203 */
3204
3205 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
3206
3207 pmc_save_cpu_binding(&pb);
3208 pmc_select_cpu(cpu);
3209
3210 phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
3211 pcd = pmc_ri_to_classdep(md, n, &adjri);
3212
3213 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
3214 (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) {
3215 (void) pcd->pcd_release_pmc(cpu, adjri, pmc);
3216 pmc_destroy_pmc_descriptor(pmc);
3217 free(pmc, M_PMC);
3218 pmc = NULL;
3219 pmc_restore_cpu_binding(&pb);
3220 error = EPERM;
3221 break;
3222 }
3223
3224 pmc_restore_cpu_binding(&pb);
3225 }
3226
3227 pmc->pm_state = PMC_STATE_ALLOCATED;
3228
3229 /*
3230 * mark row disposition
3231 */
3232
3233 if (PMC_IS_SYSTEM_MODE(mode))
3234 PMC_MARK_ROW_STANDALONE(n);
3235 else
3236 PMC_MARK_ROW_THREAD(n);
3237
3238 /*
3239 * Register this PMC with the current thread as its owner.
3240 */
3241
3242 if ((error =
3243 pmc_register_owner(curthread->td_proc, pmc)) != 0) {
3244 pmc_release_pmc_descriptor(pmc);
3245 free(pmc, M_PMC);
3246 pmc = NULL;
3247 break;
3248 }
3249
3250 /*
3251 * Return the allocated index.
3252 */
3253
3254 pa.pm_pmcid = pmc->pm_id;
3255
3256 error = copyout(&pa, arg, sizeof(pa));
3257 }
3258 break;
3259
3260
3261 /*
3262 * Attach a PMC to a process.
3263 */
3264
3265 case PMC_OP_PMCATTACH:
3266 {
3267 struct pmc *pm;
3268 struct proc *p;
3269 struct pmc_op_pmcattach a;
3270
3271 sx_assert(&pmc_sx, SX_XLOCKED);
3272
3273 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3274 break;
3275
3276 if (a.pm_pid < 0) {
3277 error = EINVAL;
3278 break;
3279 } else if (a.pm_pid == 0)
3280 a.pm_pid = td->td_proc->p_pid;
3281
3282 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3283 break;
3284
3285 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
3286 error = EINVAL;
3287 break;
3288 }
3289
3290 /* PMCs may be (re)attached only when allocated or stopped */
3291 if (pm->pm_state == PMC_STATE_RUNNING) {
3292 error = EBUSY;
3293 break;
3294 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
3295 pm->pm_state != PMC_STATE_STOPPED) {
3296 error = EINVAL;
3297 break;
3298 }
3299
3300 /* lookup pid */
3301 if ((p = pfind(a.pm_pid)) == NULL) {
3302 error = ESRCH;
3303 break;
3304 }
3305
3306 /*
3307		 * Ignore processes that are exiting.
3308 */
3309 if (p->p_flag & P_WEXIT) {
3310 error = ESRCH;
3311 PROC_UNLOCK(p); /* pfind() returns a locked process */
3312 break;
3313 }
3314
3315 /*
3316 * we are allowed to attach a PMC to a process if
3317 * we can debug it.
3318 */
3319 error = p_candebug(curthread, p);
3320
3321 PROC_UNLOCK(p);
3322
3323 if (error == 0)
3324 error = pmc_attach_process(p, pm);
3325 }
3326 break;
3327
3328
3329 /*
3330 * Detach an attached PMC from a process.
3331 */
3332
3333 case PMC_OP_PMCDETACH:
3334 {
3335 struct pmc *pm;
3336 struct proc *p;
3337 struct pmc_op_pmcattach a;
3338
3339 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3340 break;
3341
3342 if (a.pm_pid < 0) {
3343 error = EINVAL;
3344 break;
3345 } else if (a.pm_pid == 0)
3346 a.pm_pid = td->td_proc->p_pid;
3347
3348 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3349 break;
3350
3351 if ((p = pfind(a.pm_pid)) == NULL) {
3352 error = ESRCH;
3353 break;
3354 }
3355
3356 /*
3357 * Treat processes that are in the process of exiting
3358 * as if they were not present.
3359 */
3360
3361 if (p->p_flag & P_WEXIT)
3362 error = ESRCH;
3363
3364 PROC_UNLOCK(p); /* pfind() returns a locked process */
3365
3366 if (error == 0)
3367 error = pmc_detach_process(p, pm);
3368 }
3369 break;
3370
3371
3372 /*
3373 * Retrieve the MSR number associated with the counter
3374 * 'pmc_id'. This allows processes to directly use RDPMC
3375 * instructions to read their PMCs, without the overhead of a
3376 * system call.
3377 */
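	/*
	 * For illustration only: on x86, a process that has been granted
	 * MSR access could read its counter with the RDPMC instruction
	 * (e.g. through a compiler intrinsic such as __rdpmc()), using the
	 * value returned by this operation as the counter selector; the
	 * user-level details are outside the scope of this module.
	 */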
3378
3379 case PMC_OP_PMCGETMSR:
3380 {
3381 int adjri, ri;
3382 struct pmc *pm;
3383 struct pmc_target *pt;
3384 struct pmc_op_getmsr gm;
3385 struct pmc_classdep *pcd;
3386
3387 PMC_DOWNGRADE_SX();
3388
3389 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
3390 break;
3391
3392 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
3393 break;
3394
3395 /*
3396 * The allocated PMC has to be a process virtual PMC,
3397 * i.e., of type MODE_T[CS]. Global PMCs can only be
3398 * read using the PMCREAD operation since they may be
3399 * allocated on a different CPU than the one we could
3400 * be running on at the time of the RDPMC instruction.
3401 *
3402 * The GETMSR operation is not allowed for PMCs that
3403 * are inherited across processes.
3404 */
3405
3406 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
3407 (pm->pm_flags & PMC_F_DESCENDANTS)) {
3408 error = EINVAL;
3409 break;
3410 }
3411
3412 /*
3413		 * It only makes sense to use an RDPMC (or its
3414		 * equivalent instruction on non-x86 architectures) on
3415		 * a process that has allocated and attached a PMC to
3416		 * itself.  Conversely, the PMC is only allowed to have
3417 * one process attached to it -- its owner.
3418 */
3419
3420 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
3421 LIST_NEXT(pt, pt_next) != NULL ||
3422 pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
3423 error = EINVAL;
3424 break;
3425 }
3426
3427 ri = PMC_TO_ROWINDEX(pm);
3428 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3429
3430 /* PMC class has no 'GETMSR' support */
3431 if (pcd->pcd_get_msr == NULL) {
3432 error = ENOSYS;
3433 break;
3434 }
3435
3436 if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0)
3437 break;
3438
3439 if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
3440 break;
3441
3442 /*
3443 * Mark our process as using MSRs. Update machine
3444 * state using a forced context switch.
3445 */
3446
3447 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
3448 pmc_force_context_switch();
3449
3450 }
3451 break;
3452
3453 /*
3454 * Release an allocated PMC
3455 */
3456
3457 case PMC_OP_PMCRELEASE:
3458 {
3459 pmc_id_t pmcid;
3460 struct pmc *pm;
3461 struct pmc_owner *po;
3462 struct pmc_op_simple sp;
3463
3464 /*
3465 * Find PMC pointer for the named PMC.
3466 *
3467 * Use pmc_release_pmc_descriptor() to switch off the
3468 * PMC, remove all its target threads, and remove the
3469 * PMC from its owner's list.
3470 *
3471 * Remove the owner record if this is the last PMC
3472 * owned.
3473 *
3474 * Free up space.
3475 */
3476
3477 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3478 break;
3479
3480 pmcid = sp.pm_pmcid;
3481
3482 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3483 break;
3484
3485 po = pm->pm_owner;
3486 pmc_release_pmc_descriptor(pm);
3487 pmc_maybe_remove_owner(po);
3488
3489 free(pm, M_PMC);
3490 }
3491 break;
3492
3493
3494 /*
3495 * Read and/or write a PMC.
3496 */
3497
3498 case PMC_OP_PMCRW:
3499 {
3500 int adjri;
3501 struct pmc *pm;
3502 uint32_t cpu, ri;
3503 pmc_value_t oldvalue;
3504 struct pmc_binding pb;
3505 struct pmc_op_pmcrw prw;
3506 struct pmc_classdep *pcd;
3507 struct pmc_op_pmcrw *pprw;
3508
3509 PMC_DOWNGRADE_SX();
3510
3511 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
3512 break;
3513
3514 ri = 0;
3515 PMCDBG(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
3516 prw.pm_flags);
3517
3518 /* must have at least one flag set */
3519 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
3520 error = EINVAL;
3521 break;
3522 }
3523
3524 /* locate pmc descriptor */
3525 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
3526 break;
3527
3528 /* Can't read a PMC that hasn't been started. */
3529 if (pm->pm_state != PMC_STATE_ALLOCATED &&
3530 pm->pm_state != PMC_STATE_STOPPED &&
3531 pm->pm_state != PMC_STATE_RUNNING) {
3532 error = EINVAL;
3533 break;
3534 }
3535
3536 /* writing a new value is allowed only for 'STOPPED' pmcs */
3537 if (pm->pm_state == PMC_STATE_RUNNING &&
3538 (prw.pm_flags & PMC_F_NEWVALUE)) {
3539 error = EBUSY;
3540 break;
3541 }
3542
3543 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
3544
3545 /*
3546 * If this PMC is attached to its owner (i.e.,
3547 * the process requesting this operation) and
3548 * is running, then attempt to get an
3549			 * up-to-date reading from hardware for a READ.
3550 * Writes are only allowed when the PMC is
3551 * stopped, so only update the saved value
3552 * field.
3553 *
3554 * If the PMC is not running, or is not
3555 * attached to its owner, read/write to the
3556 * savedvalue field.
3557 */
3558
3559 ri = PMC_TO_ROWINDEX(pm);
3560 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3561
3562 mtx_pool_lock_spin(pmc_mtxpool, pm);
3563 cpu = curthread->td_oncpu;
3564
3565 if (prw.pm_flags & PMC_F_OLDVALUE) {
3566 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
3567 (pm->pm_state == PMC_STATE_RUNNING))
3568 error = (*pcd->pcd_read_pmc)(cpu, adjri,
3569 &oldvalue);
3570 else
3571 oldvalue = pm->pm_gv.pm_savedvalue;
3572 }
3573 if (prw.pm_flags & PMC_F_NEWVALUE)
3574 pm->pm_gv.pm_savedvalue = prw.pm_value;
3575
3576 mtx_pool_unlock_spin(pmc_mtxpool, pm);
3577
3578 } else { /* System mode PMCs */
3579 cpu = PMC_TO_CPU(pm);
3580 ri = PMC_TO_ROWINDEX(pm);
3581 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3582
3583 if (!pmc_cpu_is_active(cpu)) {
3584 error = ENXIO;
3585 break;
3586 }
3587
3588 /* move this thread to CPU 'cpu' */
3589 pmc_save_cpu_binding(&pb);
3590 pmc_select_cpu(cpu);
3591
3592 critical_enter();
3593 /* save old value */
3594 if (prw.pm_flags & PMC_F_OLDVALUE)
3595 if ((error = (*pcd->pcd_read_pmc)(cpu, adjri,
3596 &oldvalue)))
3597 goto error;
3598 /* write out new value */
3599 if (prw.pm_flags & PMC_F_NEWVALUE)
3600 error = (*pcd->pcd_write_pmc)(cpu, adjri,
3601 prw.pm_value);
3602 error:
3603 critical_exit();
3604 pmc_restore_cpu_binding(&pb);
3605 if (error)
3606 break;
3607 }
3608
3609 pprw = (struct pmc_op_pmcrw *) arg;
3610
3611#ifdef DEBUG
3612 if (prw.pm_flags & PMC_F_NEWVALUE)
3613 PMCDBG(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
3614 ri, prw.pm_value, oldvalue);
3615 else if (prw.pm_flags & PMC_F_OLDVALUE)
3616 PMCDBG(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
3617#endif
3618
3619 /* return old value if requested */
3620 if (prw.pm_flags & PMC_F_OLDVALUE)
3621 if ((error = copyout(&oldvalue, &pprw->pm_value,
3622 sizeof(prw.pm_value))))
3623 break;
3624
3625 }
3626 break;
3627
3628
3629 /*
3630 * Set the sampling rate for a sampling mode PMC and the
3631 * initial count for a counting mode PMC.
3632 */
3633
3634 case PMC_OP_PMCSETCOUNT:
3635 {
3636 struct pmc *pm;
3637 struct pmc_op_pmcsetcount sc;
3638
3639 PMC_DOWNGRADE_SX();
3640
3641 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
3642 break;
3643
3644 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
3645 break;
3646
3647 if (pm->pm_state == PMC_STATE_RUNNING) {
3648 error = EBUSY;
3649 break;
3650 }
3651
3652 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3653 pm->pm_sc.pm_reloadcount = sc.pm_count;
3654 else
3655 pm->pm_sc.pm_initial = sc.pm_count;
3656 }
3657 break;
3658
3659
3660 /*
3661 * Start a PMC.
3662 */
3663
3664 case PMC_OP_PMCSTART:
3665 {
3666 pmc_id_t pmcid;
3667 struct pmc *pm;
3668 struct pmc_op_simple sp;
3669
3670 sx_assert(&pmc_sx, SX_XLOCKED);
3671
3672 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3673 break;
3674
3675 pmcid = sp.pm_pmcid;
3676
3677 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3678 break;
3679
3680 KASSERT(pmcid == pm->pm_id,
3681 ("[pmc,%d] pmcid %x != id %x", __LINE__,
3682 pm->pm_id, pmcid));
3683
3684 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
3685 break;
3686 else if (pm->pm_state != PMC_STATE_STOPPED &&
3687 pm->pm_state != PMC_STATE_ALLOCATED) {
3688 error = EINVAL;
3689 break;
3690 }
3691
3692 error = pmc_start(pm);
3693 }
3694 break;
3695
3696
3697 /*
3698 * Stop a PMC.
3699 */
3700
3701 case PMC_OP_PMCSTOP:
3702 {
3703 pmc_id_t pmcid;
3704 struct pmc *pm;
3705 struct pmc_op_simple sp;
3706
3707 PMC_DOWNGRADE_SX();
3708
3709 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3710 break;
3711
3712 pmcid = sp.pm_pmcid;
3713
3714 /*
3715 * Mark the PMC as inactive and invoke the MD stop
3716 * routines if needed.
3717 */
3718
3719 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3720 break;
3721
3722 KASSERT(pmcid == pm->pm_id,
3723 ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
3724 pm->pm_id, pmcid));
3725
3726 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
3727 break;
3728 else if (pm->pm_state != PMC_STATE_RUNNING) {
3729 error = EINVAL;
3730 break;
3731 }
3732
3733 error = pmc_stop(pm);
3734 }
3735 break;
3736
3737
3738 /*
3739	 * Write a user-supplied value to the log file.
3740 */
3741
3742 case PMC_OP_WRITELOG:
3743 {
3744 struct pmc_op_writelog wl;
3745 struct pmc_owner *po;
3746
3747 PMC_DOWNGRADE_SX();
3748
3749 if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
3750 break;
3751
3752 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3753 error = EINVAL;
3754 break;
3755 }
3756
3757 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
3758 error = EINVAL;
3759 break;
3760 }
3761
3762 error = pmclog_process_userlog(po, &wl);
3763 }
3764 break;
3765
3766
3767 default:
3768 error = EINVAL;
3769 break;
3770 }
3771
3772 if (is_sx_downgraded)
3773 sx_sunlock(&pmc_sx);
3774 else
3775 sx_xunlock(&pmc_sx);
3776
3777 if (error)
3778 atomic_add_int(&pmc_stats.pm_syscall_errors, 1);
3779
3780 PICKUP_GIANT();
3781
3782 return error;
3783}
3784
3785/*
3786 * Helper functions
3787 */
3788
3789
3790/*
3791 * Mark the thread as needing callchain capture and post an AST. The
3792 * actual callchain capture will be done in a context where it is safe
3793 * to take page faults.
3794 */
3795
3796static void
1975/*
1976 * remove a process descriptor from the process hash table.
1977 */
1978
1979static void
1980pmc_remove_process_descriptor(struct pmc_process *pp)
1981{
1982 KASSERT(pp->pp_refcnt == 0,
1983 ("[pmc,%d] Removing process descriptor %p with count %d",
1984 __LINE__, pp, pp->pp_refcnt));
1985
1986 mtx_lock_spin(&pmc_processhash_mtx);
1987 LIST_REMOVE(pp, pp_next);
1988 mtx_unlock_spin(&pmc_processhash_mtx);
1989}
1990
1991
1992/*
1993 * find an owner descriptor corresponding to proc 'p'
1994 */
1995
1996static struct pmc_owner *
1997pmc_find_owner_descriptor(struct proc *p)
1998{
1999 uint32_t hindex;
2000 struct pmc_owner *po;
2001 struct pmc_ownerhash *poh;
2002
2003 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2004 poh = &pmc_ownerhash[hindex];
2005
2006 po = NULL;
2007 LIST_FOREACH(po, poh, po_next)
2008 if (po->po_owner == p)
2009 break;
2010
2011 PMCDBG(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
2012 "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
2013
2014 return po;
2015}
2016
2017/*
2018 * pmc_allocate_pmc_descriptor
2019 *
2020 * Allocate a pmc descriptor and initialize its
2021 * fields.
2022 */
2023
2024static struct pmc *
2025pmc_allocate_pmc_descriptor(void)
2026{
2027 struct pmc *pmc;
2028
2029 pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO);
2030
2031 if (pmc != NULL) {
2032 pmc->pm_owner = NULL;
2033 LIST_INIT(&pmc->pm_targets);
2034 }
2035
2036 PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
2037
2038 return pmc;
2039}
2040
2041/*
2042 * Destroy a pmc descriptor.
2043 */
2044
2045static void
2046pmc_destroy_pmc_descriptor(struct pmc *pm)
2047{
2048 (void) pm;
2049
2050#ifdef DEBUG
2051 KASSERT(pm->pm_state == PMC_STATE_DELETED ||
2052 pm->pm_state == PMC_STATE_FREE,
2053 ("[pmc,%d] destroying non-deleted PMC", __LINE__));
2054 KASSERT(LIST_EMPTY(&pm->pm_targets),
2055 ("[pmc,%d] destroying pmc with targets", __LINE__));
2056 KASSERT(pm->pm_owner == NULL,
2057 ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
2058 KASSERT(pm->pm_runcount == 0,
2059 ("[pmc,%d] pmc has non-zero run count %d", __LINE__,
2060 pm->pm_runcount));
2061#endif
2062}
2063
2064static void
2065pmc_wait_for_pmc_idle(struct pmc *pm)
2066{
2067#ifdef DEBUG
2068 volatile int maxloop;
2069
2070 maxloop = 100 * pmc_cpu_max();
2071#endif
2072
2073 /*
2074 * Loop (with a forced context switch) till the PMC's runcount
2075 * comes down to zero.
2076 */
2077 while (atomic_load_acq_32(&pm->pm_runcount) > 0) {
2078#ifdef DEBUG
2079 maxloop--;
2080 KASSERT(maxloop > 0,
2081 ("[pmc,%d] (ri%d, rc%d) waiting too long for "
2082 "pmc to be free", __LINE__,
2083 PMC_TO_ROWINDEX(pm), pm->pm_runcount));
2084#endif
2085 pmc_force_context_switch();
2086 }
2087}
2088
2089/*
2090 * This function does the following things:
2091 *
2092 * - detaches the PMC from hardware
2093 * - unlinks all target threads that were attached to it
2094 * - removes the PMC from its owner's list
2095 * - destroy's the PMC private mutex
2096 *
2097 * Once this function completes, the given pmc pointer can be safely
2098 * FREE'd by the caller.
2099 */
2100
2101static void
2102pmc_release_pmc_descriptor(struct pmc *pm)
2103{
2104 enum pmc_mode mode;
2105 struct pmc_hw *phw;
2106 u_int adjri, ri, cpu;
2107 struct pmc_owner *po;
2108 struct pmc_binding pb;
2109 struct pmc_process *pp;
2110 struct pmc_classdep *pcd;
2111 struct pmc_target *ptgt, *tmp;
2112
2113 sx_assert(&pmc_sx, SX_XLOCKED);
2114
2115 KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
2116
2117 ri = PMC_TO_ROWINDEX(pm);
2118 pcd = pmc_ri_to_classdep(md, ri, &adjri);
2119 mode = PMC_TO_MODE(pm);
2120
2121 PMCDBG(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
2122 mode);
2123
2124 /*
2125 * First, we take the PMC off hardware.
2126 */
2127 cpu = 0;
2128 if (PMC_IS_SYSTEM_MODE(mode)) {
2129
2130 /*
2131 * A system mode PMC runs on a specific CPU. Switch
2132 * to this CPU and turn hardware off.
2133 */
2134 pmc_save_cpu_binding(&pb);
2135
2136 cpu = PMC_TO_CPU(pm);
2137
2138 pmc_select_cpu(cpu);
2139
2140 /* switch off non-stalled CPUs */
2141 if (pm->pm_state == PMC_STATE_RUNNING &&
2142 pm->pm_stalled == 0) {
2143
2144 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
2145
2146 KASSERT(phw->phw_pmc == pm,
2147 ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
2148 __LINE__, ri, phw->phw_pmc, pm));
2149 PMCDBG(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
2150
2151 critical_enter();
2152 pcd->pcd_stop_pmc(cpu, adjri);
2153 critical_exit();
2154 }
2155
2156 PMCDBG(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
2157
2158 critical_enter();
2159 pcd->pcd_config_pmc(cpu, adjri, NULL);
2160 critical_exit();
2161
2162 /* adjust the global and process count of SS mode PMCs */
2163 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
2164 po = pm->pm_owner;
2165 po->po_sscount--;
2166 if (po->po_sscount == 0) {
2167 atomic_subtract_rel_int(&pmc_ss_count, 1);
2168 LIST_REMOVE(po, po_ssnext);
2169 }
2170 }
2171
2172 pm->pm_state = PMC_STATE_DELETED;
2173
2174 pmc_restore_cpu_binding(&pb);
2175
2176 /*
2177 * We could have references to this PMC structure in
2178 * the per-cpu sample queues. Wait for the queue to
2179 * drain.
2180 */
2181 pmc_wait_for_pmc_idle(pm);
2182
2183 } else if (PMC_IS_VIRTUAL_MODE(mode)) {
2184
2185 /*
2186 * A virtual PMC could be running on multiple CPUs at
2187 * a given instant.
2188 *
2189 * By marking its state as DELETED, we ensure that
2190 * this PMC is never further scheduled on hardware.
2191 *
2192 * Then we wait till all CPUs are done with this PMC.
2193 */
2194 pm->pm_state = PMC_STATE_DELETED;
2195
2196
2197 /* Wait for the PMCs runcount to come to zero. */
2198 pmc_wait_for_pmc_idle(pm);
2199
2200 /*
2201 * At this point the PMC is off all CPUs and cannot be
2202 * freshly scheduled onto a CPU. It is now safe to
2203 * unlink all targets from this PMC. If a
2204 * process-record's refcount falls to zero, we remove
2205 * it from the hash table. The module-wide SX lock
2206 * protects us from races.
2207 */
2208 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
2209 pp = ptgt->pt_process;
2210 pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
2211
2212 PMCDBG(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
2213
2214 /*
2215 * If the target process record shows that no
2216 * PMCs are attached to it, reclaim its space.
2217 */
2218
2219 if (pp->pp_refcnt == 0) {
2220 pmc_remove_process_descriptor(pp);
2221 free(pp, M_PMC);
2222 }
2223 }
2224
2225 cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
2226
2227 }
2228
2229 /*
2230 * Release any MD resources
2231 */
2232 (void) pcd->pcd_release_pmc(cpu, adjri, pm);
2233
2234 /*
2235 * Update row disposition
2236 */
2237
2238 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
2239 PMC_UNMARK_ROW_STANDALONE(ri);
2240 else
2241 PMC_UNMARK_ROW_THREAD(ri);
2242
2243 /* unlink from the owner's list */
2244 if (pm->pm_owner) {
2245 LIST_REMOVE(pm, pm_next);
2246 pm->pm_owner = NULL;
2247 }
2248
2249 pmc_destroy_pmc_descriptor(pm);
2250}
2251
2252/*
2253 * Register an owner and a pmc.
2254 */
2255
2256static int
2257pmc_register_owner(struct proc *p, struct pmc *pmc)
2258{
2259 struct pmc_owner *po;
2260
2261 sx_assert(&pmc_sx, SX_XLOCKED);
2262
2263 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2264 if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
2265 return ENOMEM;
2266
2267 KASSERT(pmc->pm_owner == NULL,
2268 ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
2269 pmc->pm_owner = po;
2270
2271 LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
2272
2273 PROC_LOCK(p);
2274 p->p_flag |= P_HWPMC;
2275 PROC_UNLOCK(p);
2276
2277 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
2278 pmclog_process_pmcallocate(pmc);
2279
2280 PMCDBG(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
2281 po, pmc);
2282
2283 return 0;
2284}
2285
2286/*
2287 * Return the current row disposition:
2288 * == 0 => FREE
2289 * > 0 => PROCESS MODE
2290 * < 0 => SYSTEM MODE
2291 */
2292
2293int
2294pmc_getrowdisp(int ri)
2295{
2296 return pmc_pmcdisp[ri];
2297}
2298
2299/*
2300 * Check if a PMC at row index 'ri' can be allocated to the current
2301 * process.
2302 *
2303 * Allocation can fail if:
2304 * - the current process is already being profiled by a PMC at index 'ri',
2305 * attached to it via OP_PMCATTACH.
2306 * - the current process has already allocated a PMC at index 'ri'
2307 * via OP_ALLOCATE.
2308 */
2309
2310static int
2311pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
2312{
2313 enum pmc_mode mode;
2314 struct pmc *pm;
2315 struct pmc_owner *po;
2316 struct pmc_process *pp;
2317
2318 PMCDBG(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
2319 "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
2320
2321 /*
2322 * We shouldn't have already allocated a process-mode PMC at
2323 * row index 'ri'.
2324 *
2325 * We shouldn't have allocated a system-wide PMC on the same
2326 * CPU and same RI.
2327 */
2328 if ((po = pmc_find_owner_descriptor(p)) != NULL)
2329 LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2330 if (PMC_TO_ROWINDEX(pm) == ri) {
2331 mode = PMC_TO_MODE(pm);
2332 if (PMC_IS_VIRTUAL_MODE(mode))
2333 return EEXIST;
2334 if (PMC_IS_SYSTEM_MODE(mode) &&
2335 (int) PMC_TO_CPU(pm) == cpu)
2336 return EEXIST;
2337 }
2338 }
2339
2340 /*
2341 * We also shouldn't be the target of any PMC at this index
2342 * since otherwise a PMC_ATTACH to ourselves will fail.
2343 */
2344 if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
2345 if (pp->pp_pmcs[ri].pp_pmc)
2346 return EEXIST;
2347
2348 PMCDBG(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
2349 p, p->p_pid, p->p_comm, ri);
2350
2351 return 0;
2352}
2353
2354/*
2355 * Check if a given PMC at row index 'ri' can be currently used in
2356 * mode 'mode'.
2357 */
2358
2359static int
2360pmc_can_allocate_row(int ri, enum pmc_mode mode)
2361{
2362 enum pmc_disp disp;
2363
2364 sx_assert(&pmc_sx, SX_XLOCKED);
2365
2366 PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
2367
2368 if (PMC_IS_SYSTEM_MODE(mode))
2369 disp = PMC_DISP_STANDALONE;
2370 else
2371 disp = PMC_DISP_THREAD;
2372
2373 /*
2374 * check disposition for PMC row 'ri':
2375 *
2376 * Expected disposition Row-disposition Result
2377 *
2378 * STANDALONE STANDALONE or FREE proceed
2379 * STANDALONE THREAD fail
2380 * THREAD THREAD or FREE proceed
2381 * THREAD STANDALONE fail
2382 */
2383
2384 if (!PMC_ROW_DISP_IS_FREE(ri) &&
2385 !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
2386 !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
2387 return EBUSY;
2388
2389 /*
2390 * All OK
2391 */
2392
2393 PMCDBG(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
2394
2395 return 0;
2396
2397}
2398
2399/*
2400 * Find a PMC descriptor with user handle 'pmcid' for thread 'td'.
2401 */
2402
2403static struct pmc *
2404pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
2405{
2406 struct pmc *pm;
2407
2408 KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
2409 ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
2410 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
2411
2412 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2413 if (pm->pm_id == pmcid)
2414 return pm;
2415
2416 return NULL;
2417}
2418
2419static int
2420pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
2421{
2422
2423 struct pmc *pm;
2424 struct pmc_owner *po;
2425
2426 PMCDBG(PMC,FND,1, "find-pmc id=%d", pmcid);
2427
2428 if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL)
2429 return ESRCH;
2430
2431 if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
2432 return EINVAL;
2433
2434 PMCDBG(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
2435
2436 *pmc = pm;
2437 return 0;
2438}
2439
2440/*
2441 * Start a PMC.
2442 */
2443
2444static int
2445pmc_start(struct pmc *pm)
2446{
2447 enum pmc_mode mode;
2448 struct pmc_owner *po;
2449 struct pmc_binding pb;
2450 struct pmc_classdep *pcd;
2451 int adjri, error, cpu, ri;
2452
2453 KASSERT(pm != NULL,
2454 ("[pmc,%d] null pm", __LINE__));
2455
2456 mode = PMC_TO_MODE(pm);
2457 ri = PMC_TO_ROWINDEX(pm);
2458 pcd = pmc_ri_to_classdep(md, ri, &adjri);
2459
2460 error = 0;
2461
2462 PMCDBG(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
2463
2464 po = pm->pm_owner;
2465
2466 /*
2467 * Disallow PMCSTART if a logfile is required but has not been
2468 * configured yet.
2469 */
2470 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
2471 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
2472 return (EDOOFUS); /* programming error */
2473
2474 /*
2475 * If this is a sampling mode PMC, log mapping information for
2476 * the kernel modules that are currently loaded.
2477 */
2478 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
2479 pmc_log_kernel_mappings(pm);
2480
2481 if (PMC_IS_VIRTUAL_MODE(mode)) {
2482
2483 /*
2484 * If a PMCATTACH has never been done on this PMC,
2485 * attach it to its owner process.
2486 */
2487
2488 if (LIST_EMPTY(&pm->pm_targets))
2489 error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
2490 pmc_attach_process(po->po_owner, pm);
2491
2492 /*
2493 * If the PMC is attached to its owner, then force a context
2494 * switch to ensure that the MD state gets set correctly.
2495 */
2496
2497 if (error == 0) {
2498 pm->pm_state = PMC_STATE_RUNNING;
2499 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
2500 pmc_force_context_switch();
2501 }
2502
2503 return (error);
2504 }
2505
2506
2507 /*
2508 * A system-wide PMC.
2509 *
2510 * Add the owner to the global list if this is a system-wide
2511 * sampling PMC.
2512 */
2513
2514 if (mode == PMC_MODE_SS) {
2515 if (po->po_sscount == 0) {
2516 LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
2517 atomic_add_rel_int(&pmc_ss_count, 1);
2518 PMCDBG(PMC,OPS,1, "po=%p in global list", po);
2519 }
2520 po->po_sscount++;
2521 }
2522
2523 /* Log mapping information for all processes in the system. */
2524 pmc_log_all_process_mappings(po);
2525
2526 /*
2527 * Move to the CPU associated with this
2528 * PMC, and start the hardware.
2529 */
2530
2531 pmc_save_cpu_binding(&pb);
2532
2533 cpu = PMC_TO_CPU(pm);
2534
2535 if (!pmc_cpu_is_active(cpu))
2536 return (ENXIO);
2537
2538 pmc_select_cpu(cpu);
2539
2540 /*
2541 * global PMCs are configured at allocation time
2542 * so write out the initial value and start the PMC.
2543 */
2544
2545 pm->pm_state = PMC_STATE_RUNNING;
2546
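	/*
	 * A sampling PMC is primed with its reload count (the number of
	 * events after which it is expected to interrupt), while a
	 * counting PMC simply resumes from the saved initial value.
	 */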
2547 critical_enter();
2548 if ((error = pcd->pcd_write_pmc(cpu, adjri,
2549 PMC_IS_SAMPLING_MODE(mode) ?
2550 pm->pm_sc.pm_reloadcount :
2551 pm->pm_sc.pm_initial)) == 0)
2552 error = pcd->pcd_start_pmc(cpu, adjri);
2553 critical_exit();
2554
2555 pmc_restore_cpu_binding(&pb);
2556
2557 return (error);
2558}
2559
2560/*
2561 * Stop a PMC.
2562 */
2563
2564static int
2565pmc_stop(struct pmc *pm)
2566{
2567 struct pmc_owner *po;
2568 struct pmc_binding pb;
2569 struct pmc_classdep *pcd;
2570 int adjri, cpu, error, ri;
2571
2572 KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
2573
2574 PMCDBG(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
2575 PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
2576
2577 pm->pm_state = PMC_STATE_STOPPED;
2578
2579 /*
2580 * If the PMC is a virtual mode one, changing the state to
2581 * non-RUNNING is enough to ensure that the PMC never gets
2582 * scheduled.
2583 *
2584	 * If this PMC is currently running on a CPU, then it will be
2585 * handled correctly at the time its target process is context
2586 * switched out.
2587 */
2588
2589 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
2590 return 0;
2591
2592 /*
2593 * A system-mode PMC. Move to the CPU associated with
2594 * this PMC, and stop the hardware. We update the
2595 * 'initial count' so that a subsequent PMCSTART will
2596 * resume counting from the current hardware count.
2597 */
2598
2599 pmc_save_cpu_binding(&pb);
2600
2601 cpu = PMC_TO_CPU(pm);
2602
2603 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
2604 ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
2605
2606 if (!pmc_cpu_is_active(cpu))
2607 return ENXIO;
2608
2609 pmc_select_cpu(cpu);
2610
2611 ri = PMC_TO_ROWINDEX(pm);
2612 pcd = pmc_ri_to_classdep(md, ri, &adjri);
2613
2614 critical_enter();
2615 if ((error = pcd->pcd_stop_pmc(cpu, adjri)) == 0)
2616 error = pcd->pcd_read_pmc(cpu, adjri, &pm->pm_sc.pm_initial);
2617 critical_exit();
2618
2619 pmc_restore_cpu_binding(&pb);
2620
2621 po = pm->pm_owner;
2622
2623 /* remove this owner from the global list of SS PMC owners */
2624 if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
2625 po->po_sscount--;
2626 if (po->po_sscount == 0) {
2627 atomic_subtract_rel_int(&pmc_ss_count, 1);
2628 LIST_REMOVE(po, po_ssnext);
2629 PMCDBG(PMC,OPS,2,"po=%p removed from global list", po);
2630 }
2631 }
2632
2633 return (error);
2634}
2635
2636
2637#ifdef DEBUG
2638static const char *pmc_op_to_name[] = {
2639#undef __PMC_OP
2640#define __PMC_OP(N, D) #N ,
2641 __PMC_OPS()
2642 NULL
2643};
2644#endif
2645
2646/*
2647 * The syscall interface
2648 */
2649
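/*
 * PMC_GET_SX_XLOCK() acquires the global 'pmc_sx' lock exclusively and
 * returns the supplied value if the module is being unloaded (pmc_hook
 * is NULL).  PMC_DOWNGRADE_SX() downgrades the exclusive lock to a
 * shared lock and records the fact, so that pmc_syscall_handler()
 * releases the correct lock on exit.
 */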
2650#define PMC_GET_SX_XLOCK(...) do { \
2651 sx_xlock(&pmc_sx); \
2652 if (pmc_hook == NULL) { \
2653 sx_xunlock(&pmc_sx); \
2654 return __VA_ARGS__; \
2655 } \
2656} while (0)
2657
2658#define PMC_DOWNGRADE_SX() do { \
2659 sx_downgrade(&pmc_sx); \
2660 is_sx_downgraded = 1; \
2661} while (0)
2662
2663static int
2664pmc_syscall_handler(struct thread *td, void *syscall_args)
2665{
2666 int error, is_sx_downgraded, op;
2667 struct pmc_syscall_args *c;
2668 void *arg;
2669
2670 PMC_GET_SX_XLOCK(ENOSYS);
2671
2672 DROP_GIANT();
2673
2674 is_sx_downgraded = 0;
2675
2676 c = (struct pmc_syscall_args *) syscall_args;
2677
2678 op = c->pmop_code;
2679 arg = c->pmop_data;
2680
2681 PMCDBG(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
2682 pmc_op_to_name[op], arg);
2683
2684 error = 0;
2685 atomic_add_int(&pmc_stats.pm_syscalls, 1);
2686
2687 switch(op)
2688 {
2689
2690
2691 /*
2692 * Configure a log file.
2693 *
2694 * XXX This OP will be reworked.
2695 */
2696
2697 case PMC_OP_CONFIGURELOG:
2698 {
2699 struct proc *p;
2700 struct pmc *pm;
2701 struct pmc_owner *po;
2702 struct pmc_op_configurelog cl;
2703
2704 sx_assert(&pmc_sx, SX_XLOCKED);
2705
2706 if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
2707 break;
2708
2709 /* mark this process as owning a log file */
2710 p = td->td_proc;
2711 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2712 if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
2713 error = ENOMEM;
2714 break;
2715 }
2716
2717 /*
2718 * If a valid fd was passed in, try to configure that,
2719 * otherwise if 'fd' was less than zero and there was
2720 * a log file configured, flush its buffers and
2721 * de-configure it.
2722 */
2723 if (cl.pm_logfd >= 0)
2724 error = pmclog_configure_log(md, po, cl.pm_logfd);
2725 else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
2726 pmclog_process_closelog(po);
2727 error = pmclog_flush(po);
2728 if (error == 0) {
2729 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2730 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
2731 pm->pm_state == PMC_STATE_RUNNING)
2732 pmc_stop(pm);
2733 error = pmclog_deconfigure_log(po);
2734 }
2735 } else
2736 error = EINVAL;
2737
2738 if (error)
2739 break;
2740 }
2741 break;
2742
2743
2744 /*
2745 * Flush a log file.
2746 */
2747
2748 case PMC_OP_FLUSHLOG:
2749 {
2750 struct pmc_owner *po;
2751
2752 sx_assert(&pmc_sx, SX_XLOCKED);
2753
2754 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
2755 error = EINVAL;
2756 break;
2757 }
2758
2759 error = pmclog_flush(po);
2760 }
2761 break;
2762
2763 /*
2764 * Retrieve hardware configuration.
2765 */
2766
2767 case PMC_OP_GETCPUINFO: /* CPU information */
2768 {
2769 struct pmc_op_getcpuinfo gci;
2770 struct pmc_classinfo *pci;
2771 struct pmc_classdep *pcd;
2772 int cl;
2773
2774 gci.pm_cputype = md->pmd_cputype;
2775 gci.pm_ncpu = pmc_cpu_max();
2776 gci.pm_npmc = md->pmd_npmc;
2777 gci.pm_nclass = md->pmd_nclass;
2778 pci = gci.pm_classes;
2779 pcd = md->pmd_classdep;
2780 for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) {
2781 pci->pm_caps = pcd->pcd_caps;
2782 pci->pm_class = pcd->pcd_class;
2783 pci->pm_width = pcd->pcd_width;
2784 pci->pm_num = pcd->pcd_num;
2785 }
2786 error = copyout(&gci, arg, sizeof(gci));
2787 }
2788 break;
2789
2790
2791 /*
2792 * Get module statistics
2793 */
2794
2795 case PMC_OP_GETDRIVERSTATS:
2796 {
2797 struct pmc_op_getdriverstats gms;
2798
2799 bcopy(&pmc_stats, &gms, sizeof(gms));
2800 error = copyout(&gms, arg, sizeof(gms));
2801 }
2802 break;
2803
2804
2805 /*
2806 * Retrieve module version number
2807 */
2808
2809 case PMC_OP_GETMODULEVERSION:
2810 {
2811 uint32_t cv, modv;
2812
2813 /* retrieve the client's idea of the ABI version */
2814 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
2815 break;
2816 /* don't service clients newer than our driver */
2817 modv = PMC_VERSION;
2818 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
2819 error = EPROGMISMATCH;
2820 break;
2821 }
2822 error = copyout(&modv, arg, sizeof(int));
2823 }
2824 break;
2825
2826
2827 /*
2828 * Retrieve the state of all the PMCs on a given
2829 * CPU.
2830 */
2831
2832 case PMC_OP_GETPMCINFO:
2833 {
2834 int ari;
2835 struct pmc *pm;
2836 size_t pmcinfo_size;
2837 uint32_t cpu, n, npmc;
2838 struct pmc_owner *po;
2839 struct pmc_binding pb;
2840 struct pmc_classdep *pcd;
2841 struct pmc_info *p, *pmcinfo;
2842 struct pmc_op_getpmcinfo *gpi;
2843
2844 PMC_DOWNGRADE_SX();
2845
2846 gpi = (struct pmc_op_getpmcinfo *) arg;
2847
2848 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
2849 break;
2850
2851 if (cpu >= pmc_cpu_max()) {
2852 error = EINVAL;
2853 break;
2854 }
2855
2856 if (!pmc_cpu_is_active(cpu)) {
2857 error = ENXIO;
2858 break;
2859 }
2860
2861 /* switch to CPU 'cpu' */
2862 pmc_save_cpu_binding(&pb);
2863 pmc_select_cpu(cpu);
2864
2865 npmc = md->pmd_npmc;
2866
2867 pmcinfo_size = npmc * sizeof(struct pmc_info);
2868 pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK);
2869
2870 p = pmcinfo;
2871
2872 for (n = 0; n < md->pmd_npmc; n++, p++) {
2873
2874 pcd = pmc_ri_to_classdep(md, n, &ari);
2875
2876 KASSERT(pcd != NULL,
2877 ("[pmc,%d] null pcd ri=%d", __LINE__, n));
2878
2879 if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0)
2880 break;
2881
2882 if (PMC_ROW_DISP_IS_STANDALONE(n))
2883 p->pm_rowdisp = PMC_DISP_STANDALONE;
2884 else if (PMC_ROW_DISP_IS_THREAD(n))
2885 p->pm_rowdisp = PMC_DISP_THREAD;
2886 else
2887 p->pm_rowdisp = PMC_DISP_FREE;
2888
2889 p->pm_ownerpid = -1;
2890
2891 if (pm == NULL) /* no PMC associated */
2892 continue;
2893
2894 po = pm->pm_owner;
2895
2896 KASSERT(po->po_owner != NULL,
2897 ("[pmc,%d] pmc_owner had a null proc pointer",
2898 __LINE__));
2899
2900 p->pm_ownerpid = po->po_owner->p_pid;
2901 p->pm_mode = PMC_TO_MODE(pm);
2902 p->pm_event = pm->pm_event;
2903 p->pm_flags = pm->pm_flags;
2904
2905 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
2906 p->pm_reloadcount =
2907 pm->pm_sc.pm_reloadcount;
2908 }
2909
2910 pmc_restore_cpu_binding(&pb);
2911
2912 /* now copy out the PMC info collected */
2913 if (error == 0)
2914 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
2915
2916 free(pmcinfo, M_PMC);
2917 }
2918 break;
2919
2920
2921 /*
2922	 * Set the administrative state of a PMC, i.e., whether
2923 * the PMC is to be used or not.
2924 */
2925
2926 case PMC_OP_PMCADMIN:
2927 {
2928 int cpu, ri;
2929 enum pmc_state request;
2930 struct pmc_cpu *pc;
2931 struct pmc_hw *phw;
2932 struct pmc_op_pmcadmin pma;
2933 struct pmc_binding pb;
2934
2935 sx_assert(&pmc_sx, SX_XLOCKED);
2936
2937 KASSERT(td == curthread,
2938 ("[pmc,%d] td != curthread", __LINE__));
2939
2940 error = priv_check(td, PRIV_PMC_MANAGE);
2941 if (error)
2942 break;
2943
2944 if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
2945 break;
2946
2947 cpu = pma.pm_cpu;
2948
2949 if (cpu < 0 || cpu >= (int) pmc_cpu_max()) {
2950 error = EINVAL;
2951 break;
2952 }
2953
2954 if (!pmc_cpu_is_active(cpu)) {
2955 error = ENXIO;
2956 break;
2957 }
2958
2959 request = pma.pm_state;
2960
2961 if (request != PMC_STATE_DISABLED &&
2962 request != PMC_STATE_FREE) {
2963 error = EINVAL;
2964 break;
2965 }
2966
2967 ri = pma.pm_pmc; /* pmc id == row index */
2968 if (ri < 0 || ri >= (int) md->pmd_npmc) {
2969 error = EINVAL;
2970 break;
2971 }
2972
2973 /*
2974 * We can't disable a PMC with a row-index allocated
2975 * for process virtual PMCs.
2976 */
2977
2978 if (PMC_ROW_DISP_IS_THREAD(ri) &&
2979 request == PMC_STATE_DISABLED) {
2980 error = EBUSY;
2981 break;
2982 }
2983
2984 /*
2985 * otherwise, this PMC on this CPU is either free or
2986 * in system-wide mode.
2987 */
2988
2989 pmc_save_cpu_binding(&pb);
2990 pmc_select_cpu(cpu);
2991
2992 pc = pmc_pcpu[cpu];
2993 phw = pc->pc_hwpmcs[ri];
2994
2995 /*
2996 * XXX do we need some kind of 'forced' disable?
2997 */
2998
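		/*
		 * Only a hardware row with no PMC currently configured on
		 * it may have its administrative state changed; otherwise
		 * the request fails with EBUSY below.
		 */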
2999 if (phw->phw_pmc == NULL) {
3000 if (request == PMC_STATE_DISABLED &&
3001 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
3002 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
3003 PMC_MARK_ROW_STANDALONE(ri);
3004 } else if (request == PMC_STATE_FREE &&
3005 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
3006 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
3007 PMC_UNMARK_ROW_STANDALONE(ri);
3008 }
3009 /* other cases are a no-op */
3010 } else
3011 error = EBUSY;
3012
3013 pmc_restore_cpu_binding(&pb);
3014 }
3015 break;
3016
3017
3018 /*
3019 * Allocate a PMC.
3020 */
3021
3022 case PMC_OP_PMCALLOCATE:
3023 {
3024 int adjri, n;
3025 u_int cpu;
3026 uint32_t caps;
3027 struct pmc *pmc;
3028 enum pmc_mode mode;
3029 struct pmc_hw *phw;
3030 struct pmc_binding pb;
3031 struct pmc_classdep *pcd;
3032 struct pmc_op_pmcallocate pa;
3033
3034 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
3035 break;
3036
3037 caps = pa.pm_caps;
3038 mode = pa.pm_mode;
3039 cpu = pa.pm_cpu;
3040
3041 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
3042 mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
3043 (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) {
3044 error = EINVAL;
3045 break;
3046 }
3047
3048 /*
3049 * Virtual PMCs should only ask for a default CPU.
3050 * System mode PMCs need to specify a non-default CPU.
3051 */
3052
3053 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
3054 (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
3055 error = EINVAL;
3056 break;
3057 }
3058
3059 /*
3060 * Check that an inactive CPU is not being asked for.
3061 */
3062
3063 if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) {
3064 error = ENXIO;
3065 break;
3066 }
3067
3068 /*
3069 * Refuse an allocation for a system-wide PMC if this
3070 * process has been jailed, or if this process lacks
3071 * super-user credentials and the sysctl tunable
3072 * 'security.bsd.unprivileged_syspmcs' is zero.
3073 */
3074
3075 if (PMC_IS_SYSTEM_MODE(mode)) {
3076 if (jailed(curthread->td_ucred)) {
3077 error = EPERM;
3078 break;
3079 }
3080 if (!pmc_unprivileged_syspmcs) {
3081 error = priv_check(curthread,
3082 PRIV_PMC_SYSTEM);
3083 if (error)
3084 break;
3085 }
3086 }
3087
3088 if (error)
3089 break;
3090
3091 /*
3092 * Look for valid values for 'pm_flags'
3093 */
3094
3095 if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
3096 PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN)) != 0) {
3097 error = EINVAL;
3098 break;
3099 }
3100
3101 /* process logging options are not allowed for system PMCs */
3102 if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
3103 (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
3104 error = EINVAL;
3105 break;
3106 }
3107
3108 /*
3109 * All sampling mode PMCs need to be able to interrupt the
3110 * CPU.
3111 */
3112 if (PMC_IS_SAMPLING_MODE(mode))
3113 caps |= PMC_CAP_INTERRUPT;
3114
3115 /* A valid class specifier should have been passed in. */
3116 for (n = 0; n < md->pmd_nclass; n++)
3117 if (md->pmd_classdep[n].pcd_class == pa.pm_class)
3118 break;
3119 if (n == md->pmd_nclass) {
3120 error = EINVAL;
3121 break;
3122 }
3123
3124 /* The requested PMC capabilities should be feasible. */
3125 if ((md->pmd_classdep[n].pcd_caps & caps) != caps) {
3126 error = EOPNOTSUPP;
3127 break;
3128 }
3129
3130 PMCDBG(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
3131 pa.pm_ev, caps, mode, cpu);
3132
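	/*
	 * Allocate a descriptor and provisionally encode its id; the row
	 * index field stays PMC_ID_INVALID until an actual row is chosen
	 * by the allocation loops below.
	 */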
3133 pmc = pmc_allocate_pmc_descriptor();
3134 pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
3135 PMC_ID_INVALID);
3136 pmc->pm_event = pa.pm_ev;
3137 pmc->pm_state = PMC_STATE_FREE;
3138 pmc->pm_caps = caps;
3139 pmc->pm_flags = pa.pm_flags;
3140
3141 /* switch thread to CPU 'cpu' */
3142 pmc_save_cpu_binding(&pb);
3143
3144#define PMC_IS_SHAREABLE_PMC(cpu, n) \
3145 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
3146 PMC_PHW_FLAG_IS_SHAREABLE)
3147#define PMC_IS_UNALLOCATED(cpu, n) \
3148 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
3149
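	/*
	 * Scan the row indices in order and settle on the first row that
	 * both pmc_can_allocate_row() and pmc_can_allocate_rowindex()
	 * permit and that the MD layer successfully allocates.  For
	 * system-wide PMCs the hardware register on the target CPU must
	 * additionally be unallocated or marked shareable.
	 */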
3150 if (PMC_IS_SYSTEM_MODE(mode)) {
3151 pmc_select_cpu(cpu);
3152 for (n = 0; n < (int) md->pmd_npmc; n++) {
3153 pcd = pmc_ri_to_classdep(md, n, &adjri);
3154 if (pmc_can_allocate_row(n, mode) == 0 &&
3155 pmc_can_allocate_rowindex(
3156 curthread->td_proc, n, cpu) == 0 &&
3157 (PMC_IS_UNALLOCATED(cpu, n) ||
3158 PMC_IS_SHAREABLE_PMC(cpu, n)) &&
3159 pcd->pcd_allocate_pmc(cpu, adjri, pmc,
3160 &pa) == 0)
3161 break;
3162 }
3163 } else {
3164 /* Process virtual mode */
3165 for (n = 0; n < (int) md->pmd_npmc; n++) {
3166 pcd = pmc_ri_to_classdep(md, n, &adjri);
3167 if (pmc_can_allocate_row(n, mode) == 0 &&
3168 pmc_can_allocate_rowindex(
3169 curthread->td_proc, n,
3170 PMC_CPU_ANY) == 0 &&
3171 pcd->pcd_allocate_pmc(curthread->td_oncpu,
3172 adjri, pmc, &pa) == 0)
3173 break;
3174 }
3175 }
3176
3177#undef PMC_IS_UNALLOCATED
3178#undef PMC_IS_SHAREABLE_PMC
3179
3180 pmc_restore_cpu_binding(&pb);
3181
3182 if (n == (int) md->pmd_npmc) {
3183 pmc_destroy_pmc_descriptor(pmc);
3184 free(pmc, M_PMC);
3185 pmc = NULL;
3186 error = EINVAL;
3187 break;
3188 }
3189
3190 /* Fill in the correct value in the ID field */
3191 pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
3192
3193 PMCDBG(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
3194 pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
3195
3196 /* Process mode PMCs with logging enabled need log files */
3197 if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
3198 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3199
3200 /* All system mode sampling PMCs require a log file */
3201 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
3202 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3203
3204 /*
3205 * Configure global pmc's immediately
3206 */
3207
3208 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
3209
3210 pmc_save_cpu_binding(&pb);
3211 pmc_select_cpu(cpu);
3212
3213 phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
3214 pcd = pmc_ri_to_classdep(md, n, &adjri);
3215
3216 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
3217 (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) {
3218 (void) pcd->pcd_release_pmc(cpu, adjri, pmc);
3219 pmc_destroy_pmc_descriptor(pmc);
3220 free(pmc, M_PMC);
3221 pmc = NULL;
3222 pmc_restore_cpu_binding(&pb);
3223 error = EPERM;
3224 break;
3225 }
3226
3227 pmc_restore_cpu_binding(&pb);
3228 }
3229
3230 pmc->pm_state = PMC_STATE_ALLOCATED;
3231
3232 /*
3233 * mark row disposition
3234 */
3235
3236 if (PMC_IS_SYSTEM_MODE(mode))
3237 PMC_MARK_ROW_STANDALONE(n);
3238 else
3239 PMC_MARK_ROW_THREAD(n);
3240
3241 /*
3242 * Register this PMC with the current thread as its owner.
3243 */
3244
3245 if ((error =
3246 pmc_register_owner(curthread->td_proc, pmc)) != 0) {
3247 pmc_release_pmc_descriptor(pmc);
3248 free(pmc, M_PMC);
3249 pmc = NULL;
3250 break;
3251 }
3252
3253 /*
3254	 * Return the id of the allocated PMC.
3255 */
3256
3257 pa.pm_pmcid = pmc->pm_id;
3258
3259 error = copyout(&pa, arg, sizeof(pa));
3260 }
3261 break;
3262
3263
3264 /*
3265 * Attach a PMC to a process.
3266 */
3267
3268 case PMC_OP_PMCATTACH:
3269 {
3270 struct pmc *pm;
3271 struct proc *p;
3272 struct pmc_op_pmcattach a;
3273
3274 sx_assert(&pmc_sx, SX_XLOCKED);
3275
3276 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3277 break;
3278
3279 if (a.pm_pid < 0) {
3280 error = EINVAL;
3281 break;
3282 } else if (a.pm_pid == 0)
3283 a.pm_pid = td->td_proc->p_pid;
3284
3285 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3286 break;
3287
3288 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
3289 error = EINVAL;
3290 break;
3291 }
3292
3293 /* PMCs may be (re)attached only when allocated or stopped */
3294 if (pm->pm_state == PMC_STATE_RUNNING) {
3295 error = EBUSY;
3296 break;
3297 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
3298 pm->pm_state != PMC_STATE_STOPPED) {
3299 error = EINVAL;
3300 break;
3301 }
3302
3303 /* lookup pid */
3304 if ((p = pfind(a.pm_pid)) == NULL) {
3305 error = ESRCH;
3306 break;
3307 }
3308
3309 /*
3310		 * Ignore processes that are in the process of exiting.
3311 */
3312 if (p->p_flag & P_WEXIT) {
3313 error = ESRCH;
3314 PROC_UNLOCK(p); /* pfind() returns a locked process */
3315 break;
3316 }
3317
3318 /*
3319 * we are allowed to attach a PMC to a process if
3320 * we can debug it.
3321 */
3322 error = p_candebug(curthread, p);
3323
3324 PROC_UNLOCK(p);
3325
3326 if (error == 0)
3327 error = pmc_attach_process(p, pm);
3328 }
3329 break;
3330
3331
3332 /*
3333 * Detach an attached PMC from a process.
3334 */
3335
3336 case PMC_OP_PMCDETACH:
3337 {
3338 struct pmc *pm;
3339 struct proc *p;
3340 struct pmc_op_pmcattach a;
3341
3342 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3343 break;
3344
3345 if (a.pm_pid < 0) {
3346 error = EINVAL;
3347 break;
3348 } else if (a.pm_pid == 0)
3349 a.pm_pid = td->td_proc->p_pid;
3350
3351 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3352 break;
3353
3354 if ((p = pfind(a.pm_pid)) == NULL) {
3355 error = ESRCH;
3356 break;
3357 }
3358
3359 /*
3360 * Treat processes that are in the process of exiting
3361 * as if they were not present.
3362 */
3363
3364 if (p->p_flag & P_WEXIT)
3365 error = ESRCH;
3366
3367 PROC_UNLOCK(p); /* pfind() returns a locked process */
3368
3369 if (error == 0)
3370 error = pmc_detach_process(p, pm);
3371 }
3372 break;
3373
3374
3375 /*
3376 * Retrieve the MSR number associated with the counter
3377 * 'pmc_id'. This allows processes to directly use RDPMC
3378 * instructions to read their PMCs, without the overhead of a
3379 * system call.
3380 */
3381
3382 case PMC_OP_PMCGETMSR:
3383 {
3384 int adjri, ri;
3385 struct pmc *pm;
3386 struct pmc_target *pt;
3387 struct pmc_op_getmsr gm;
3388 struct pmc_classdep *pcd;
3389
3390 PMC_DOWNGRADE_SX();
3391
3392 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
3393 break;
3394
3395 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
3396 break;
3397
3398 /*
3399 * The allocated PMC has to be a process virtual PMC,
3400 * i.e., of type MODE_T[CS]. Global PMCs can only be
3401 * read using the PMCREAD operation since they may be
3402 * allocated on a different CPU than the one we could
3403 * be running on at the time of the RDPMC instruction.
3404 *
3405 * The GETMSR operation is not allowed for PMCs that
3406 * are inherited across processes.
3407 */
3408
3409 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
3410 (pm->pm_flags & PMC_F_DESCENDANTS)) {
3411 error = EINVAL;
3412 break;
3413 }
3414
3415 /*
3416 * It only makes sense to use a RDPMC (or its
3417 * equivalent instruction on non-x86 architectures) on
3418 * a process that has allocated and attached a PMC to
3419 * itself. Conversely the PMC is only allowed to have
3420 * one process attached to it -- its owner.
3421 */
3422
3423 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
3424 LIST_NEXT(pt, pt_next) != NULL ||
3425 pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
3426 error = EINVAL;
3427 break;
3428 }
3429
3430 ri = PMC_TO_ROWINDEX(pm);
3431 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3432
3433 /* PMC class has no 'GETMSR' support */
3434 if (pcd->pcd_get_msr == NULL) {
3435 error = ENOSYS;
3436 break;
3437 }
3438
3439 if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0)
3440 break;
3441
3442 if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
3443 break;
3444
3445 /*
3446 * Mark our process as using MSRs. Update machine
3447 * state using a forced context switch.
3448 */
3449
3450 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
3451 pmc_force_context_switch();
3452
3453 }
3454 break;
3455
3456 /*
3457 * Release an allocated PMC
3458 */
3459
3460 case PMC_OP_PMCRELEASE:
3461 {
3462 pmc_id_t pmcid;
3463 struct pmc *pm;
3464 struct pmc_owner *po;
3465 struct pmc_op_simple sp;
3466
3467 /*
3468 * Find PMC pointer for the named PMC.
3469 *
3470 * Use pmc_release_pmc_descriptor() to switch off the
3471 * PMC, remove all its target threads, and remove the
3472 * PMC from its owner's list.
3473 *
3474 * Remove the owner record if this is the last PMC
3475 * owned.
3476 *
3477 * Free up space.
3478 */
3479
3480 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3481 break;
3482
3483 pmcid = sp.pm_pmcid;
3484
3485 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3486 break;
3487
3488 po = pm->pm_owner;
3489 pmc_release_pmc_descriptor(pm);
3490 pmc_maybe_remove_owner(po);
3491
3492 free(pm, M_PMC);
3493 }
3494 break;
3495
3496
3497 /*
3498 * Read and/or write a PMC.
3499 */
3500
3501 case PMC_OP_PMCRW:
3502 {
3503 int adjri;
3504 struct pmc *pm;
3505 uint32_t cpu, ri;
3506 pmc_value_t oldvalue;
3507 struct pmc_binding pb;
3508 struct pmc_op_pmcrw prw;
3509 struct pmc_classdep *pcd;
3510 struct pmc_op_pmcrw *pprw;
3511
3512 PMC_DOWNGRADE_SX();
3513
3514 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
3515 break;
3516
3517 ri = 0;
3518 PMCDBG(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
3519 prw.pm_flags);
3520
3521 /* must have at least one flag set */
3522 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
3523 error = EINVAL;
3524 break;
3525 }
3526
3527 /* locate pmc descriptor */
3528 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
3529 break;
3530
3531 /* Can't read a PMC that hasn't been started. */
3532 if (pm->pm_state != PMC_STATE_ALLOCATED &&
3533 pm->pm_state != PMC_STATE_STOPPED &&
3534 pm->pm_state != PMC_STATE_RUNNING) {
3535 error = EINVAL;
3536 break;
3537 }
3538
3539 /* writing a new value is allowed only for 'STOPPED' pmcs */
3540 if (pm->pm_state == PMC_STATE_RUNNING &&
3541 (prw.pm_flags & PMC_F_NEWVALUE)) {
3542 error = EBUSY;
3543 break;
3544 }
3545
3546 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
3547
3548 /*
3549 * If this PMC is attached to its owner (i.e.,
3550 * the process requesting this operation) and
3551 * is running, then attempt to get an
3552			 * up-to-date reading from hardware for a READ.
3553 * Writes are only allowed when the PMC is
3554 * stopped, so only update the saved value
3555 * field.
3556 *
3557 * If the PMC is not running, or is not
3558 * attached to its owner, read/write to the
3559 * savedvalue field.
3560 */
3561
3562 ri = PMC_TO_ROWINDEX(pm);
3563 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3564
3565 mtx_pool_lock_spin(pmc_mtxpool, pm);
3566 cpu = curthread->td_oncpu;
3567
3568 if (prw.pm_flags & PMC_F_OLDVALUE) {
3569 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
3570 (pm->pm_state == PMC_STATE_RUNNING))
3571 error = (*pcd->pcd_read_pmc)(cpu, adjri,
3572 &oldvalue);
3573 else
3574 oldvalue = pm->pm_gv.pm_savedvalue;
3575 }
3576 if (prw.pm_flags & PMC_F_NEWVALUE)
3577 pm->pm_gv.pm_savedvalue = prw.pm_value;
3578
3579 mtx_pool_unlock_spin(pmc_mtxpool, pm);
3580
3581 } else { /* System mode PMCs */
3582 cpu = PMC_TO_CPU(pm);
3583 ri = PMC_TO_ROWINDEX(pm);
3584 pcd = pmc_ri_to_classdep(md, ri, &adjri);
3585
3586 if (!pmc_cpu_is_active(cpu)) {
3587 error = ENXIO;
3588 break;
3589 }
3590
3591 /* move this thread to CPU 'cpu' */
3592 pmc_save_cpu_binding(&pb);
3593 pmc_select_cpu(cpu);
3594
3595 critical_enter();
3596 /* save old value */
3597 if (prw.pm_flags & PMC_F_OLDVALUE)
3598 if ((error = (*pcd->pcd_read_pmc)(cpu, adjri,
3599 &oldvalue)))
3600 goto error;
3601 /* write out new value */
3602 if (prw.pm_flags & PMC_F_NEWVALUE)
3603 error = (*pcd->pcd_write_pmc)(cpu, adjri,
3604 prw.pm_value);
3605 error:
3606 critical_exit();
3607 pmc_restore_cpu_binding(&pb);
3608 if (error)
3609 break;
3610 }
3611
3612 pprw = (struct pmc_op_pmcrw *) arg;
3613
3614#ifdef DEBUG
3615 if (prw.pm_flags & PMC_F_NEWVALUE)
3616 PMCDBG(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
3617 ri, prw.pm_value, oldvalue);
3618 else if (prw.pm_flags & PMC_F_OLDVALUE)
3619 PMCDBG(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
3620#endif
3621
3622 /* return old value if requested */
3623 if (prw.pm_flags & PMC_F_OLDVALUE)
3624 if ((error = copyout(&oldvalue, &pprw->pm_value,
3625 sizeof(prw.pm_value))))
3626 break;
3627
3628 }
3629 break;
3630
3631
3632 /*
3633 * Set the sampling rate for a sampling mode PMC and the
3634 * initial count for a counting mode PMC.
3635 */
3636
3637 case PMC_OP_PMCSETCOUNT:
3638 {
3639 struct pmc *pm;
3640 struct pmc_op_pmcsetcount sc;
3641
3642 PMC_DOWNGRADE_SX();
3643
3644 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
3645 break;
3646
3647 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
3648 break;
3649
3650 if (pm->pm_state == PMC_STATE_RUNNING) {
3651 error = EBUSY;
3652 break;
3653 }
3654
3655 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3656 pm->pm_sc.pm_reloadcount = sc.pm_count;
3657 else
3658 pm->pm_sc.pm_initial = sc.pm_count;
3659 }
3660 break;
3661
3662
3663 /*
3664 * Start a PMC.
3665 */
3666
3667 case PMC_OP_PMCSTART:
3668 {
3669 pmc_id_t pmcid;
3670 struct pmc *pm;
3671 struct pmc_op_simple sp;
3672
3673 sx_assert(&pmc_sx, SX_XLOCKED);
3674
3675 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3676 break;
3677
3678 pmcid = sp.pm_pmcid;
3679
3680 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3681 break;
3682
3683 KASSERT(pmcid == pm->pm_id,
3684 ("[pmc,%d] pmcid %x != id %x", __LINE__,
3685 pm->pm_id, pmcid));
3686
3687 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
3688 break;
3689 else if (pm->pm_state != PMC_STATE_STOPPED &&
3690 pm->pm_state != PMC_STATE_ALLOCATED) {
3691 error = EINVAL;
3692 break;
3693 }
3694
3695 error = pmc_start(pm);
3696 }
3697 break;
3698
3699
3700 /*
3701 * Stop a PMC.
3702 */
3703
3704 case PMC_OP_PMCSTOP:
3705 {
3706 pmc_id_t pmcid;
3707 struct pmc *pm;
3708 struct pmc_op_simple sp;
3709
3710 PMC_DOWNGRADE_SX();
3711
3712 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3713 break;
3714
3715 pmcid = sp.pm_pmcid;
3716
3717 /*
3718 * Mark the PMC as inactive and invoke the MD stop
3719 * routines if needed.
3720 */
3721
3722 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3723 break;
3724
3725 KASSERT(pmcid == pm->pm_id,
3726 ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
3727 pm->pm_id, pmcid));
3728
3729 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
3730 break;
3731 else if (pm->pm_state != PMC_STATE_RUNNING) {
3732 error = EINVAL;
3733 break;
3734 }
3735
3736 error = pmc_stop(pm);
3737 }
3738 break;
3739
3740
3741 /*
3742 * Write a user supplied value to the log file.
3743 */
3744
3745 case PMC_OP_WRITELOG:
3746 {
3747 struct pmc_op_writelog wl;
3748 struct pmc_owner *po;
3749
3750 PMC_DOWNGRADE_SX();
3751
3752 if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
3753 break;
3754
3755 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3756 error = EINVAL;
3757 break;
3758 }
3759
3760 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
3761 error = EINVAL;
3762 break;
3763 }
3764
3765 error = pmclog_process_userlog(po, &wl);
3766 }
3767 break;
3768
3769
3770 default:
3771 error = EINVAL;
3772 break;
3773 }
3774
3775 if (is_sx_downgraded)
3776 sx_sunlock(&pmc_sx);
3777 else
3778 sx_xunlock(&pmc_sx);
3779
3780 if (error)
3781 atomic_add_int(&pmc_stats.pm_syscall_errors, 1);
3782
3783 PICKUP_GIANT();
3784
3785 return error;
3786}
3787
3788/*
3789 * Helper functions
3790 */
3791
3792
3793/*
3794 * Mark the thread as needing callchain capture and post an AST. The
3795 * actual callchain capture will be done in a context where it is safe
3796 * to take page faults.
3797 */
3798
3799static void
3800pmc_post_callchain_callback(void)
3801{
3802 struct thread *td;
3803
3804 td = curthread;
3805
3806 KASSERT((td->td_pflags & TDP_CALLCHAIN) == 0,
3807 ("[pmc,%d] thread %p already marked for callchain capture",
3808 __LINE__, (void *) td));
3809
3810 /*
3811 * Mark this thread as needing callchain capture.
3812 * `td->td_pflags' will be safe to touch because this thread
3813 * was in user space when it was interrupted.
3814 */
3815 td->td_pflags |= TDP_CALLCHAIN;
3816
3817 /*
3818 * Don't let this thread migrate between CPUs until callchain
3819 * capture completes.
3820 */
3821 sched_pin();
3822
3823 return;
3824}
3825
3826/*
3827 * Interrupt processing.
3828 *
3829 * Find a free slot in the per-cpu array of samples and capture the
3830 * current callchain there. If a sample was successfully added, a bit
3831 * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook
3832 * needs to be invoked from the clock handler.
3833 *
3834 * This function is meant to be called from an NMI handler. It cannot
3835 * use any of the locking primitives supplied by the OS.
3836 */
3837
3838int
3839pmc_process_interrupt(int cpu, struct pmc *pm, struct trapframe *tf,
3840 int inuserspace)
3841{
3842 int error, callchaindepth;
3843 struct thread *td;
3844 struct pmc_sample *ps;
3845 struct pmc_samplebuffer *psb;
3846
3847 error = 0;
3848
3849 /*
3850 * Allocate space for a sample buffer.
3851 */
3852 psb = pmc_pcpu[cpu]->pc_sb;
3853
3854 ps = psb->ps_write;
3855 if (ps->ps_nsamples) { /* in use, reader hasn't caught up */
3856 pm->pm_stalled = 1;
3857 atomic_add_int(&pmc_stats.pm_intr_bufferfull, 1);
3858 PMCDBG(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d",
3859 cpu, pm, (void *) tf, inuserspace,
3860 (int) (psb->ps_write - psb->ps_samples),
3861 (int) (psb->ps_read - psb->ps_samples));
3862 error = ENOMEM;
3863 goto done;
3864 }
3865
3866
3867 /* Fill in entry. */
3868 PMCDBG(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm,
3869 (void *) tf, inuserspace,
3870 (int) (psb->ps_write - psb->ps_samples),
3871 (int) (psb->ps_read - psb->ps_samples));
3872
3873 KASSERT(pm->pm_runcount >= 0,
3874 ("[pmc,%d] pm=%p runcount %d", __LINE__, (void *) pm,
3875 pm->pm_runcount));
3876
3877 atomic_add_rel_32(&pm->pm_runcount, 1); /* hold onto PMC */
3878 ps->ps_pmc = pm;
3879 if ((td = curthread) && td->td_proc)
3880 ps->ps_pid = td->td_proc->p_pid;
3881 else
3882 ps->ps_pid = -1;
3883 ps->ps_cpu = cpu;
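	/*
	 * Record the interrupted thread, so that the deferred user space
	 * callchain capture done from ast() can match this sample with
	 * the thread it belongs to.
	 */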
3884 ps->ps_td = td;
3885 ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0;
3886
3887 callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ?
3888 pmc_callchaindepth : 1;
3889
3890 if (callchaindepth == 1)
3891 ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf);
3892 else {
3893 /*
3894 * Kernel stack traversals can be done immediately,
3895 * while we defer to an AST for user space traversals.
3896 */
3897 if (!inuserspace)
3898 callchaindepth =
3899 pmc_save_kernel_callchain(ps->ps_pc,
3900 callchaindepth, tf);
3901 else {
3902 pmc_post_callchain_callback();
3903 callchaindepth = PMC_SAMPLE_INUSE;
3904 }
3905 }
3906
3907 ps->ps_nsamples = callchaindepth; /* mark entry as in use */
3908
3909 /* increment write pointer, modulo ring buffer size */
3910 ps++;
3911 if (ps == psb->ps_fence)
3912 psb->ps_write = psb->ps_samples;
3913 else
3914 psb->ps_write = ps;
3915
3916 done:
3917 /* mark CPU as needing processing */
3918 atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
3919
3920 return (error);
3921}
3922
3923/*
3924 * Capture a user call chain. This function will be called from ast()
3925 * before control returns to userland and before the process gets
3926 * rescheduled.
3927 */
3928
3929static void
3930pmc_capture_user_callchain(int cpu, struct trapframe *tf)
3931{
3932 int i;
3933 struct pmc *pm;
3934 struct thread *td;
3935 struct pmc_sample *ps;
3936 struct pmc_samplebuffer *psb;
3937#ifdef INVARIANTS
3938 int ncallchains;
3939#endif
3940
3941 sched_unpin(); /* Can migrate safely now. */
3942
3943 psb = pmc_pcpu[cpu]->pc_sb;
3944 td = curthread;
3945
3946 KASSERT(td->td_pflags & TDP_CALLCHAIN,
3947 ("[pmc,%d] Retrieving callchain for thread that doesn't want it",
3948 __LINE__));
3949
3950#ifdef INVARIANTS
3951 ncallchains = 0;
3952#endif
3953
3954 /*
3955 * Iterate through all deferred callchain requests.
3956 */
3957
3958 ps = psb->ps_samples;
3959 for (i = 0; i < pmc_nsamples; i++, ps++) {
3960
3961 if (ps->ps_nsamples != PMC_SAMPLE_INUSE)
3962 continue;
3963 if (ps->ps_td != td)
3964 continue;
3965
3966 KASSERT(ps->ps_cpu == cpu,
3967 ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__,
3968 ps->ps_cpu, PCPU_GET(cpuid)));
3969
3970 pm = ps->ps_pmc;
3971
3972 KASSERT(pm->pm_flags & PMC_F_CALLCHAIN,
3973 ("[pmc,%d] Retrieving callchain for PMC that doesn't "
3974 "want it", __LINE__));
3975
3976 KASSERT(pm->pm_runcount > 0,
3977 ("[pmc,%d] runcount %d", __LINE__, pm->pm_runcount));
3978
3979 /*
3980 * Retrieve the callchain and mark the sample buffer
3981 * as 'processable' by the timer tick sweep code.
3982 */
3983 ps->ps_nsamples = pmc_save_user_callchain(ps->ps_pc,
3984 pmc_callchaindepth, tf);
3985
3986#ifdef INVARIANTS
3987 ncallchains++;
3988#endif
3989
3990 }
3991
3992 KASSERT(ncallchains > 0,
3993 ("[pmc,%d] cpu %d didn't find a sample to collect", __LINE__,
3994 cpu));
3995
3996 return;
3997}
3998
3999
4000/*
4001 * Process saved PC samples.
4002 */
4003
4004static void
4005pmc_process_samples(int cpu)
4006{
4007 struct pmc *pm;
4008 int adjri, n;
4009 struct thread *td;
4010 struct pmc_owner *po;
4011 struct pmc_sample *ps;
4012 struct pmc_classdep *pcd;
4013 struct pmc_samplebuffer *psb;
4014
4015 KASSERT(PCPU_GET(cpuid) == cpu,
4016 ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
4017 PCPU_GET(cpuid), cpu));
4018
4019 psb = pmc_pcpu[cpu]->pc_sb;
4020
4021 for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */
4022
4023 ps = psb->ps_read;
4024 if (ps->ps_nsamples == PMC_SAMPLE_FREE)
4025 break;
4026 if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
4027 /* Need a rescan at a later time. */
4028 atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
4029 break;
4030 }
4031
4032 pm = ps->ps_pmc;
4033
4034 KASSERT(pm->pm_runcount > 0,
4035 ("[pmc,%d] pm=%p runcount %d", __LINE__, (void *) pm,
4036 pm->pm_runcount));
4037
3994 po = pm->pm_owner;
3995
3996 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
3997 ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
3998 pm, PMC_TO_MODE(pm)));
3999
4000 /* Ignore PMCs that have been switched off */
4001 if (pm->pm_state != PMC_STATE_RUNNING)
4002 goto entrydone;
4003
4004 PMCDBG(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
4005 pm, ps->ps_nsamples, ps->ps_flags,
4006 (int) (psb->ps_write - psb->ps_samples),
4007 (int) (psb->ps_read - psb->ps_samples));
4008
4009 /*
4010 * If this is a process-mode PMC that is attached to
4011 * its owner, and if the PC is in user mode, update
4012 * profiling statistics like timer-based profiling
4013 * would have done.
4014 */
4015 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
4016 if (ps->ps_flags & PMC_CC_F_USERSPACE) {
4017 td = FIRST_THREAD_IN_PROC(po->po_owner);
4018 addupc_intr(td, ps->ps_pc[0], 1);
4019 }
4020 goto entrydone;
4021 }
4022
4023 /*
4024 * Otherwise, this is either a sampling mode PMC that
4025 * is attached to a different process than its owner,
4026 * or a system-wide sampling PMC. Dispatch a log
4027 * entry to the PMC's owner process.
4028 */
4029
4030 pmclog_process_callchain(pm, ps);
4031
4032 entrydone:
4033 ps->ps_nsamples = 0; /* mark entry as free */
4034 atomic_subtract_rel_32(&pm->pm_runcount, 1);
4035
4036 /* increment read pointer, modulo sample size */
4037 if (++ps == psb->ps_fence)
4038 psb->ps_read = psb->ps_samples;
4039 else
4040 psb->ps_read = ps;
4041 }
4042
4043 atomic_add_int(&pmc_stats.pm_log_sweeps, 1);
4044
4045 /* Do not re-enable stalled PMCs if we failed to process any samples */
4046 if (n == 0)
4047 return;
4048
4049 /*
4050 * Restart any stalled sampling PMCs on this CPU.
4051 *
4052 * If the NMI handler sets the pm_stalled field of a PMC after
4053 * the check below, we'll end up processing the stalled PMC at
4054 * the next hardclock tick.
4055 */
4056 for (n = 0; n < md->pmd_npmc; n++) {
4057 pcd = pmc_ri_to_classdep(md, n, &adjri);
4058 KASSERT(pcd != NULL,
4059 ("[pmc,%d] null pcd ri=%d", __LINE__, n));
4060 (void) (*pcd->pcd_get_config)(cpu,adjri,&pm);
4061
4062 if (pm == NULL || /* !cfg'ed */
4063 pm->pm_state != PMC_STATE_RUNNING || /* !active */
4064 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
4065 pm->pm_stalled == 0) /* !stalled */
4066 continue;
4067
4068 pm->pm_stalled = 0;
4069 (*pcd->pcd_start_pmc)(cpu, adjri);
4070 }
4071}
4072
4073/*
4074 * Event handlers.
4075 */
4076
4077/*
4078 * Handle a process exit.
4079 *
4080 * Remove this process from all hash tables. If this process
4081 * owned any PMCs, turn off those PMCs and deallocate them,
4082 * removing any associations with target processes.
4083 *
4084 * This function will be called by the last 'thread' of a
4085 * process.
4086 *
4087 * XXX This eventhandler gets called early in the exit process.
4088 * Consider using a 'hook' invocation from thread_exit() or equivalent
4089 * spot. Another negative is that kse_exit doesn't seem to call
4090 * exit1() [??].
4091 *
4092 */
4093
4094static void
4095pmc_process_exit(void *arg __unused, struct proc *p)
4096{
4097 struct pmc *pm;
4098 int adjri, cpu;
4099 unsigned int ri;
4100 int is_using_hwpmcs;
4101 struct pmc_owner *po;
4102 struct pmc_process *pp;
4103 struct pmc_classdep *pcd;
4104 pmc_value_t newvalue, tmp;
4105
4106 PROC_LOCK(p);
4107 is_using_hwpmcs = p->p_flag & P_HWPMC;
4108 PROC_UNLOCK(p);
4109
4110 /*
4111 * Log a sysexit event to all SS PMC owners.
4112 */
4113 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
4114 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
4115 pmclog_process_sysexit(po, p->p_pid);
4116
4117 if (!is_using_hwpmcs)
4118 return;
4119
4120 PMC_GET_SX_XLOCK();
4121 PMCDBG(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
4122 p->p_comm);
4123
4124 /*
4125 * Since this code is invoked by the last thread in an exiting
4126 * process, we would have context switched IN at some prior
4127 * point. However, with PREEMPTION, kernel mode context
4128 * switches may happen any time, so we want to disable a
4129	 * context switch OUT till we get any PMCs targeting this
4130 * process off the hardware.
4131 *
4132 * We also need to atomically remove this process'
4133 * entry from our target process hash table, using
4134 * PMC_FLAG_REMOVE.
4135 */
4136 PMCDBG(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid,
4137 p->p_comm);
4138
4139 critical_enter(); /* no preemption */
4140
4141 cpu = curthread->td_oncpu;
4142
4143 if ((pp = pmc_find_process_descriptor(p,
4144 PMC_FLAG_REMOVE)) != NULL) {
4145
4146 PMCDBG(PRC,EXT,2,
4147 "process-exit proc=%p pmc-process=%p", p, pp);
4148
4149 /*
4150		 * The exiting process could be the target of
4151		 * some PMCs which will be running on the
4152 * currently executing CPU.
4153 *
4154 * We need to turn these PMCs off like we
4155 * would do at context switch OUT time.
4156 */
4157 for (ri = 0; ri < md->pmd_npmc; ri++) {
4158
4159 /*
4160 * Pick up the pmc pointer from hardware
4161 * state similar to the CSW_OUT code.
4162 */
4163 pm = NULL;
4164
4165 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4166
4167 (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
4168
4169 PMCDBG(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
4170
4171 if (pm == NULL ||
4172 !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
4173 continue;
4174
4175 PMCDBG(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
4176 "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
4177 pm, pm->pm_state);
4178
4179 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
4180 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
4181 __LINE__, PMC_TO_ROWINDEX(pm), ri));
4182
4183 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
4184 ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
4185 __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));
4186
4187 (void) pcd->pcd_stop_pmc(cpu, adjri);
4188
4189 KASSERT(pm->pm_runcount > 0,
4190 ("[pmc,%d] bad runcount ri %d rc %d",
4191 __LINE__, ri, pm->pm_runcount));
4192
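			/*
			 * Fold the events counted since this process was
			 * last context switched in into the PMC's saved
			 * value and the target's per-row total.
			 */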
4193 /* Stop hardware only if it is actually running */
4194 if (pm->pm_state == PMC_STATE_RUNNING &&
4195 pm->pm_stalled == 0) {
4196 pcd->pcd_read_pmc(cpu, adjri, &newvalue);
4197 tmp = newvalue -
4198 PMC_PCPU_SAVED(cpu,ri);
4199
4200 mtx_pool_lock_spin(pmc_mtxpool, pm);
4201 pm->pm_gv.pm_savedvalue += tmp;
4202 pp->pp_pmcs[ri].pp_pmcval += tmp;
4203 mtx_pool_unlock_spin(pmc_mtxpool, pm);
4204 }
4205
4206 atomic_subtract_rel_32(&pm->pm_runcount,1);
4207
4208 KASSERT((int) pm->pm_runcount >= 0,
4209			    ("[pmc,%d] runcount is %d", __LINE__, (int) pm->pm_runcount));
4210
4211 (void) pcd->pcd_config_pmc(cpu, adjri, NULL);
4212 }
4213
4214 /*
4215 * Inform the MD layer of this pseudo "context switch
4216 * out"
4217 */
4218 (void) md->pmd_switch_out(pmc_pcpu[cpu], pp);
4219
4220 critical_exit(); /* ok to be pre-empted now */
4221
4222 /*
4223 * Unlink this process from the PMCs that are
4224		 * targeting it.   This will send a signal to
4225		 * all PMC owners whose PMCs are orphaned.
4226 *
4227 * Log PMC value at exit time if requested.
4228 */
4229 for (ri = 0; ri < md->pmd_npmc; ri++)
4230 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
4231 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
4232 PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
4233 pmclog_process_procexit(pm, pp);
4234 pmc_unlink_target_process(pm, pp);
4235 }
4236 free(pp, M_PMC);
4237
4238 } else
4239 critical_exit(); /* pp == NULL */
4240
4241
4242 /*
4243 * If the process owned PMCs, free them up and free up
4244 * memory.
4245 */
4246 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
4247 pmc_remove_owner(po);
4248 pmc_destroy_owner_descriptor(po);
4249 }
4250
4251 sx_xunlock(&pmc_sx);
4252}
4253
4254/*
4255 * Handle a process fork.
4256 *
4257 * If the parent process 'p1' is under HWPMC monitoring, then copy
4258 * over any attached PMCs that have 'do_descendants' semantics.
4259 */
4260
4261static void
4262pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
4263 int flags)
4264{
4265 int is_using_hwpmcs;
4266 unsigned int ri;
4267 uint32_t do_descendants;
4268 struct pmc *pm;
4269 struct pmc_owner *po;
4270 struct pmc_process *ppnew, *ppold;
4271
4272 (void) flags; /* unused parameter */
4273
4274 PROC_LOCK(p1);
4275 is_using_hwpmcs = p1->p_flag & P_HWPMC;
4276 PROC_UNLOCK(p1);
4277
4278 /*
4279 * If there are system-wide sampling PMCs active, we need to
4280 * log all fork events to their owner's logs.
4281 */
4282
4283 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
4284 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
4285 pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
4286
4287 if (!is_using_hwpmcs)
4288 return;
4289
4290 PMC_GET_SX_XLOCK();
4291 PMCDBG(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
4292 p1->p_pid, p1->p_comm, newproc);
4293
4294 /*
4295 * If the parent process (curthread->td_proc) is a
4296 * target of any PMCs, look for PMCs that are to be
4297 * inherited, and link these into the new process
4298 * descriptor.
4299 */
4300 if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
4301 PMC_FLAG_NONE)) == NULL)
4302 goto done; /* nothing to do */
4303
4304 do_descendants = 0;
4305 for (ri = 0; ri < md->pmd_npmc; ri++)
4306 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
4307 do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
4308 if (do_descendants == 0) /* nothing to do */
4309 goto done;
4310
4311 /* allocate a descriptor for the new process */
4312 if ((ppnew = pmc_find_process_descriptor(newproc,
4313 PMC_FLAG_ALLOCATE)) == NULL)
4314 goto done;
4315
4316 /*
4317 * Run through all PMCs that were targeting the old process
4318 * and which specified F_DESCENDANTS and attach them to the
4319 * new process.
4320 *
4321 * Log the fork event to all owners of PMCs attached to this
4322 * process, if not already logged.
4323 */
4324 for (ri = 0; ri < md->pmd_npmc; ri++)
4325 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
4326 (pm->pm_flags & PMC_F_DESCENDANTS)) {
4327 pmc_link_target_process(pm, ppnew);
4328 po = pm->pm_owner;
4329 if (po->po_sscount == 0 &&
4330 po->po_flags & PMC_PO_OWNS_LOGFILE)
4331 pmclog_process_procfork(po, p1->p_pid,
4332 newproc->p_pid);
4333 }
4334
4335 /*
4336 * Now mark the new process as being tracked by this driver.
4337 */
4338 PROC_LOCK(newproc);
4339 newproc->p_flag |= P_HWPMC;
4340 PROC_UNLOCK(newproc);
4341
4342 done:
4343 sx_xunlock(&pmc_sx);
4344}
4345
4346
4347/*
4348 * initialization
4349 */
4350
4351static const char *pmc_name_of_pmcclass[] = {
4352#undef __PMC_CLASS
4353#define __PMC_CLASS(N) #N ,
4354 __PMC_CLASSES()
4355};
4356
4357static int
4358pmc_initialize(void)
4359{
4360 int c, cpu, error, n, ri;
4361 unsigned int maxcpu;
4362 struct pmc_binding pb;
4363 struct pmc_sample *ps;
4364 struct pmc_classdep *pcd;
4365 struct pmc_samplebuffer *sb;
4366
4367 md = NULL;
4368 error = 0;
4369
4370#ifdef DEBUG
4371 /* parse debug flags first */
4372 if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
4373 pmc_debugstr, sizeof(pmc_debugstr)))
4374 pmc_debugflags_parse(pmc_debugstr,
4375 pmc_debugstr+strlen(pmc_debugstr));
4376#endif
4377
4378 PMCDBG(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
4379
4380 /* check kernel version */
4381 if (pmc_kernel_version != PMC_VERSION) {
4382 if (pmc_kernel_version == 0)
4383 printf("hwpmc: this kernel has not been compiled with "
4384 "'options HWPMC_HOOKS'.\n");
4385 else
4386 printf("hwpmc: kernel version (0x%x) does not match "
4387 "module version (0x%x).\n", pmc_kernel_version,
4388 PMC_VERSION);
4389 return EPROGMISMATCH;
4390 }
4391
4392 /*
4393 * check sysctl parameters
4394 */
4395
4396 if (pmc_hashsize <= 0) {
4397 (void) printf("hwpmc: tunable \"hashsize\"=%d must be "
4398 "greater than zero.\n", pmc_hashsize);
4399 pmc_hashsize = PMC_HASH_SIZE;
4400 }
4401
4402 if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
4403 (void) printf("hwpmc: tunable \"nsamples\"=%d out of "
4404 "range.\n", pmc_nsamples);
4405 pmc_nsamples = PMC_NSAMPLES;
4406 }
4407
4408 if (pmc_callchaindepth <= 0 ||
4409 pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) {
4410 (void) printf("hwpmc: tunable \"callchaindepth\"=%d out of "
4411 "range.\n", pmc_callchaindepth);
4412 pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
4413 }
4414
4415 md = pmc_md_initialize();
4416
4417 if (md == NULL)
4418 return (ENOSYS);
4419
4420 KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,
4421 ("[pmc,%d] no classes or pmcs", __LINE__));
4422
4423 /* Compute the map from row-indices to classdep pointers. */
4424 pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) *
4425 md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO);
4426
4427 for (n = 0; n < md->pmd_npmc; n++)
4428 pmc_rowindex_to_classdep[n] = NULL;
4429 for (ri = c = 0; c < md->pmd_nclass; c++) {
4430 pcd = &md->pmd_classdep[c];
4431 for (n = 0; n < pcd->pcd_num; n++, ri++)
4432 pmc_rowindex_to_classdep[ri] = pcd;
4433 }
4434
4435 KASSERT(ri == md->pmd_npmc,
4436 ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__,
4437 ri, md->pmd_npmc));
4438
4439 maxcpu = pmc_cpu_max();
4440
4441 /* allocate space for the per-cpu array */
4442 pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC,
4443 M_WAITOK|M_ZERO);
4444
4445 /* per-cpu 'saved values' for managing process-mode PMCs */
4446 pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc,
4447 M_PMC, M_WAITOK);
4448
4449 /* Perform CPU-dependent initialization. */
4450 pmc_save_cpu_binding(&pb);
4451 error = 0;
4452 for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) {
4453 if (!pmc_cpu_is_active(cpu))
4454 continue;
4455 pmc_select_cpu(cpu);
4456 pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) +
4457 md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC,
4458 M_WAITOK|M_ZERO);
4459 if (md->pmd_pcpu_init)
4460 error = md->pmd_pcpu_init(md, cpu);
4461 for (n = 0; error == 0 && n < md->pmd_nclass; n++)
4462 error = md->pmd_classdep[n].pcd_pcpu_init(md, cpu);
4463 }
4464 pmc_restore_cpu_binding(&pb);
4465
4466 if (error)
4467 return (error);
4468
4469 /* allocate space for the sample array */
4470 for (cpu = 0; cpu < maxcpu; cpu++) {
4471 if (!pmc_cpu_is_active(cpu))
4472 continue;
4473
4474 sb = malloc(sizeof(struct pmc_samplebuffer) +
4475 pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
4476 M_WAITOK|M_ZERO);
4477 sb->ps_read = sb->ps_write = sb->ps_samples;
4478 sb->ps_fence = sb->ps_samples + pmc_nsamples;
4479
4480 KASSERT(pmc_pcpu[cpu] != NULL,
4481 ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
4482
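		/*
		 * Callchain storage for every sample on this CPU comes from
		 * a single contiguous allocation; each sample is handed a
		 * slice of pmc_callchaindepth program counter slots below.
		 */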
4483 sb->ps_callchains = malloc(pmc_callchaindepth * pmc_nsamples *
4484 sizeof(uintptr_t), M_PMC, M_WAITOK|M_ZERO);
4485
4486 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
4487 ps->ps_pc = sb->ps_callchains +
4488 (n * pmc_callchaindepth);
4489
4490 pmc_pcpu[cpu]->pc_sb = sb;
4491 }
4492
4493 /* allocate space for the row disposition array */
4494 pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
4495 M_PMC, M_WAITOK|M_ZERO);
4496
4497 KASSERT(pmc_pmcdisp != NULL,
4498 ("[pmc,%d] pmcdisp allocation returned NULL", __LINE__));
4499
4500 /* mark all PMCs as available */
4501 for (n = 0; n < (int) md->pmd_npmc; n++)
4502 PMC_MARK_ROW_FREE(n);
4503
4504 /* allocate thread hash tables */
4505 pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
4506 &pmc_ownerhashmask);
4507
4508 pmc_processhash = hashinit(pmc_hashsize, M_PMC,
4509 &pmc_processhashmask);
4510 mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
4511 MTX_SPIN);
4512
4513 LIST_INIT(&pmc_ss_owners);
4514 pmc_ss_count = 0;
4515
4516 /* allocate a pool of spin mutexes */
4517 pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
4518 MTX_SPIN);
4519
4520 PMCDBG(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
4521 "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
4522 pmc_processhash, pmc_processhashmask);
4523
4524 /* register process {exit,fork,exec} handlers */
4525 pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
4526 pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
4527 pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
4528 pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
4529
4530 /* initialize logging */
4531 pmclog_initialize();
4532
4533 /* set hook functions */
4534 pmc_intr = md->pmd_intr;
4535 pmc_hook = pmc_hook_handler;
4536
4537 if (error == 0) {
4538 printf(PMC_MODULE_NAME ":");
4539 for (n = 0; n < (int) md->pmd_nclass; n++) {
4540 pcd = &md->pmd_classdep[n];
4541 printf(" %s/%d/%d/0x%b",
4542 pmc_name_of_pmcclass[pcd->pcd_class],
4543 pcd->pcd_num,
4544 pcd->pcd_width,
4545 pcd->pcd_caps,
4546 "\20"
4547 "\1INT\2USR\3SYS\4EDG\5THR"
4548 "\6REA\7WRI\10INV\11QUA\12PRC"
4549 "\13TAG\14CSC");
4550 }
4551 printf("\n");
4552 }
4553
4554 return (error);
4555}
4556
4557/* prepare to be unloaded */
4558static void
4559pmc_cleanup(void)
4560{
4561 int c, cpu;
4562 unsigned int maxcpu;
4563 struct pmc_ownerhash *ph;
4564 struct pmc_owner *po, *tmp;
4565 struct pmc_binding pb;
4566#ifdef DEBUG
4567 struct pmc_processhash *prh;
4568#endif
4569
4570 PMCDBG(MOD,INI,0, "%s", "cleanup");
4571
4572 /* switch off sampling */
4573 atomic_store_rel_int(&pmc_cpumask, 0);
4574 pmc_intr = NULL;
4575
4576 sx_xlock(&pmc_sx);
4577 if (pmc_hook == NULL) { /* being unloaded already */
4578 sx_xunlock(&pmc_sx);
4579 return;
4580 }
4581
4582 pmc_hook = NULL; /* prevent new threads from entering module */
4583
4584 /* deregister event handlers */
4585 EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
4586 EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
4587
4588 /* send SIGBUS to all owner threads, free up allocations */
4589 if (pmc_ownerhash)
4590 for (ph = pmc_ownerhash;
4591 ph <= &pmc_ownerhash[pmc_ownerhashmask];
4592 ph++) {
4593 LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
4594 pmc_remove_owner(po);
4595
4596 /* send SIGBUS to owner processes */
4597 PMCDBG(MOD,INI,2, "cleanup signal proc=%p "
4598 "(%d, %s)", po->po_owner,
4599 po->po_owner->p_pid,
4600 po->po_owner->p_comm);
4601
4602 PROC_LOCK(po->po_owner);
4603 psignal(po->po_owner, SIGBUS);
4604 PROC_UNLOCK(po->po_owner);
4605
4606 pmc_destroy_owner_descriptor(po);
4607 }
4608 }
4609
4610 /* reclaim allocated data structures */
4611 if (pmc_mtxpool)
4612 mtx_pool_destroy(&pmc_mtxpool);
4613
4614 mtx_destroy(&pmc_processhash_mtx);
4615 if (pmc_processhash) {
4616#ifdef DEBUG
4617 struct pmc_process *pp;
4618
4619 PMCDBG(MOD,INI,3, "%s", "destroy process hash");
4620 for (prh = pmc_processhash;
4621 prh <= &pmc_processhash[pmc_processhashmask];
4622 prh++)
4623 LIST_FOREACH(pp, prh, pp_next)
4624 PMCDBG(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
4625#endif
4626
4627 hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
4628 pmc_processhash = NULL;
4629 }
4630
4631 if (pmc_ownerhash) {
4632 PMCDBG(MOD,INI,3, "%s", "destroy owner hash");
4633 hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
4634 pmc_ownerhash = NULL;
4635 }
4636
4637 KASSERT(LIST_EMPTY(&pmc_ss_owners),
4638 ("[pmc,%d] Global SS owner list not empty", __LINE__));
4639 KASSERT(pmc_ss_count == 0,
4640 ("[pmc,%d] Global SS count not empty", __LINE__));
4641
4642 /* do processor and pmc-class dependent cleanup */
4643 maxcpu = pmc_cpu_max();
4644
4645 PMCDBG(MOD,INI,3, "%s", "md cleanup");
4646 if (md) {
4647 pmc_save_cpu_binding(&pb);
4648 for (cpu = 0; cpu < maxcpu; cpu++) {
4649 PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
4650 cpu, pmc_pcpu[cpu]);
4651 if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL)
4652 continue;
4653 pmc_select_cpu(cpu);
4654 for (c = 0; c < md->pmd_nclass; c++)
4655 md->pmd_classdep[c].pcd_pcpu_fini(md, cpu);
4656 if (md->pmd_pcpu_fini)
4657 md->pmd_pcpu_fini(md, cpu);
4658 }
4659
4660 pmc_md_finalize(md);
4661
4662 free(md, M_PMC);
4663 md = NULL;
4664 pmc_restore_cpu_binding(&pb);
4665 }
4666
4667 /* Free per-cpu descriptors. */
4668 for (cpu = 0; cpu < maxcpu; cpu++) {
4669 if (!pmc_cpu_is_active(cpu))
4670 continue;
4671 KASSERT(pmc_pcpu[cpu]->pc_sb != NULL,
4672 ("[pmc,%d] Null cpu sample buffer cpu=%d", __LINE__,
4673 cpu));
4674 free(pmc_pcpu[cpu]->pc_sb->ps_callchains, M_PMC);
4675 free(pmc_pcpu[cpu]->pc_sb, M_PMC);
4676 free(pmc_pcpu[cpu], M_PMC);
4677 }
4678
4679 free(pmc_pcpu, M_PMC);
4680 pmc_pcpu = NULL;
4681
4682 free(pmc_pcpu_saved, M_PMC);
4683 pmc_pcpu_saved = NULL;
4684
4685 if (pmc_pmcdisp) {
4686 free(pmc_pmcdisp, M_PMC);
4687 pmc_pmcdisp = NULL;
4688 }
4689
4690 if (pmc_rowindex_to_classdep) {
4691 free(pmc_rowindex_to_classdep, M_PMC);
4692 pmc_rowindex_to_classdep = NULL;
4693 }
4694
4695 pmclog_shutdown();
4696
4697 sx_xunlock(&pmc_sx); /* we are done */
4698}
4699
4700/*
4701 * The function called at load/unload.
4702 */
4703
4704static int
4705load (struct module *module __unused, int cmd, void *arg __unused)
4706{
4707 int error;
4708
4709 error = 0;
4710
4711 switch (cmd) {
4712 case MOD_LOAD :
4713 /* initialize the subsystem */
4714 error = pmc_initialize();
4715 if (error != 0)
4716 break;
4717 PMCDBG(MOD,INI,1, "syscall=%d maxcpu=%d",
4718 pmc_syscall_num, pmc_cpu_max());
4719 break;
4720
4721
4722 case MOD_UNLOAD :
4723 case MOD_SHUTDOWN:
4724 pmc_cleanup();
4725 PMCDBG(MOD,INI,1, "%s", "unloaded");
4726 break;
4727
4728 default :
4729 error = EINVAL; /* XXX should panic(9) */
4730 break;
4731 }
4732
4733 return error;
4734}
4735
4736/* memory pool */
4737MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");
4038 po = pm->pm_owner;
4039
4040 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
4041 ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
4042 pm, PMC_TO_MODE(pm)));
4043
4044 /* Ignore PMCs that have been switched off */
4045 if (pm->pm_state != PMC_STATE_RUNNING)
4046 goto entrydone;
4047
4048 PMCDBG(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
4049 pm, ps->ps_nsamples, ps->ps_flags,
4050 (int) (psb->ps_write - psb->ps_samples),
4051 (int) (psb->ps_read - psb->ps_samples));
4052
4053 /*
4054 * If this is a process-mode PMC that is attached to
4055 * its owner, and if the PC is in user mode, update
4056 * profiling statistics like timer-based profiling
4057 * would have done.
4058 */
4059 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
4060 if (ps->ps_flags & PMC_CC_F_USERSPACE) {
4061 td = FIRST_THREAD_IN_PROC(po->po_owner);
4062 addupc_intr(td, ps->ps_pc[0], 1);
4063 }
4064 goto entrydone;
4065 }
4066
4067 /*
4068 * Otherwise, this is either a sampling mode PMC that
4069 * is attached to a different process than its owner,
4070 * or a system-wide sampling PMC. Dispatch a log
4071 * entry to the PMC's owner process.
4072 */
4073
4074 pmclog_process_callchain(pm, ps);
4075
4076 entrydone:
4077 ps->ps_nsamples = 0; /* mark entry as free */
4078 atomic_subtract_rel_32(&pm->pm_runcount, 1);
4079
4080 /* increment read pointer, modulo sample size */
4081 if (++ps == psb->ps_fence)
4082 psb->ps_read = psb->ps_samples;
4083 else
4084 psb->ps_read = ps;
4085 }
4086
4087 atomic_add_int(&pmc_stats.pm_log_sweeps, 1);
4088
4089 /* Do not re-enable stalled PMCs if we failed to process any samples */
4090 if (n == 0)
4091 return;
4092
4093 /*
4094 * Restart any stalled sampling PMCs on this CPU.
4095 *
4096 * If the NMI handler sets the pm_stalled field of a PMC after
4097 * the check below, we'll end up processing the stalled PMC at
4098 * the next hardclock tick.
4099 */
4100 for (n = 0; n < md->pmd_npmc; n++) {
4101 pcd = pmc_ri_to_classdep(md, n, &adjri);
4102 KASSERT(pcd != NULL,
4103 ("[pmc,%d] null pcd ri=%d", __LINE__, n));
4104 (void) (*pcd->pcd_get_config)(cpu,adjri,&pm);
4105
4106 if (pm == NULL || /* !cfg'ed */
4107 pm->pm_state != PMC_STATE_RUNNING || /* !active */
4108 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
4109 pm->pm_stalled == 0) /* !stalled */
4110 continue;
4111
4112 pm->pm_stalled = 0;
4113 (*pcd->pcd_start_pmc)(cpu, adjri);
4114 }
4115}
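/*
 * Illustrative sketch (an assumption about the interrupt path, which is
 * not part of this hunk): the 'pm_stalled' flag cleared above is set by
 * the sampling interrupt handler when it finds no free slot in the
 * per-CPU ring, roughly:
 *
 *	ps = psb->ps_write;
 *	if (ps->ps_nsamples) {			// reader has not caught up
 *		pm->pm_stalled = 1;		// stop until the next sweep
 *		atomic_add_int(&pmc_stats.pm_intr_bufferfull, 1);
 *		return (ENOMEM);
 *	}
 *
 * The hardclock-driven sweep above then clears pm_stalled and restarts
 * the counter via pcd_start_pmc().
 */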
4116
4117/*
4118 * Event handlers.
4119 */
4120
4121/*
4122 * Handle a process exit.
4123 *
4124 * Remove this process from all hash tables. If this process
4125 * owned any PMCs, turn off those PMCs and deallocate them,
4126 * removing any associations with target processes.
4127 *
4128 * This function will be called by the last 'thread' of a
4129 * process.
4130 *
4131 * XXX This eventhandler gets called early in the exit process.
4132 * Consider using a 'hook' invocation from thread_exit() or equivalent
4133 * spot. Another negative is that kse_exit doesn't seem to call
4134 * exit1() [??].
4135 *
4136 */
4137
4138static void
4139pmc_process_exit(void *arg __unused, struct proc *p)
4140{
4141 struct pmc *pm;
4142 int adjri, cpu;
4143 unsigned int ri;
4144 int is_using_hwpmcs;
4145 struct pmc_owner *po;
4146 struct pmc_process *pp;
4147 struct pmc_classdep *pcd;
4148 pmc_value_t newvalue, tmp;
4149
4150 PROC_LOCK(p);
4151 is_using_hwpmcs = p->p_flag & P_HWPMC;
4152 PROC_UNLOCK(p);
4153
4154 /*
4155 * Log a sysexit event to all SS PMC owners.
4156 */
4157 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
4158 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
4159 pmclog_process_sysexit(po, p->p_pid);
4160
4161 if (!is_using_hwpmcs)
4162 return;
4163
4164 PMC_GET_SX_XLOCK();
4165 PMCDBG(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
4166 p->p_comm);
4167
4168 /*
4169 * Since this code is invoked by the last thread in an exiting
4170 * process, we would have context switched IN at some prior
4171 * point. However, with PREEMPTION, kernel mode context
4172 * switches may happen any time, so we want to disable a
4173	 * context switch OUT till we get any PMCs targeting this
4174 * process off the hardware.
4175 *
4176 * We also need to atomically remove this process'
4177 * entry from our target process hash table, using
4178 * PMC_FLAG_REMOVE.
4179 */
4180 PMCDBG(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid,
4181 p->p_comm);
4182
4183 critical_enter(); /* no preemption */
4184
4185 cpu = curthread->td_oncpu;
4186
4187 if ((pp = pmc_find_process_descriptor(p,
4188 PMC_FLAG_REMOVE)) != NULL) {
4189
4190 PMCDBG(PRC,EXT,2,
4191 "process-exit proc=%p pmc-process=%p", p, pp);
4192
4193 /*
4194	 * The exiting process could be the target of
4195	 * some PMCs which will be running on
4196	 * the currently executing CPU.
4197 *
4198 * We need to turn these PMCs off like we
4199 * would do at context switch OUT time.
4200 */
4201 for (ri = 0; ri < md->pmd_npmc; ri++) {
4202
4203 /*
4204 * Pick up the pmc pointer from hardware
4205 * state similar to the CSW_OUT code.
4206 */
4207 pm = NULL;
4208
4209 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4210
4211 (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
4212
4213 PMCDBG(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
4214
4215 if (pm == NULL ||
4216 !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
4217 continue;
4218
4219 PMCDBG(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
4220 "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
4221 pm, pm->pm_state);
4222
4223 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
4224 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
4225 __LINE__, PMC_TO_ROWINDEX(pm), ri));
4226
4227 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
4228 ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
4229 __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));
4230
4231 (void) pcd->pcd_stop_pmc(cpu, adjri);
4232
4233 KASSERT(pm->pm_runcount > 0,
4234 ("[pmc,%d] bad runcount ri %d rc %d",
4235 __LINE__, ri, pm->pm_runcount));
4236
4237 /* Stop hardware only if it is actually running */
4238 if (pm->pm_state == PMC_STATE_RUNNING &&
4239 pm->pm_stalled == 0) {
4240 pcd->pcd_read_pmc(cpu, adjri, &newvalue);
4241 tmp = newvalue -
4242 PMC_PCPU_SAVED(cpu,ri);
4243
4244 mtx_pool_lock_spin(pmc_mtxpool, pm);
4245 pm->pm_gv.pm_savedvalue += tmp;
4246 pp->pp_pmcs[ri].pp_pmcval += tmp;
4247 mtx_pool_unlock_spin(pmc_mtxpool, pm);
4248 }
4249
4250 atomic_subtract_rel_32(&pm->pm_runcount,1);
4251
4252 KASSERT((int) pm->pm_runcount >= 0,
4253	    ("[pmc,%d] runcount is %d", __LINE__, (int) pm->pm_runcount));
4254
4255 (void) pcd->pcd_config_pmc(cpu, adjri, NULL);
4256 }
4257
4258 /*
4259 * Inform the MD layer of this pseudo "context switch
4260 * out"
4261 */
4262 (void) md->pmd_switch_out(pmc_pcpu[cpu], pp);
4263
4264 critical_exit(); /* ok to be pre-empted now */
4265
4266 /*
4267 * Unlink this process from the PMCs that are
4268	 * targeting it. This will send a signal to
4269	 * all PMC owners whose PMCs are orphaned.
4270 *
4271 * Log PMC value at exit time if requested.
4272 */
4273 for (ri = 0; ri < md->pmd_npmc; ri++)
4274 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
4275 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
4276 PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
4277 pmclog_process_procexit(pm, pp);
4278 pmc_unlink_target_process(pm, pp);
4279 }
4280 free(pp, M_PMC);
4281
4282 } else
4283 critical_exit(); /* pp == NULL */
4284
4285
4286 /*
4287 * If the process owned PMCs, free them up and free up
4288 * memory.
4289 */
4290 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
4291 pmc_remove_owner(po);
4292 pmc_destroy_owner_descriptor(po);
4293 }
4294
4295 sx_xunlock(&pmc_sx);
4296}
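/*
 * Illustrative sketch (not part of the original source): the value
 * accumulation in the exit path above mirrors an ordinary context
 * switch out.  Assuming the switch-in path stashed the hardware reading
 * in PMC_PCPU_SAVED(cpu, ri), the virtualized counts advance by the
 * delta accrued while the exiting process ran on this CPU:
 *
 *	pcd->pcd_read_pmc(cpu, adjri, &newvalue);
 *	tmp = newvalue - PMC_PCPU_SAVED(cpu, ri);  // counts since switch-in
 *	pm->pm_gv.pm_savedvalue   += tmp;          // owner-visible total
 *	pp->pp_pmcs[ri].pp_pmcval += tmp;          // per-target-process total
 */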
4297
4298/*
4299 * Handle a process fork.
4300 *
4301 * If the parent process 'p1' is under HWPMC monitoring, then copy
4302 * over any attached PMCs that have 'do_descendants' semantics.
4303 */
4304
4305static void
4306pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
4307 int flags)
4308{
4309 int is_using_hwpmcs;
4310 unsigned int ri;
4311 uint32_t do_descendants;
4312 struct pmc *pm;
4313 struct pmc_owner *po;
4314 struct pmc_process *ppnew, *ppold;
4315
4316 (void) flags; /* unused parameter */
4317
4318 PROC_LOCK(p1);
4319 is_using_hwpmcs = p1->p_flag & P_HWPMC;
4320 PROC_UNLOCK(p1);
4321
4322 /*
4323 * If there are system-wide sampling PMCs active, we need to
4324 * log all fork events to their owner's logs.
4325 */
4326
4327 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
4328 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
4329 pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
4330
4331 if (!is_using_hwpmcs)
4332 return;
4333
4334 PMC_GET_SX_XLOCK();
4335 PMCDBG(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
4336 p1->p_pid, p1->p_comm, newproc);
4337
4338 /*
4339 * If the parent process (curthread->td_proc) is a
4340 * target of any PMCs, look for PMCs that are to be
4341 * inherited, and link these into the new process
4342 * descriptor.
4343 */
4344 if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
4345 PMC_FLAG_NONE)) == NULL)
4346 goto done; /* nothing to do */
4347
4348 do_descendants = 0;
4349 for (ri = 0; ri < md->pmd_npmc; ri++)
4350 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
4351 do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
4352 if (do_descendants == 0) /* nothing to do */
4353 goto done;
4354
4355 /* allocate a descriptor for the new process */
4356 if ((ppnew = pmc_find_process_descriptor(newproc,
4357 PMC_FLAG_ALLOCATE)) == NULL)
4358 goto done;
4359
4360 /*
4361 * Run through all PMCs that were targeting the old process
4362 * and which specified F_DESCENDANTS and attach them to the
4363 * new process.
4364 *
4365 * Log the fork event to all owners of PMCs attached to this
4366 * process, if not already logged.
4367 */
4368 for (ri = 0; ri < md->pmd_npmc; ri++)
4369 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
4370 (pm->pm_flags & PMC_F_DESCENDANTS)) {
4371 pmc_link_target_process(pm, ppnew);
4372 po = pm->pm_owner;
4373 if (po->po_sscount == 0 &&
4374 po->po_flags & PMC_PO_OWNS_LOGFILE)
4375 pmclog_process_procfork(po, p1->p_pid,
4376 newproc->p_pid);
4377 }
4378
4379 /*
4380 * Now mark the new process as being tracked by this driver.
4381 */
4382 PROC_LOCK(newproc);
4383 newproc->p_flag |= P_HWPMC;
4384 PROC_UNLOCK(newproc);
4385
4386 done:
4387 sx_xunlock(&pmc_sx);
4388}
4389
4390
4391/*
4392 * initialization
4393 */
4394
4395static const char *pmc_name_of_pmcclass[] = {
4396#undef __PMC_CLASS
4397#define __PMC_CLASS(N) #N ,
4398 __PMC_CLASSES()
4399};
4400
4401static int
4402pmc_initialize(void)
4403{
4404 int c, cpu, error, n, ri;
4405 unsigned int maxcpu;
4406 struct pmc_binding pb;
4407 struct pmc_sample *ps;
4408 struct pmc_classdep *pcd;
4409 struct pmc_samplebuffer *sb;
4410
4411 md = NULL;
4412 error = 0;
4413
4414#ifdef DEBUG
4415 /* parse debug flags first */
4416 if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
4417 pmc_debugstr, sizeof(pmc_debugstr)))
4418 pmc_debugflags_parse(pmc_debugstr,
4419 pmc_debugstr+strlen(pmc_debugstr));
4420#endif
4421
4422 PMCDBG(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
4423
4424 /* check kernel version */
4425 if (pmc_kernel_version != PMC_VERSION) {
4426 if (pmc_kernel_version == 0)
4427 printf("hwpmc: this kernel has not been compiled with "
4428 "'options HWPMC_HOOKS'.\n");
4429 else
4430 printf("hwpmc: kernel version (0x%x) does not match "
4431 "module version (0x%x).\n", pmc_kernel_version,
4432 PMC_VERSION);
4433 return EPROGMISMATCH;
4434 }
4435
4436 /*
4437 * check sysctl parameters
4438 */
4439
4440 if (pmc_hashsize <= 0) {
4441 (void) printf("hwpmc: tunable \"hashsize\"=%d must be "
4442 "greater than zero.\n", pmc_hashsize);
4443 pmc_hashsize = PMC_HASH_SIZE;
4444 }
4445
4446 if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
4447 (void) printf("hwpmc: tunable \"nsamples\"=%d out of "
4448 "range.\n", pmc_nsamples);
4449 pmc_nsamples = PMC_NSAMPLES;
4450 }
4451
4452 if (pmc_callchaindepth <= 0 ||
4453 pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) {
4454 (void) printf("hwpmc: tunable \"callchaindepth\"=%d out of "
4455 "range.\n", pmc_callchaindepth);
4456 pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
4457 }
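	/*
	 * Usage note (not part of the original source): the values
	 * checked above are loader tunables under
	 * PMC_SYSCTL_NAME_PREFIX, i.e. "kern.hwpmc." assuming the usual
	 * definition, and would typically be set in /boot/loader.conf:
	 *
	 *	kern.hwpmc.hashsize=256
	 *	kern.hwpmc.nsamples=1024
	 *	kern.hwpmc.callchaindepth=16
	 *
	 * Out-of-range settings are clamped back to the compiled-in
	 * defaults rather than failing the module load.
	 */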
4458
4459 md = pmc_md_initialize();
4460
4461 if (md == NULL)
4462 return (ENOSYS);
4463
4464 KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,
4465 ("[pmc,%d] no classes or pmcs", __LINE__));
4466
4467 /* Compute the map from row-indices to classdep pointers. */
4468 pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) *
4469 md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO);
4470
4471 for (n = 0; n < md->pmd_npmc; n++)
4472 pmc_rowindex_to_classdep[n] = NULL;
4473 for (ri = c = 0; c < md->pmd_nclass; c++) {
4474 pcd = &md->pmd_classdep[c];
4475 for (n = 0; n < pcd->pcd_num; n++, ri++)
4476 pmc_rowindex_to_classdep[ri] = pcd;
4477 }
4478
4479 KASSERT(ri == md->pmd_npmc,
4480 ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__,
4481 ri, md->pmd_npmc));
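	/*
	 * Illustrative sketch (not part of the original source): with
	 * this table built, a helper such as pmc_ri_to_classdep(), used
	 * by the sampling and context-switch paths, can turn a global
	 * row index into a class descriptor plus a class-relative
	 * index.  Assuming each classdep records its first global row
	 * index in a field named pcd_ri here, the lookup is roughly:
	 *
	 *	pcd = pmc_rowindex_to_classdep[ri];
	 *	*adjri = ri - pcd->pcd_ri;	// index within this class
	 *	return (pcd);
	 */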
4482
4483 maxcpu = pmc_cpu_max();
4484
4485 /* allocate space for the per-cpu array */
4486 pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC,
4487 M_WAITOK|M_ZERO);
4488
4489 /* per-cpu 'saved values' for managing process-mode PMCs */
4490 pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc,
4491 M_PMC, M_WAITOK);
4492
4493 /* Perform CPU-dependent initialization. */
4494 pmc_save_cpu_binding(&pb);
4495 error = 0;
4496 for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) {
4497 if (!pmc_cpu_is_active(cpu))
4498 continue;
4499 pmc_select_cpu(cpu);
4500 pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) +
4501 md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC,
4502 M_WAITOK|M_ZERO);
4503 if (md->pmd_pcpu_init)
4504 error = md->pmd_pcpu_init(md, cpu);
4505 for (n = 0; error == 0 && n < md->pmd_nclass; n++)
4506 error = md->pmd_classdep[n].pcd_pcpu_init(md, cpu);
4507 }
4508 pmc_restore_cpu_binding(&pb);
4509
4510 if (error)
4511 return (error);
4512
4513 /* allocate space for the sample array */
4514 for (cpu = 0; cpu < maxcpu; cpu++) {
4515 if (!pmc_cpu_is_active(cpu))
4516 continue;
4517
4518 sb = malloc(sizeof(struct pmc_samplebuffer) +
4519 pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
4520 M_WAITOK|M_ZERO);
4521 sb->ps_read = sb->ps_write = sb->ps_samples;
4522 sb->ps_fence = sb->ps_samples + pmc_nsamples;
4523
4524 KASSERT(pmc_pcpu[cpu] != NULL,
4525 ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
4526
4527 sb->ps_callchains = malloc(pmc_callchaindepth * pmc_nsamples *
4528 sizeof(uintptr_t), M_PMC, M_WAITOK|M_ZERO);
4529
4530 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
4531 ps->ps_pc = sb->ps_callchains +
4532 (n * pmc_callchaindepth);
4533
4534 pmc_pcpu[cpu]->pc_sb = sb;
4535 }
4536
4537 /* allocate space for the row disposition array */
4538 pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
4539 M_PMC, M_WAITOK|M_ZERO);
4540
4541 KASSERT(pmc_pmcdisp != NULL,
4542 ("[pmc,%d] pmcdisp allocation returned NULL", __LINE__));
4543
4544 /* mark all PMCs as available */
4545 for (n = 0; n < (int) md->pmd_npmc; n++)
4546 PMC_MARK_ROW_FREE(n);
4547
4548 /* allocate thread hash tables */
4549 pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
4550 &pmc_ownerhashmask);
4551
4552 pmc_processhash = hashinit(pmc_hashsize, M_PMC,
4553 &pmc_processhashmask);
4554 mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
4555 MTX_SPIN);
4556
4557 LIST_INIT(&pmc_ss_owners);
4558 pmc_ss_count = 0;
4559
4560 /* allocate a pool of spin mutexes */
4561 pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
4562 MTX_SPIN);
4563
4564 PMCDBG(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
4565 "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
4566 pmc_processhash, pmc_processhashmask);
4567
4568 /* register process {exit,fork,exec} handlers */
4569 pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
4570 pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
4571 pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
4572 pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
4573
4574 /* initialize logging */
4575 pmclog_initialize();
4576
4577 /* set hook functions */
4578 pmc_intr = md->pmd_intr;
4579 pmc_hook = pmc_hook_handler;
4580
4581 if (error == 0) {
4582 printf(PMC_MODULE_NAME ":");
4583 for (n = 0; n < (int) md->pmd_nclass; n++) {
4584 pcd = &md->pmd_classdep[n];
4585 printf(" %s/%d/%d/0x%b",
4586 pmc_name_of_pmcclass[pcd->pcd_class],
4587 pcd->pcd_num,
4588 pcd->pcd_width,
4589 pcd->pcd_caps,
4590 "\20"
4591 "\1INT\2USR\3SYS\4EDG\5THR"
4592 "\6REA\7WRI\10INV\11QUA\12PRC"
4593 "\13TAG\14CSC");
4594 }
4595 printf("\n");
4596 }
4597
4598 return (error);
4599}
4600
4601/* prepare to be unloaded */
4602static void
4603pmc_cleanup(void)
4604{
4605 int c, cpu;
4606 unsigned int maxcpu;
4607 struct pmc_ownerhash *ph;
4608 struct pmc_owner *po, *tmp;
4609 struct pmc_binding pb;
4610#ifdef DEBUG
4611 struct pmc_processhash *prh;
4612#endif
4613
4614 PMCDBG(MOD,INI,0, "%s", "cleanup");
4615
4616 /* switch off sampling */
4617 atomic_store_rel_int(&pmc_cpumask, 0);
4618 pmc_intr = NULL;
4619
4620 sx_xlock(&pmc_sx);
4621 if (pmc_hook == NULL) { /* being unloaded already */
4622 sx_xunlock(&pmc_sx);
4623 return;
4624 }
4625
4626 pmc_hook = NULL; /* prevent new threads from entering module */
4627
4628 /* deregister event handlers */
4629 EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
4630 EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
4631
4632 /* send SIGBUS to all owner threads, free up allocations */
4633 if (pmc_ownerhash)
4634 for (ph = pmc_ownerhash;
4635 ph <= &pmc_ownerhash[pmc_ownerhashmask];
4636 ph++) {
4637 LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
4638 pmc_remove_owner(po);
4639
4640 /* send SIGBUS to owner processes */
4641 PMCDBG(MOD,INI,2, "cleanup signal proc=%p "
4642 "(%d, %s)", po->po_owner,
4643 po->po_owner->p_pid,
4644 po->po_owner->p_comm);
4645
4646 PROC_LOCK(po->po_owner);
4647 psignal(po->po_owner, SIGBUS);
4648 PROC_UNLOCK(po->po_owner);
4649
4650 pmc_destroy_owner_descriptor(po);
4651 }
4652 }
4653
4654 /* reclaim allocated data structures */
4655 if (pmc_mtxpool)
4656 mtx_pool_destroy(&pmc_mtxpool);
4657
4658 mtx_destroy(&pmc_processhash_mtx);
4659 if (pmc_processhash) {
4660#ifdef DEBUG
4661 struct pmc_process *pp;
4662
4663 PMCDBG(MOD,INI,3, "%s", "destroy process hash");
4664 for (prh = pmc_processhash;
4665 prh <= &pmc_processhash[pmc_processhashmask];
4666 prh++)
4667 LIST_FOREACH(pp, prh, pp_next)
4668 PMCDBG(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
4669#endif
4670
4671 hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
4672 pmc_processhash = NULL;
4673 }
4674
4675 if (pmc_ownerhash) {
4676 PMCDBG(MOD,INI,3, "%s", "destroy owner hash");
4677 hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
4678 pmc_ownerhash = NULL;
4679 }
4680
4681 KASSERT(LIST_EMPTY(&pmc_ss_owners),
4682 ("[pmc,%d] Global SS owner list not empty", __LINE__));
4683 KASSERT(pmc_ss_count == 0,
4684 ("[pmc,%d] Global SS count not empty", __LINE__));
4685
4686 /* do processor and pmc-class dependent cleanup */
4687 maxcpu = pmc_cpu_max();
4688
4689 PMCDBG(MOD,INI,3, "%s", "md cleanup");
4690 if (md) {
4691 pmc_save_cpu_binding(&pb);
4692 for (cpu = 0; cpu < maxcpu; cpu++) {
4693 PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
4694 cpu, pmc_pcpu[cpu]);
4695 if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL)
4696 continue;
4697 pmc_select_cpu(cpu);
4698 for (c = 0; c < md->pmd_nclass; c++)
4699 md->pmd_classdep[c].pcd_pcpu_fini(md, cpu);
4700 if (md->pmd_pcpu_fini)
4701 md->pmd_pcpu_fini(md, cpu);
4702 }
4703
4704 pmc_md_finalize(md);
4705
4706 free(md, M_PMC);
4707 md = NULL;
4708 pmc_restore_cpu_binding(&pb);
4709 }
4710
4711 /* Free per-cpu descriptors. */
4712 for (cpu = 0; cpu < maxcpu; cpu++) {
4713 if (!pmc_cpu_is_active(cpu))
4714 continue;
4715 KASSERT(pmc_pcpu[cpu]->pc_sb != NULL,
4716 ("[pmc,%d] Null cpu sample buffer cpu=%d", __LINE__,
4717 cpu));
4718 free(pmc_pcpu[cpu]->pc_sb->ps_callchains, M_PMC);
4719 free(pmc_pcpu[cpu]->pc_sb, M_PMC);
4720 free(pmc_pcpu[cpu], M_PMC);
4721 }
4722
4723 free(pmc_pcpu, M_PMC);
4724 pmc_pcpu = NULL;
4725
4726 free(pmc_pcpu_saved, M_PMC);
4727 pmc_pcpu_saved = NULL;
4728
4729 if (pmc_pmcdisp) {
4730 free(pmc_pmcdisp, M_PMC);
4731 pmc_pmcdisp = NULL;
4732 }
4733
4734 if (pmc_rowindex_to_classdep) {
4735 free(pmc_rowindex_to_classdep, M_PMC);
4736 pmc_rowindex_to_classdep = NULL;
4737 }
4738
4739 pmclog_shutdown();
4740
4741 sx_xunlock(&pmc_sx); /* we are done */
4742}
4743
4744/*
4745 * The function called at load/unload.
4746 */
4747
4748static int
4749load (struct module *module __unused, int cmd, void *arg __unused)
4750{
4751 int error;
4752
4753 error = 0;
4754
4755 switch (cmd) {
4756 case MOD_LOAD :
4757 /* initialize the subsystem */
4758 error = pmc_initialize();
4759 if (error != 0)
4760 break;
4761 PMCDBG(MOD,INI,1, "syscall=%d maxcpu=%d",
4762 pmc_syscall_num, pmc_cpu_max());
4763 break;
4764
4765
4766 case MOD_UNLOAD :
4767 case MOD_SHUTDOWN:
4768 pmc_cleanup();
4769 PMCDBG(MOD,INI,1, "%s", "unloaded");
4770 break;
4771
4772 default :
4773 error = EINVAL; /* XXX should panic(9) */
4774 break;
4775 }
4776
4777 return error;
4778}
4779
4780/* memory pool */
4781MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");