hwpmc_mod.c (147708) → hwpmc_mod.c (147867)
1/*-
2 * Copyright (c) 2003-2005 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/dev/hwpmc/hwpmc_mod.c 147708 2005-06-30 19:01:26Z jkoshy $");
29__FBSDID("$FreeBSD: head/sys/dev/hwpmc/hwpmc_mod.c 147867 2005-07-09 17:29:36Z jkoshy $");
30
31#include <sys/param.h>
32#include <sys/eventhandler.h>
33#include <sys/jail.h>
34#include <sys/kernel.h>
35#include <sys/kthread.h>
36#include <sys/limits.h>
37#include <sys/lock.h>
38#include <sys/malloc.h>
39#include <sys/module.h>
40#include <sys/mutex.h>
41#include <sys/pmc.h>
42#include <sys/pmckern.h>
43#include <sys/pmclog.h>
44#include <sys/proc.h>
45#include <sys/queue.h>
46#include <sys/resourcevar.h>
47#include <sys/sched.h>
48#include <sys/signalvar.h>
49#include <sys/smp.h>
50#include <sys/sx.h>
51#include <sys/sysctl.h>
52#include <sys/sysent.h>
53#include <sys/systm.h>
54#include <sys/vnode.h>
55
56#include <machine/atomic.h>
57#include <machine/md_var.h>
58
59/*
60 * Types
61 */
62
63enum pmc_flags {
64 PMC_FLAG_NONE = 0x00, /* do nothing */
65 PMC_FLAG_REMOVE = 0x01, /* atomically remove entry from hash */
66 PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
67};
68
69/*
70 * The offset in sysent where the syscall is allocated.
71 */
72
73static int pmc_syscall_num = NO_SYSCALL;
74struct pmc_cpu **pmc_pcpu; /* per-cpu state */
75pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */
76
77#define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
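/*
 * Illustrative note (not in the original source): pmc_pcpu_saved[] is
 * a flat array indexed row-major by CPU. With md->pmd_npmc == 4,
 * PMC_PCPU_SAVED(2,1) expands to pmc_pcpu_saved[1 + 4*2], i.e. the
 * saved value for row index 1 on CPU 2.
 */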
78
79struct mtx_pool *pmc_mtxpool;
80static int *pmc_pmcdisp; /* PMC row dispositions */
81
82#define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0)
83#define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0)
84#define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0)
85
86#define PMC_MARK_ROW_FREE(R) do { \
87 pmc_pmcdisp[(R)] = 0; \
88} while (0)
89
90#define PMC_MARK_ROW_STANDALONE(R) do { \
91 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
92 __LINE__)); \
93 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
94 KASSERT(pmc_pmcdisp[(R)] >= (-mp_ncpus), ("[pmc,%d] row " \
95 "disposition error", __LINE__)); \
96} while (0)
97
98#define PMC_UNMARK_ROW_STANDALONE(R) do { \
99 atomic_add_int(&pmc_pmcdisp[(R)], 1); \
100 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
101 __LINE__)); \
102} while (0)
103
104#define PMC_MARK_ROW_THREAD(R) do { \
105 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
106 __LINE__)); \
107 atomic_add_int(&pmc_pmcdisp[(R)], 1); \
108} while (0)
109
110#define PMC_UNMARK_ROW_THREAD(R) do { \
111 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
112 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
113 __LINE__)); \
114} while (0)
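/*
 * Illustrative note (added): pmc_pmcdisp[R] encodes how row R is in
 * use. Zero means the row is free; a positive count N means N
 * process-mode PMCs use it; a negative count -N means N CPUs have a
 * system-wide PMC allocated on it. For example, allocating a
 * system-wide PMC on row R on two CPUs leaves pmc_pmcdisp[R] == -2.
 */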
115
116
117/* various event handlers */
118static eventhandler_tag pmc_exit_tag, pmc_fork_tag;
119
120/* Module statistics */
121struct pmc_op_getdriverstats pmc_stats;
122
123/* Machine/processor dependent operations */
124struct pmc_mdep *md;
125
126/*
 127 * Hash tables mapping owner and target processes to PMCs.
128 */
129
130struct mtx pmc_processhash_mtx; /* spin mutex */
131static u_long pmc_processhashmask;
132static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;
133
134/*
135 * Hash table of PMC owner descriptors. This table is protected by
136 * the shared PMC "sx" lock.
137 */
138
139static u_long pmc_ownerhashmask;
140static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;
141
142/*
143 * List of PMC owners with system-wide sampling PMCs.
144 */
145
146static LIST_HEAD(, pmc_owner) pmc_ss_owners;
147
148
149/*
150 * Prototypes
151 */
152
153#if DEBUG
154static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
155static int pmc_debugflags_parse(char *newstr, char *fence);
156#endif
157
158static int load(struct module *module, int cmd, void *arg);
159static int pmc_attach_process(struct proc *p, struct pmc *pm);
160static struct pmc *pmc_allocate_pmc_descriptor(void);
161static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
162static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
163static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
164 int cpu);
165static int pmc_can_attach(struct pmc *pm, struct proc *p);
166static void pmc_cleanup(void);
167static int pmc_detach_process(struct proc *p, struct pmc *pm);
168static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
169 int flags);
170static void pmc_destroy_owner_descriptor(struct pmc_owner *po);
171static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
172static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
173static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
174 pmc_id_t pmc);
175static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
176 uint32_t mode);
177static void pmc_force_context_switch(void);
178static void pmc_link_target_process(struct pmc *pm,
179 struct pmc_process *pp);
180static void pmc_maybe_remove_owner(struct pmc_owner *po);
181static void pmc_process_csw_in(struct thread *td);
182static void pmc_process_csw_out(struct thread *td);
183static void pmc_process_exit(void *arg, struct proc *p);
184static void pmc_process_fork(void *arg, struct proc *p1,
185 struct proc *p2, int n);
186static void pmc_process_samples(int cpu);
187static void pmc_release_pmc_descriptor(struct pmc *pmc);
188static void pmc_remove_owner(struct pmc_owner *po);
189static void pmc_remove_process_descriptor(struct pmc_process *pp);
190static void pmc_restore_cpu_binding(struct pmc_binding *pb);
191static void pmc_save_cpu_binding(struct pmc_binding *pb);
192static void pmc_select_cpu(int cpu);
193static int pmc_start(struct pmc *pm);
194static int pmc_stop(struct pmc *pm);
195static int pmc_syscall_handler(struct thread *td, void *syscall_args);
196static void pmc_unlink_target_process(struct pmc *pmc,
197 struct pmc_process *pp);
198
199/*
200 * Kernel tunables and sysctl(8) interface.
201 */
202
203SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");
204
205#if DEBUG
206struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
207char pmc_debugstr[PMC_DEBUG_STRSIZE];
208TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
209 sizeof(pmc_debugstr));
210SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
211 CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
212 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
213#endif
214
215/*
 216 * kern.hwpmc.hashsize -- determines the number of rows in the
 217 * hash tables used to look up owner and target processes
218 */
219
220static int pmc_hashsize = PMC_HASH_SIZE;
221TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
222SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
223 &pmc_hashsize, 0, "rows in hash tables");
224
225/*
226 * kern.hwpmc.nsamples --- number of PC samples per CPU
227 */
228
229static int pmc_nsamples = PMC_NSAMPLES;
230TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
231SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
232 &pmc_nsamples, 0, "number of PC samples per CPU");
233
234/*
235 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
236 */
237
238static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
239TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
240SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
241 &pmc_mtxpool_size, 0, "size of spin mutex pool");
242
243
244/*
245 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
246 * allocate system-wide PMCs.
247 *
248 * Allowing unprivileged processes to allocate system PMCs is convenient
249 * if system-wide measurements need to be taken concurrently with other
250 * per-process measurements. This feature is turned off by default.
251 */
252
253SYSCTL_DECL(_security_bsd);
254
255static int pmc_unprivileged_syspmcs = 0;
256TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
257SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
258 &pmc_unprivileged_syspmcs, 0,
259 "allow unprivileged process to allocate system PMCs");
260
261/*
262 * Hash function. Discard the lower 2 bits of the pointer since
263 * these are always zero for our uses. The hash multiplier is
264 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
265 */
266
267#if LONG_BIT == 64
268#define _PMC_HM 11400714819323198486u
269#elif LONG_BIT == 32
270#define _PMC_HM 2654435769u
271#else
272#error Must know the size of 'long' to compile
273#endif
274
275#define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
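/*
 * Usage sketch (added for illustration): PMC_HASH_PTR() picks a
 * bucket in a power-of-two sized table, where the mask 'M' is
 * (2^n - 1) for a table with 2^n rows, e.g.:
 *
 *	struct pmc_processhash *ph =
 *	    &pmc_processhash[PMC_HASH_PTR(p, pmc_processhashmask)];
 */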
276
277/*
278 * Syscall structures
279 */
280
281/* The `sysent' for the new syscall */
282static struct sysent pmc_sysent = {
283 2, /* sy_narg */
284 pmc_syscall_handler /* sy_call */
285};
286
287static struct syscall_module_data pmc_syscall_mod = {
288 load,
289 NULL,
290 &pmc_syscall_num,
291 &pmc_sysent,
292 { 0, NULL }
293};
294
295static moduledata_t pmc_mod = {
296 PMC_MODULE_NAME,
297 syscall_module_handler,
298 &pmc_syscall_mod
299};
300
301DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
302MODULE_VERSION(pmc, PMC_VERSION);
303
304#if DEBUG
305enum pmc_dbgparse_state {
306 PMCDS_WS, /* in whitespace */
307 PMCDS_MAJOR, /* seen a major keyword */
308 PMCDS_MINOR
309};
310
311static int
312pmc_debugflags_parse(char *newstr, char *fence)
313{
314 char c, *p, *q;
315 struct pmc_debugflags *tmpflags;
316 int error, found, *newbits, tmp;
317 size_t kwlen;
318
319 MALLOC(tmpflags, struct pmc_debugflags *, sizeof(*tmpflags),
320 M_PMC, M_WAITOK|M_ZERO);
321
322 p = newstr;
323 error = 0;
324
325 for (; p < fence && (c = *p); p++) {
326
327 /* skip white space */
328 if (c == ' ' || c == '\t')
329 continue;
330
331 /* look for a keyword followed by "=" */
332 for (q = p; p < fence && (c = *p) && c != '='; p++)
333 ;
334 if (c != '=') {
335 error = EINVAL;
336 goto done;
337 }
338
339 kwlen = p - q;
340 newbits = NULL;
341
342 /* lookup flag group name */
343#define DBG_SET_FLAG_MAJ(S,F) \
344 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
345 newbits = &tmpflags->pdb_ ## F;
346
347 DBG_SET_FLAG_MAJ("cpu", CPU);
348 DBG_SET_FLAG_MAJ("csw", CSW);
349 DBG_SET_FLAG_MAJ("logging", LOG);
350 DBG_SET_FLAG_MAJ("module", MOD);
351 DBG_SET_FLAG_MAJ("md", MDP);
352 DBG_SET_FLAG_MAJ("owner", OWN);
353 DBG_SET_FLAG_MAJ("pmc", PMC);
354 DBG_SET_FLAG_MAJ("process", PRC);
355 DBG_SET_FLAG_MAJ("sampling", SAM);
356
357 if (newbits == NULL) {
358 error = EINVAL;
359 goto done;
360 }
361
362 p++; /* skip the '=' */
363
364 /* Now parse the individual flags */
365 tmp = 0;
366 newflag:
367 for (q = p; p < fence && (c = *p); p++)
368 if (c == ' ' || c == '\t' || c == ',')
369 break;
370
371 /* p == fence or c == ws or c == "," or c == 0 */
372
373 if ((kwlen = p - q) == 0) {
374 *newbits = tmp;
375 continue;
376 }
377
378 found = 0;
379#define DBG_SET_FLAG_MIN(S,F) \
380 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
381 tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)
382
383 /* a '*' denotes all possible flags in the group */
384 if (kwlen == 1 && *q == '*')
385 tmp = found = ~0;
386 /* look for individual flag names */
387 DBG_SET_FLAG_MIN("allocaterow", ALR);
388 DBG_SET_FLAG_MIN("allocate", ALL);
389 DBG_SET_FLAG_MIN("attach", ATT);
390 DBG_SET_FLAG_MIN("bind", BND);
391 DBG_SET_FLAG_MIN("config", CFG);
392 DBG_SET_FLAG_MIN("exec", EXC);
393 DBG_SET_FLAG_MIN("exit", EXT);
394 DBG_SET_FLAG_MIN("find", FND);
395 DBG_SET_FLAG_MIN("flush", FLS);
396 DBG_SET_FLAG_MIN("fork", FRK);
397 DBG_SET_FLAG_MIN("getbuf", GTB);
398 DBG_SET_FLAG_MIN("hook", PMH);
399 DBG_SET_FLAG_MIN("init", INI);
400 DBG_SET_FLAG_MIN("intr", INT);
401 DBG_SET_FLAG_MIN("linktarget", TLK);
402 DBG_SET_FLAG_MIN("mayberemove", OMR);
403 DBG_SET_FLAG_MIN("ops", OPS);
404 DBG_SET_FLAG_MIN("read", REA);
405 DBG_SET_FLAG_MIN("register", REG);
406 DBG_SET_FLAG_MIN("release", REL);
407 DBG_SET_FLAG_MIN("remove", ORM);
408 DBG_SET_FLAG_MIN("sample", SAM);
409 DBG_SET_FLAG_MIN("scheduleio", SIO);
410 DBG_SET_FLAG_MIN("select", SEL);
411 DBG_SET_FLAG_MIN("signal", SIG);
412 DBG_SET_FLAG_MIN("swi", SWI);
413 DBG_SET_FLAG_MIN("swo", SWO);
414 DBG_SET_FLAG_MIN("start", STA);
415 DBG_SET_FLAG_MIN("stop", STO);
416 DBG_SET_FLAG_MIN("syscall", PMS);
417 DBG_SET_FLAG_MIN("unlinktarget", TUL);
418 DBG_SET_FLAG_MIN("write", WRI);
419 if (found == 0) {
420 /* unrecognized flag name */
421 error = EINVAL;
422 goto done;
423 }
424
425 if (c == 0 || c == ' ' || c == '\t') { /* end of flag group */
426 *newbits = tmp;
427 continue;
428 }
429
430 p++;
431 goto newflag;
432 }
433
434 /* save the new flag set */
435 bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));
436
437 done:
438 FREE(tmpflags, M_PMC);
439 return error;
440}
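/*
 * Example input (added for illustration) as accepted by the parser
 * above: setting the tunable to
 *
 *	kern.hwpmc.debugflags="cpu=bind,select process=*"
 *
 * enables the "bind" and "select" minor flags in the "cpu" group and
 * every minor flag in the "process" group; groups are whitespace
 * separated and minor flags comma separated.
 */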
441
442static int
443pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
444{
445 char *fence, *newstr;
446 int error;
447 unsigned int n;
448
449 (void) arg1; (void) arg2; /* unused parameters */
450
451 n = sizeof(pmc_debugstr);
452 MALLOC(newstr, char *, n, M_PMC, M_ZERO|M_WAITOK);
453 (void) strlcpy(newstr, pmc_debugstr, n);
454
455 error = sysctl_handle_string(oidp, newstr, n, req);
456
457 /* if there is a new string, parse and copy it */
458 if (error == 0 && req->newptr != NULL) {
459 fence = newstr + (n < req->newlen ? n : req->newlen + 1);
460 if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
461 (void) strlcpy(pmc_debugstr, newstr,
462 sizeof(pmc_debugstr));
463 }
464
465 FREE(newstr, M_PMC);
466
467 return error;
468}
469#endif
470
471/*
472 * Concurrency Control
473 *
474 * The driver manages the following data structures:
475 *
476 * - target process descriptors, one per target process
477 * - owner process descriptors (and attached lists), one per owner process
478 * - lookup hash tables for owner and target processes
479 * - PMC descriptors (and attached lists)
480 * - per-cpu hardware state
481 * - the 'hook' variable through which the kernel calls into
482 * this module
483 * - the machine hardware state (managed by the MD layer)
484 *
485 * These data structures are accessed from:
486 *
487 * - thread context-switch code
488 * - interrupt handlers (possibly on multiple cpus)
489 * - kernel threads on multiple cpus running on behalf of user
490 * processes doing system calls
491 * - this driver's private kernel threads
492 *
493 * = Locks and Locking strategy =
494 *
495 * The driver uses four locking strategies for its operation:
496 *
497 * - There is a 'global' SX lock "pmc_sx" that is used to protect
 498 * its 'meta-data'.
499 *
500 * Calls into the module (via syscall() or by the kernel) start with
501 * this lock being held in exclusive mode. Depending on the requested
502 * operation, the lock may be downgraded to 'shared' mode to allow
503 * more concurrent readers into the module.
504 *
505 * This SX lock is held in exclusive mode for any operations that
506 * modify the linkages between the driver's internal data structures.
507 *
508 * The 'pmc_hook' function pointer is also protected by this lock.
509 * It is only examined with the sx lock held in exclusive mode. The
510 * kernel module is allowed to be unloaded only with the sx lock
511 * held in exclusive mode. In normal syscall handling, after
512 * acquiring the pmc_sx lock we first check that 'pmc_hook' is
513 * non-null before proceeding. This prevents races between the
514 * thread unloading the module and other threads seeking to use the
515 * module.
516 *
517 * - Lookups of target process structures and owner process structures
518 * cannot use the global "pmc_sx" SX lock because these lookups need
519 * to happen during context switches and in other critical sections
520 * where sleeping is not allowed. We protect these lookup tables
521 * with their own private spin-mutexes, "pmc_processhash_mtx" and
522 * "pmc_ownerhash_mtx". These are 'leaf' mutexes, in that no other
523 * lock is acquired with these locks held.
524 *
525 * - Interrupt handlers work in a lock free manner. At interrupt
526 * time, handlers look at the PMC pointer (phw->phw_pmc) configured
527 * when the PMC was started. If this pointer is NULL, the interrupt
528 * is ignored after updating driver statistics. We ensure that this
529 * pointer is set (using an atomic operation if necessary) before the
530 * PMC hardware is started. Conversely, this pointer is unset atomically
531 * only after the PMC hardware is stopped.
532 *
533 * We ensure that everything needed for the operation of an
534 * interrupt handler is available without it needing to acquire any
535 * locks. We also ensure that a PMC's software state is destroyed only
536 * after the PMC is taken off hardware (on all CPUs).
537 *
538 * - Context-switch handling with process-private PMCs needs more
539 * care.
540 *
541 * A given process may be the target of multiple PMCs. For example,
542 * PMCATTACH and PMCDETACH may be requested by a process on one CPU
543 * while the target process is running on another. A PMC could also
544 * be getting released because its owner is exiting. We tackle
545 * these situations in the following manner:
546 *
547 * - each target process structure 'pmc_process' has an array
548 * of 'struct pmc *' pointers, one for each hardware PMC.
549 *
550 * - At context switch IN time, each "target" PMC in RUNNING state
551 * gets started on hardware and a pointer to each PMC is copied into
552 * the per-cpu phw array. The 'runcount' for the PMC is
553 * incremented.
554 *
555 * - At context switch OUT time, all process-virtual PMCs are stopped
 556 * on hardware. The saved value is added to the PMC's value field
 557 * only if the PMC is in a non-deleted state (the PMC's state could
 558 * have changed during the current time slice).
559 *
 560 * Note that in between a switch IN on a processor and the matching
 561 * switch OUT, the PMC could have been released on another CPU.
 562 * Therefore context switch OUT always looks at the hardware state
 563 * to turn OFF PMCs, and updates a PMC's saved value only if it is
 564 * still reachable from the target process record.
565 *
566 * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
567 * be attached to many processes at the time of the call and could
568 * be active on multiple CPUs).
569 *
570 * We prevent further scheduling of the PMC by marking it as in
571 * state 'DELETED'. If the runcount of the PMC is non-zero then
572 * this PMC is currently running on a CPU somewhere. The thread
 573 * doing the PMCRELEASE operation waits by repeatedly doing a
 574 * tsleep() until the runcount comes to zero.
575 *
576 */
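/*
 * A minimal sketch (added; the real logic lives in
 * pmc_release_pmc_descriptor()) of the PMCRELEASE wait described
 * above:
 *
 *	pm->pm_state = PMC_STATE_DELETED; // block further scheduling
 *	while (pm->pm_runcount > 0)       // still live on some CPU
 *		(void) tsleep(pm, 0, "pmcrel", 1); // retry next tick
 */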
577
578/*
579 * save the cpu binding of the current kthread
580 */
581
582static void
583pmc_save_cpu_binding(struct pmc_binding *pb)
584{
585 PMCDBG(CPU,BND,2, "%s", "save-cpu");
586 mtx_lock_spin(&sched_lock);
587 pb->pb_bound = sched_is_bound(curthread);
588 pb->pb_cpu = curthread->td_oncpu;
589 mtx_unlock_spin(&sched_lock);
590 PMCDBG(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
591}
592
593/*
594 * restore the cpu binding of the current thread
595 */
596
597static void
598pmc_restore_cpu_binding(struct pmc_binding *pb)
599{
600 PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
601 curthread->td_oncpu, pb->pb_cpu);
602 mtx_lock_spin(&sched_lock);
603 if (pb->pb_bound)
604 sched_bind(curthread, pb->pb_cpu);
605 else
606 sched_unbind(curthread);
607 mtx_unlock_spin(&sched_lock);
608 PMCDBG(CPU,BND,2, "%s", "restore-cpu done");
609}
610
611/*
 612 * move execution to the specified cpu and bind the current thread there.
613 */
614
615static void
616pmc_select_cpu(int cpu)
617{
618 KASSERT(cpu >= 0 && cpu < mp_ncpus,
619 ("[pmc,%d] bad cpu number %d", __LINE__, cpu));
620
621 /* never move to a disabled CPU */
622 KASSERT(pmc_cpu_is_disabled(cpu) == 0, ("[pmc,%d] selecting "
623 "disabled CPU %d", __LINE__, cpu));
624
625 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
626 mtx_lock_spin(&sched_lock);
627 sched_bind(curthread, cpu);
628 mtx_unlock_spin(&sched_lock);
629
630 KASSERT(curthread->td_oncpu == cpu,
631 ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
632 cpu, curthread->td_oncpu));
633
634 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
635}
636
637/*
638 * Force a context switch.
639 *
640 * We do this by tsleep'ing for 1 tick -- invoking mi_switch() is not
641 * guaranteed to force a context switch.
642 */
643
644static void
645pmc_force_context_switch(void)
646{
647 u_char curpri;
648
649 mtx_lock_spin(&sched_lock);
650 curpri = curthread->td_priority;
651 mtx_unlock_spin(&sched_lock);
652
653 (void) tsleep((void *) pmc_force_context_switch, curpri,
654 "pmcctx", 1);
655
656}
657
658/*
659 * Get the file name for an executable. This is a simple wrapper
660 * around vn_fullpath(9).
661 */
662
663static void
664pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
665{
666 struct thread *td;
667
668 td = curthread;
669 *fullpath = "unknown";
670 *freepath = NULL;
671 vn_lock(v, LK_EXCLUSIVE | LK_RETRY, td);
672 vn_fullpath(td, v, fullpath, freepath);
673 VOP_UNLOCK(v, 0, td);
674}
675
676/*
 677 * remove a process that owns PMCs
678 */
679
680void
681pmc_remove_owner(struct pmc_owner *po)
682{
683 struct pmc *pm, *tmp;
684
685 sx_assert(&pmc_sx, SX_XLOCKED);
686
687 PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);
688
689 /* Remove descriptor from the owner hash table */
690 LIST_REMOVE(po, po_next);
691
692 /* release all owned PMC descriptors */
693 LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
694 PMCDBG(OWN,ORM,2, "pmc=%p", pm);
695 KASSERT(pm->pm_owner == po,
696 ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));
697
698 pmc_release_pmc_descriptor(pm); /* will unlink from the list */
699 }
700
701 KASSERT(po->po_sscount == 0,
702 ("[pmc,%d] SS count not zero", __LINE__));
703 KASSERT(LIST_EMPTY(&po->po_pmcs),
704 ("[pmc,%d] PMC list not empty", __LINE__));
705
706 /* de-configure the log file if present */
707 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
708 pmclog_deconfigure_log(po);
709}
710
711/*
712 * remove an owner process record if all conditions are met.
713 */
714
715static void
716pmc_maybe_remove_owner(struct pmc_owner *po)
717{
718
719 PMCDBG(OWN,OMR,1, "maybe-remove-owner po=%p", po);
720
721 /*
722 * Remove owner record if
723 * - this process does not own any PMCs
 724 * - this process has not configured a log file
725 */
726
727 if (LIST_EMPTY(&po->po_pmcs) &&
728 ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
729 pmc_remove_owner(po);
730 pmc_destroy_owner_descriptor(po);
731 }
732}
733
734/*
735 * Add an association between a target process and a PMC.
736 */
737
738static void
739pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
740{
741 int ri;
742 struct pmc_target *pt;
743
744 sx_assert(&pmc_sx, SX_XLOCKED);
745
746 KASSERT(pm != NULL && pp != NULL,
747 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
748 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
749 ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
750 __LINE__, pm, pp->pp_proc->p_pid));
751 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < ((int) md->pmd_npmc - 1),
752 ("[pmc,%d] Illegal reference count %d for process record %p",
753 __LINE__, pp->pp_refcnt, (void *) pp));
754
755 ri = PMC_TO_ROWINDEX(pm);
756
757 PMCDBG(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
758 pm, ri, pp);
759
760#if DEBUG
761 LIST_FOREACH(pt, &pm->pm_targets, pt_next)
762 if (pt->pt_process == pp)
763 KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
764 __LINE__, pp, pm));
765#endif
766
767 MALLOC(pt, struct pmc_target *, sizeof(struct pmc_target),
768 M_PMC, M_ZERO|M_WAITOK);
769
770 pt->pt_process = pp;
771
772 LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
773
774 atomic_store_rel_ptr(&pp->pp_pmcs[ri].pp_pmc, pm);
775
776 if (pm->pm_owner->po_owner == pp->pp_proc)
777 pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;
778
779 /*
780 * Initialize the per-process values at this row index.
781 */
782 pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
783 pm->pm_sc.pm_reloadcount : 0;
784
785 pp->pp_refcnt++;
786
787}
788
789/*
790 * Removes the association between a target process and a PMC.
791 */
792
793static void
794pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
795{
796 int ri;
797 struct proc *p;
798 struct pmc_target *ptgt;
799
800 sx_assert(&pmc_sx, SX_XLOCKED);
801
802 KASSERT(pm != NULL && pp != NULL,
803 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
804
805 KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt < (int) md->pmd_npmc,
806 ("[pmc,%d] Illegal ref count %d on process record %p",
807 __LINE__, pp->pp_refcnt, (void *) pp));
808
809 ri = PMC_TO_ROWINDEX(pm);
810
811 PMCDBG(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
812 pm, ri, pp);
813
814 KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
815 ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
816 ri, pm, pp->pp_pmcs[ri].pp_pmc));
817
818 pp->pp_pmcs[ri].pp_pmc = NULL;
819 pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;
820
821 /* Remove owner-specific flags */
822 if (pm->pm_owner->po_owner == pp->pp_proc) {
823 pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
824 pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
825 }
826
827 pp->pp_refcnt--;
828
829 /* Remove the target process from the PMC structure */
830 LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
831 if (ptgt->pt_process == pp)
832 break;
833
834 KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
835 "in pmc %p", __LINE__, pp->pp_proc, pp, pm));
836
837 LIST_REMOVE(ptgt, pt_next);
838 FREE(ptgt, M_PMC);
839
840 /* if the PMC now lacks targets, send the owner a SIGIO */
841 if (LIST_EMPTY(&pm->pm_targets)) {
842 p = pm->pm_owner->po_owner;
843 PROC_LOCK(p);
844 psignal(p, SIGIO);
845 PROC_UNLOCK(p);
846
847 PMCDBG(PRC,SIG,2, "signalling proc=%p signal=%d", p,
848 SIGIO);
849 }
850}
851
852/*
853 * Check if PMC 'pm' may be attached to target process 't'.
854 */
855
856static int
857pmc_can_attach(struct pmc *pm, struct proc *t)
858{
859 struct proc *o; /* pmc owner */
860 struct ucred *oc, *tc; /* owner, target credentials */
861 int decline_attach, i;
862
863 /*
864 * A PMC's owner can always attach that PMC to itself.
865 */
866
867 if ((o = pm->pm_owner->po_owner) == t)
868 return 0;
869
870 PROC_LOCK(o);
871 oc = o->p_ucred;
872 crhold(oc);
873 PROC_UNLOCK(o);
874
875 PROC_LOCK(t);
876 tc = t->p_ucred;
877 crhold(tc);
878 PROC_UNLOCK(t);
879
880 /*
881 * The effective uid of the PMC owner should match at least one
882 * of the {effective,real,saved} uids of the target process.
883 */
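	/*
	 * (Added example) An owner with effective uid 100 may attach
	 * to a target whose real uid is 100 even if the target's
	 * effective uid differs, provided the group checks below
	 * also pass.
	 */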
884
885 decline_attach = oc->cr_uid != tc->cr_uid &&
886 oc->cr_uid != tc->cr_svuid &&
887 oc->cr_uid != tc->cr_ruid;
888
889 /*
890 * Every one of the target's group ids, must be in the owner's
891 * group list.
892 */
893 for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
894 decline_attach = !groupmember(tc->cr_groups[i], oc);
895
 896 /* check the real and saved gids too */
897 if (decline_attach == 0)
898 decline_attach = !groupmember(tc->cr_rgid, oc) ||
899 !groupmember(tc->cr_svgid, oc);
900
901 crfree(tc);
902 crfree(oc);
903
904 return !decline_attach;
905}
906
907/*
908 * Attach a process to a PMC.
909 */
910
911static int
912pmc_attach_one_process(struct proc *p, struct pmc *pm)
913{
914 int ri;
915 char *fullpath, *freepath;
916 struct pmc_process *pp;
917
918 sx_assert(&pmc_sx, SX_XLOCKED);
919
920 PMCDBG(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
921 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
922
923 /*
924 * Locate the process descriptor corresponding to process 'p',
925 * allocating space as needed.
926 *
 927 * Verify that rowindex 'pm_rowindex' is free in the process
 928 * descriptor.
 929 *
 930 * If the row index is free, link the process descriptor
 931 * and the PMC.
932 */
933 ri = PMC_TO_ROWINDEX(pm);
934
935 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL)
936 return ENOMEM;
937
938 if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */
939 return EEXIST;
940
941 if (pp->pp_pmcs[ri].pp_pmc != NULL)
942 return EBUSY;
943
944 pmc_link_target_process(pm, pp);
945
946 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
947 (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
948 pm->pm_flags |= PMC_F_NEEDS_LOGFILE;
949
950 pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */
951
952 /* issue an attach event to a configured log file */
953 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
954 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
955 pmclog_process_pmcattach(pm, p->p_pid, fullpath);
956 if (freepath)
957 FREE(freepath, M_TEMP);
958 }
959 /* mark process as using HWPMCs */
960 PROC_LOCK(p);
961 p->p_flag |= P_HWPMC;
962 PROC_UNLOCK(p);
963
964 return 0;
965}
966
967/*
968 * Attach a process and optionally its children
969 */
970
971static int
972pmc_attach_process(struct proc *p, struct pmc *pm)
973{
974 int error;
975 struct proc *top;
976
977 sx_assert(&pmc_sx, SX_XLOCKED);
978
979 PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
980 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
981
982
983 /*
984 * If this PMC successfully allowed a GETMSR operation
985 * in the past, disallow further ATTACHes.
986 */
987
988 if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
989 return EPERM;
990
991 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
992 return pmc_attach_one_process(p, pm);
993
994 /*
995 * Traverse all child processes, attaching them to
996 * this PMC.
997 */
998
999 sx_slock(&proctree_lock);
1000
1001 top = p;
1002
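	/*
	 * Iterative pre-order walk of the process tree (comment added
	 * for clarity): descend into the first child when one exists;
	 * otherwise climb back toward 'top', taking the next sibling
	 * at the first opportunity.
	 */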
1003 for (;;) {
1004 if ((error = pmc_attach_one_process(p, pm)) != 0)
1005 break;
1006 if (!LIST_EMPTY(&p->p_children))
1007 p = LIST_FIRST(&p->p_children);
1008 else for (;;) {
1009 if (p == top)
1010 goto done;
1011 if (LIST_NEXT(p, p_sibling)) {
1012 p = LIST_NEXT(p, p_sibling);
1013 break;
1014 }
1015 p = p->p_pptr;
1016 }
1017 }
1018
1019 if (error)
1020 (void) pmc_detach_process(top, pm);
1021
1022 done:
1023 sx_sunlock(&proctree_lock);
1024 return error;
1025}
1026
1027/*
1028 * Detach a process from a PMC. If there are no other PMCs tracking
1029 * this process, remove the process structure from its hash table. If
1030 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
1031 */
1032
1033static int
1034pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
1035{
1036 int ri;
1037 struct pmc_process *pp;
1038
1039 sx_assert(&pmc_sx, SX_XLOCKED);
1040
1041 KASSERT(pm != NULL,
1042 ("[pmc,%d] null pm pointer", __LINE__));
1043
1044 ri = PMC_TO_ROWINDEX(pm);
1045
1046 PMCDBG(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
1047 pm, ri, p, p->p_pid, p->p_comm, flags);
1048
1049 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
1050 return ESRCH;
1051
1052 if (pp->pp_pmcs[ri].pp_pmc != pm)
1053 return EINVAL;
1054
1055 pmc_unlink_target_process(pm, pp);
1056
1057 /* Issue a detach entry if a log file is configured */
1058 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
1059 pmclog_process_pmcdetach(pm, p->p_pid);
1060
1061 /*
 1062 * If there are no PMCs targeting this process, we remove its
1063 * descriptor from the target hash table and unset the P_HWPMC
1064 * flag in the struct proc.
1065 */
1066 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
1067 ("[pmc,%d] Illegal refcnt %d for process struct %p",
1068 __LINE__, pp->pp_refcnt, pp));
1069
1070 if (pp->pp_refcnt != 0) /* still a target of some PMC */
1071 return 0;
1072
1073 pmc_remove_process_descriptor(pp);
1074
1075 if (flags & PMC_FLAG_REMOVE)
1076 FREE(pp, M_PMC);
1077
1078 PROC_LOCK(p);
1079 p->p_flag &= ~P_HWPMC;
1080 PROC_UNLOCK(p);
1081
1082 return 0;
1083}
1084
1085/*
1086 * Detach a process and optionally its descendants from a PMC.
1087 */
1088
1089static int
1090pmc_detach_process(struct proc *p, struct pmc *pm)
1091{
1092 struct proc *top;
1093
1094 sx_assert(&pmc_sx, SX_XLOCKED);
1095
1096 PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
1097 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1098
1099 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1100 return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1101
1102 /*
1103 * Traverse all children, detaching them from this PMC. We
1104 * ignore errors since we could be detaching a PMC from a
1105 * partially attached proc tree.
1106 */
1107
1108 sx_slock(&proctree_lock);
1109
1110 top = p;
1111
1112 for (;;) {
1113 (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1114
1115 if (!LIST_EMPTY(&p->p_children))
1116 p = LIST_FIRST(&p->p_children);
1117 else for (;;) {
1118 if (p == top)
1119 goto done;
1120 if (LIST_NEXT(p, p_sibling)) {
1121 p = LIST_NEXT(p, p_sibling);
1122 break;
1123 }
1124 p = p->p_pptr;
1125 }
1126 }
1127
1128 done:
1129 sx_sunlock(&proctree_lock);
1130
1131 if (LIST_EMPTY(&pm->pm_targets))
1132 pm->pm_flags &= ~PMC_F_ATTACH_DONE;
1133
1134 return 0;
1135}
1136
1137
1138/*
1139 * Thread context switch IN
1140 */
1141
1142static void
1143pmc_process_csw_in(struct thread *td)
1144{
1145 int cpu;
1146 unsigned int ri;
1147 struct pmc *pm;
1148 struct proc *p;
1149 struct pmc_cpu *pc;
1150 struct pmc_hw *phw;
1151 struct pmc_process *pp;
1152 pmc_value_t newvalue;
1153
1154 p = td->td_proc;
1155
1156 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
1157 return;
1158
1159 KASSERT(pp->pp_proc == td->td_proc,
1160 ("[pmc,%d] not my thread state", __LINE__));
1161
1162 critical_enter(); /* no preemption from this point */
1163
1164 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1165
1166 PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1167 p->p_pid, p->p_comm, pp);
1168
1169 KASSERT(cpu >= 0 && cpu < mp_ncpus,
1170 ("[pmc,%d] wierd CPU id %d", __LINE__, cpu));
1171
1172 pc = pmc_pcpu[cpu];
1173
1174 for (ri = 0; ri < md->pmd_npmc; ri++) {
1175
1176 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
1177 continue;
1178
1179 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
1180 ("[pmc,%d] Target PMC in non-virtual mode (%d)",
1181 __LINE__, PMC_TO_MODE(pm)));
1182
1183 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1184 ("[pmc,%d] Row index mismatch pmc %d != ri %d",
1185 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1186
1187 /*
1188 * Only PMCs that are marked as 'RUNNING' need
1189 * be placed on hardware.
1190 */
1191
1192 if (pm->pm_state != PMC_STATE_RUNNING)
1193 continue;
1194
1195 /* increment PMC runcount */
1196 atomic_add_rel_32(&pm->pm_runcount, 1);
1197
1198 /* configure the HWPMC we are going to use. */
1199 md->pmd_config_pmc(cpu, ri, pm);
1200
1201 phw = pc->pc_hwpmcs[ri];
1202
1203 KASSERT(phw != NULL,
1204 ("[pmc,%d] null hw pointer", __LINE__));
1205
1206 KASSERT(phw->phw_pmc == pm,
1207 ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
1208 phw->phw_pmc, pm));
1209
1210 /*
1211 * Write out saved value and start the PMC.
1212 *
1213 * Sampling PMCs use a per-process value, while
1214 * counting mode PMCs use a per-pmc value that is
1215 * inherited across descendants.
1216 */
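		/*
		 * (Added note) A sampling PMC counts down to its next
		 * interrupt, so each target process keeps its own
		 * reload distance in pp_pmcval; a counting PMC
		 * accumulates one running total in pm_gv.pm_savedvalue,
		 * shared across all of its targets.
		 */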
1217 if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
1218 mtx_pool_lock_spin(pmc_mtxpool, pm);
1219 newvalue = PMC_PCPU_SAVED(cpu,ri) =
1220 pp->pp_pmcs[ri].pp_pmcval;
1221 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1222 } else {
1223 KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
1224 ("[pmc,%d] illegal mode=%d", __LINE__,
1225 PMC_TO_MODE(pm)));
1226 mtx_pool_lock_spin(pmc_mtxpool, pm);
1227 newvalue = PMC_PCPU_SAVED(cpu, ri) =
1228 pm->pm_gv.pm_savedvalue;
1229 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1230 }
1231
1232 PMCDBG(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);
1233
1234 md->pmd_write_pmc(cpu, ri, newvalue);
1235 md->pmd_start_pmc(cpu, ri);
1236 }
1237
1238 /*
1239 * perform any other architecture/cpu dependent thread
1240 * switch-in actions.
1241 */
1242
1243 (void) (*md->pmd_switch_in)(pc, pp);
1244
1245 critical_exit();
1246
1247}
1248
1249/*
1250 * Thread context switch OUT.
1251 */
1252
1253static void
1254pmc_process_csw_out(struct thread *td)
1255{
1256 int cpu;
1257 enum pmc_mode mode;
1258 unsigned int ri;
1259 struct pmc *pm;
1260 struct proc *p;
1261 struct pmc_cpu *pc;
1262 struct pmc_process *pp;
1263 int64_t tmp;
1264 pmc_value_t newvalue;
1265
1266 /*
1267 * Locate our process descriptor; this may be NULL if
1268 * this process is exiting and we have already removed
1269 * the process from the target process table.
1270 *
1271 * Note that due to kernel preemption, multiple
1272 * context switches may happen while the process is
1273 * exiting.
1274 *
1275 * Note also that if the target process cannot be
1276 * found we still need to deconfigure any PMCs that
1277 * are currently running on hardware.
1278 */
1279
1280 p = td->td_proc;
1281 pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
1282
1283 /*
1284 * save PMCs
1285 */
1286
1287 critical_enter();
1288
1289 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1290
1291 PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1292 p->p_pid, p->p_comm, pp);
1293
1294 KASSERT(cpu >= 0 && cpu < mp_ncpus,
1295 ("[pmc,%d wierd CPU id %d", __LINE__, cpu));
1296
1297 pc = pmc_pcpu[cpu];
1298
1299 /*
 1300 * When a PMC gets unlinked from a target process, it will
 1301 * be removed from that process's pp_pmcs[] array.
1302 *
1303 * However, on a MP system, the target could have been
1304 * executing on another CPU at the time of the unlink.
1305 * So, at context switch OUT time, we need to look at
1306 * the hardware to determine if a PMC is scheduled on
1307 * it.
1308 */
1309
1310 for (ri = 0; ri < md->pmd_npmc; ri++) {
1311
1312 pm = NULL;
1313 (void) (*md->pmd_get_config)(cpu, ri, &pm);
1314
1315 if (pm == NULL) /* nothing at this row index */
1316 continue;
1317
1318 mode = PMC_TO_MODE(pm);
1319 if (!PMC_IS_VIRTUAL_MODE(mode))
1320 continue; /* not a process virtual PMC */
1321
1322 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1323 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
1324 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1325
1326 /* Stop hardware if not already stopped */
30
31#include <sys/param.h>
32#include <sys/eventhandler.h>
33#include <sys/jail.h>
34#include <sys/kernel.h>
35#include <sys/kthread.h>
36#include <sys/limits.h>
37#include <sys/lock.h>
38#include <sys/malloc.h>
39#include <sys/module.h>
40#include <sys/mutex.h>
41#include <sys/pmc.h>
42#include <sys/pmckern.h>
43#include <sys/pmclog.h>
44#include <sys/proc.h>
45#include <sys/queue.h>
46#include <sys/resourcevar.h>
47#include <sys/sched.h>
48#include <sys/signalvar.h>
49#include <sys/smp.h>
50#include <sys/sx.h>
51#include <sys/sysctl.h>
52#include <sys/sysent.h>
53#include <sys/systm.h>
54#include <sys/vnode.h>
55
56#include <machine/atomic.h>
57#include <machine/md_var.h>
58
59/*
60 * Types
61 */
62
63enum pmc_flags {
64 PMC_FLAG_NONE = 0x00, /* do nothing */
65 PMC_FLAG_REMOVE = 0x01, /* atomically remove entry from hash */
66 PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
67};
68
69/*
70 * The offset in sysent where the syscall is allocated.
71 */
72
73static int pmc_syscall_num = NO_SYSCALL;
74struct pmc_cpu **pmc_pcpu; /* per-cpu state */
75pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */
76
77#define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
78
79struct mtx_pool *pmc_mtxpool;
80static int *pmc_pmcdisp; /* PMC row dispositions */
81
82#define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0)
83#define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0)
84#define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0)
85
86#define PMC_MARK_ROW_FREE(R) do { \
87 pmc_pmcdisp[(R)] = 0; \
88} while (0)
89
90#define PMC_MARK_ROW_STANDALONE(R) do { \
91 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
92 __LINE__)); \
93 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
94 KASSERT(pmc_pmcdisp[(R)] >= (-mp_ncpus), ("[pmc,%d] row " \
95 "disposition error", __LINE__)); \
96} while (0)
97
98#define PMC_UNMARK_ROW_STANDALONE(R) do { \
99 atomic_add_int(&pmc_pmcdisp[(R)], 1); \
100 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
101 __LINE__)); \
102} while (0)
103
104#define PMC_MARK_ROW_THREAD(R) do { \
105 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
106 __LINE__)); \
107 atomic_add_int(&pmc_pmcdisp[(R)], 1); \
108} while (0)
109
110#define PMC_UNMARK_ROW_THREAD(R) do { \
111 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
112 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
113 __LINE__)); \
114} while (0)
115
116
117/* various event handlers */
118static eventhandler_tag pmc_exit_tag, pmc_fork_tag;
119
120/* Module statistics */
121struct pmc_op_getdriverstats pmc_stats;
122
123/* Machine/processor dependent operations */
124struct pmc_mdep *md;
125
126/*
127 * Hash tables mapping owner processes and target threads to PMCs.
128 */
129
130struct mtx pmc_processhash_mtx; /* spin mutex */
131static u_long pmc_processhashmask;
132static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;
133
134/*
135 * Hash table of PMC owner descriptors. This table is protected by
136 * the shared PMC "sx" lock.
137 */
138
139static u_long pmc_ownerhashmask;
140static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;
141
142/*
143 * List of PMC owners with system-wide sampling PMCs.
144 */
145
146static LIST_HEAD(, pmc_owner) pmc_ss_owners;
147
148
149/*
150 * Prototypes
151 */
152
153#if DEBUG
154static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
155static int pmc_debugflags_parse(char *newstr, char *fence);
156#endif
157
158static int load(struct module *module, int cmd, void *arg);
159static int pmc_attach_process(struct proc *p, struct pmc *pm);
160static struct pmc *pmc_allocate_pmc_descriptor(void);
161static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
162static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
163static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
164 int cpu);
165static int pmc_can_attach(struct pmc *pm, struct proc *p);
166static void pmc_cleanup(void);
167static int pmc_detach_process(struct proc *p, struct pmc *pm);
168static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
169 int flags);
170static void pmc_destroy_owner_descriptor(struct pmc_owner *po);
171static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
172static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
173static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
174 pmc_id_t pmc);
175static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
176 uint32_t mode);
177static void pmc_force_context_switch(void);
178static void pmc_link_target_process(struct pmc *pm,
179 struct pmc_process *pp);
180static void pmc_maybe_remove_owner(struct pmc_owner *po);
181static void pmc_process_csw_in(struct thread *td);
182static void pmc_process_csw_out(struct thread *td);
183static void pmc_process_exit(void *arg, struct proc *p);
184static void pmc_process_fork(void *arg, struct proc *p1,
185 struct proc *p2, int n);
186static void pmc_process_samples(int cpu);
187static void pmc_release_pmc_descriptor(struct pmc *pmc);
188static void pmc_remove_owner(struct pmc_owner *po);
189static void pmc_remove_process_descriptor(struct pmc_process *pp);
190static void pmc_restore_cpu_binding(struct pmc_binding *pb);
191static void pmc_save_cpu_binding(struct pmc_binding *pb);
192static void pmc_select_cpu(int cpu);
193static int pmc_start(struct pmc *pm);
194static int pmc_stop(struct pmc *pm);
195static int pmc_syscall_handler(struct thread *td, void *syscall_args);
196static void pmc_unlink_target_process(struct pmc *pmc,
197 struct pmc_process *pp);
198
199/*
200 * Kernel tunables and sysctl(8) interface.
201 */
202
203SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");
204
205#if DEBUG
206struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
207char pmc_debugstr[PMC_DEBUG_STRSIZE];
208TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
209 sizeof(pmc_debugstr));
210SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
211 CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
212 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
213#endif
214
215/*
216 * kern.hwpmc.hashrows -- determines the number of rows in the
217 * of the hash table used to look up threads
218 */
219
220static int pmc_hashsize = PMC_HASH_SIZE;
221TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
222SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
223 &pmc_hashsize, 0, "rows in hash tables");
224
225/*
226 * kern.hwpmc.nsamples --- number of PC samples per CPU
227 */
228
229static int pmc_nsamples = PMC_NSAMPLES;
230TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
231SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
232 &pmc_nsamples, 0, "number of PC samples per CPU");
233
234/*
235 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
236 */
237
238static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
239TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
240SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
241 &pmc_mtxpool_size, 0, "size of spin mutex pool");
242
243
244/*
245 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
246 * allocate system-wide PMCs.
247 *
248 * Allowing unprivileged processes to allocate system PMCs is convenient
249 * if system-wide measurements need to be taken concurrently with other
250 * per-process measurements. This feature is turned off by default.
251 */
252
253SYSCTL_DECL(_security_bsd);
254
255static int pmc_unprivileged_syspmcs = 0;
256TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
257SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
258 &pmc_unprivileged_syspmcs, 0,
259 "allow unprivileged process to allocate system PMCs");
260
261/*
262 * Hash function. Discard the lower 2 bits of the pointer since
263 * these are always zero for our uses. The hash multiplier is
264 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
265 */
266
267#if LONG_BIT == 64
268#define _PMC_HM 11400714819323198486u
269#elif LONG_BIT == 32
270#define _PMC_HM 2654435769u
271#else
272#error Must know the size of 'long' to compile
273#endif
274
275#define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
276
277/*
278 * Syscall structures
279 */
280
281/* The `sysent' for the new syscall */
282static struct sysent pmc_sysent = {
283 2, /* sy_narg */
284 pmc_syscall_handler /* sy_call */
285};
286
287static struct syscall_module_data pmc_syscall_mod = {
288 load,
289 NULL,
290 &pmc_syscall_num,
291 &pmc_sysent,
292 { 0, NULL }
293};
294
295static moduledata_t pmc_mod = {
296 PMC_MODULE_NAME,
297 syscall_module_handler,
298 &pmc_syscall_mod
299};
300
301DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
302MODULE_VERSION(pmc, PMC_VERSION);
303
304#if DEBUG
305enum pmc_dbgparse_state {
306 PMCDS_WS, /* in whitespace */
307 PMCDS_MAJOR, /* seen a major keyword */
308 PMCDS_MINOR
309};
310
311static int
312pmc_debugflags_parse(char *newstr, char *fence)
313{
314 char c, *p, *q;
315 struct pmc_debugflags *tmpflags;
316 int error, found, *newbits, tmp;
317 size_t kwlen;
318
319 MALLOC(tmpflags, struct pmc_debugflags *, sizeof(*tmpflags),
320 M_PMC, M_WAITOK|M_ZERO);
321
322 p = newstr;
323 error = 0;
324
325 for (; p < fence && (c = *p); p++) {
326
327 /* skip white space */
328 if (c == ' ' || c == '\t')
329 continue;
330
331 /* look for a keyword followed by "=" */
332 for (q = p; p < fence && (c = *p) && c != '='; p++)
333 ;
334 if (c != '=') {
335 error = EINVAL;
336 goto done;
337 }
338
339 kwlen = p - q;
340 newbits = NULL;
341
342 /* lookup flag group name */
343#define DBG_SET_FLAG_MAJ(S,F) \
344 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
345 newbits = &tmpflags->pdb_ ## F;
346
347 DBG_SET_FLAG_MAJ("cpu", CPU);
348 DBG_SET_FLAG_MAJ("csw", CSW);
349 DBG_SET_FLAG_MAJ("logging", LOG);
350 DBG_SET_FLAG_MAJ("module", MOD);
351 DBG_SET_FLAG_MAJ("md", MDP);
352 DBG_SET_FLAG_MAJ("owner", OWN);
353 DBG_SET_FLAG_MAJ("pmc", PMC);
354 DBG_SET_FLAG_MAJ("process", PRC);
355 DBG_SET_FLAG_MAJ("sampling", SAM);
356
357 if (newbits == NULL) {
358 error = EINVAL;
359 goto done;
360 }
361
362 p++; /* skip the '=' */
363
364 /* Now parse the individual flags */
365 tmp = 0;
366 newflag:
367 for (q = p; p < fence && (c = *p); p++)
368 if (c == ' ' || c == '\t' || c == ',')
369 break;
370
371 /* p == fence or c == ws or c == "," or c == 0 */
372
373 if ((kwlen = p - q) == 0) {
374 *newbits = tmp;
375 continue;
376 }
377
378 found = 0;
379#define DBG_SET_FLAG_MIN(S,F) \
380 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
381 tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)
382
383 /* a '*' denotes all possible flags in the group */
384 if (kwlen == 1 && *q == '*')
385 tmp = found = ~0;
386 /* look for individual flag names */
387 DBG_SET_FLAG_MIN("allocaterow", ALR);
388 DBG_SET_FLAG_MIN("allocate", ALL);
389 DBG_SET_FLAG_MIN("attach", ATT);
390 DBG_SET_FLAG_MIN("bind", BND);
391 DBG_SET_FLAG_MIN("config", CFG);
392 DBG_SET_FLAG_MIN("exec", EXC);
393 DBG_SET_FLAG_MIN("exit", EXT);
394 DBG_SET_FLAG_MIN("find", FND);
395 DBG_SET_FLAG_MIN("flush", FLS);
396 DBG_SET_FLAG_MIN("fork", FRK);
397 DBG_SET_FLAG_MIN("getbuf", GTB);
398 DBG_SET_FLAG_MIN("hook", PMH);
399 DBG_SET_FLAG_MIN("init", INI);
400 DBG_SET_FLAG_MIN("intr", INT);
401 DBG_SET_FLAG_MIN("linktarget", TLK);
402 DBG_SET_FLAG_MIN("mayberemove", OMR);
403 DBG_SET_FLAG_MIN("ops", OPS);
404 DBG_SET_FLAG_MIN("read", REA);
405 DBG_SET_FLAG_MIN("register", REG);
406 DBG_SET_FLAG_MIN("release", REL);
407 DBG_SET_FLAG_MIN("remove", ORM);
408 DBG_SET_FLAG_MIN("sample", SAM);
409 DBG_SET_FLAG_MIN("scheduleio", SIO);
410 DBG_SET_FLAG_MIN("select", SEL);
411 DBG_SET_FLAG_MIN("signal", SIG);
412 DBG_SET_FLAG_MIN("swi", SWI);
413 DBG_SET_FLAG_MIN("swo", SWO);
414 DBG_SET_FLAG_MIN("start", STA);
415 DBG_SET_FLAG_MIN("stop", STO);
416 DBG_SET_FLAG_MIN("syscall", PMS);
417 DBG_SET_FLAG_MIN("unlinktarget", TUL);
418 DBG_SET_FLAG_MIN("write", WRI);
419 if (found == 0) {
420 /* unrecognized flag name */
421 error = EINVAL;
422 goto done;
423 }
424
425 if (c == 0 || c == ' ' || c == '\t') { /* end of flag group */
426 *newbits = tmp;
427 continue;
428 }
429
430 p++;
431 goto newflag;
432 }
433
434 /* save the new flag set */
435 bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));
436
437 done:
438 FREE(tmpflags, M_PMC);
439 return error;
440}
441
442static int
443pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
444{
445 char *fence, *newstr;
446 int error;
447 unsigned int n;
448
449 (void) arg1; (void) arg2; /* unused parameters */
450
451 n = sizeof(pmc_debugstr);
452 MALLOC(newstr, char *, n, M_PMC, M_ZERO|M_WAITOK);
453 (void) strlcpy(newstr, pmc_debugstr, n);
454
455 error = sysctl_handle_string(oidp, newstr, n, req);
456
457 /* if there is a new string, parse and copy it */
458 if (error == 0 && req->newptr != NULL) {
459 fence = newstr + (n < req->newlen ? n : req->newlen + 1);
460 if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
461 (void) strlcpy(pmc_debugstr, newstr,
462 sizeof(pmc_debugstr));
463 }
464
465 FREE(newstr, M_PMC);
466
467 return error;
468}
469#endif
470
471/*
472 * Concurrency Control
473 *
474 * The driver manages the following data structures:
475 *
476 * - target process descriptors, one per target process
477 * - owner process descriptors (and attached lists), one per owner process
478 * - lookup hash tables for owner and target processes
479 * - PMC descriptors (and attached lists)
480 * - per-cpu hardware state
481 * - the 'hook' variable through which the kernel calls into
482 * this module
483 * - the machine hardware state (managed by the MD layer)
484 *
485 * These data structures are accessed from:
486 *
487 * - thread context-switch code
488 * - interrupt handlers (possibly on multiple cpus)
489 * - kernel threads on multiple cpus running on behalf of user
490 * processes doing system calls
491 * - this driver's private kernel threads
492 *
493 * = Locks and Locking strategy =
494 *
495 * The driver uses four locking strategies for its operation:
496 *
497 * - There is a 'global' SX lock "pmc_sx" that is used to protect
498 * the its 'meta-data'.
499 *
500 * Calls into the module (via syscall() or by the kernel) start with
501 * this lock being held in exclusive mode. Depending on the requested
502 * operation, the lock may be downgraded to 'shared' mode to allow
503 * more concurrent readers into the module.
504 *
505 * This SX lock is held in exclusive mode for any operations that
506 * modify the linkages between the driver's internal data structures.
507 *
508 * The 'pmc_hook' function pointer is also protected by this lock.
509 * It is only examined with the sx lock held in exclusive mode. The
510 * kernel module is allowed to be unloaded only with the sx lock
511 * held in exclusive mode. In normal syscall handling, after
512 * acquiring the pmc_sx lock we first check that 'pmc_hook' is
513 * non-null before proceeding. This prevents races between the
514 * thread unloading the module and other threads seeking to use the
515 * module.
516 *
517 * - Lookups of target process structures and owner process structures
518 * cannot use the global "pmc_sx" SX lock because these lookups need
519 * to happen during context switches and in other critical sections
520 * where sleeping is not allowed. We protect these lookup tables
521 * with their own private spin-mutexes, "pmc_processhash_mtx" and
522 * "pmc_ownerhash_mtx". These are 'leaf' mutexes, in that no other
523 * lock is acquired with these locks held.
524 *
525 * - Interrupt handlers work in a lock free manner. At interrupt
526 * time, handlers look at the PMC pointer (phw->phw_pmc) configured
527 * when the PMC was started. If this pointer is NULL, the interrupt
528 * is ignored after updating driver statistics. We ensure that this
529 * pointer is set (using an atomic operation if necessary) before the
530 * PMC hardware is started. Conversely, this pointer is unset atomically
531 * only after the PMC hardware is stopped.
532 *
533 * We ensure that everything needed for the operation of an
534 * interrupt handler is available without it needing to acquire any
535 * locks. We also ensure that a PMC's software state is destroyed only
536 * after the PMC is taken off hardware (on all CPUs).
537 *
538 * - Context-switch handling with process-private PMCs needs more
539 * care.
540 *
541 * A given process may be the target of multiple PMCs. For example,
542 * PMCATTACH and PMCDETACH may be requested by a process on one CPU
543 * while the target process is running on another. A PMC could also
544 * be getting released because its owner is exiting. We tackle
545 * these situations in the following manner:
546 *
547 * - each target process structure 'pmc_process' has an array
548 * of 'struct pmc *' pointers, one for each hardware PMC.
549 *
550 * - At context switch IN time, each "target" PMC in RUNNING state
551 * gets started on hardware and a pointer to each PMC is copied into
552 * the per-cpu phw array. The 'runcount' for the PMC is
553 * incremented.
554 *
555 * - At context switch OUT time, all process-virtual PMCs are stopped
556 * on hardware. The saved value is added to the PMC's value field
557 * only if the PMC is in a non-deleted state (the PMC's state could
558 * have changed during the current time slice).
559 *
560 * Note that in between a switch IN on a processor and a switch
561 * OUT, the PMC could have been released on another CPU. Therefore,
562 * context switch OUT always looks at the hardware state to turn
563 * OFF PMCs and will update a PMC's saved value only if it is
564 * reachable from the target process record.
565 *
566 * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
567 * be attached to many processes at the time of the call and could
568 * be active on multiple CPUs).
569 *
570 * We prevent further scheduling of the PMC by marking it as in
571 * state 'DELETED'. If the runcount of the PMC is non-zero then
572 * this PMC is currently running on a CPU somewhere. The thread
573 * doing the PMCRELEASE operation waits by repeatedly doing a
574 * tsleep() until the runcount comes to zero.
575 *
576 */
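
/*
 * An illustrative sketch of the syscall-entry pattern described
 * above (the error value is a placeholder; the actual entry point
 * appears later in this file):
 *
 *	sx_xlock(&pmc_sx);
 *	if (pmc_hook == NULL) {
 *		sx_xunlock(&pmc_sx);
 *		return ENOSYS;
 *	}
 *	... perform the requested operation ...
 *	sx_xunlock(&pmc_sx);
 */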
577
578/*
579 * save the cpu binding of the current kthread
580 */
581
582static void
583pmc_save_cpu_binding(struct pmc_binding *pb)
584{
585 PMCDBG(CPU,BND,2, "%s", "save-cpu");
586 mtx_lock_spin(&sched_lock);
587 pb->pb_bound = sched_is_bound(curthread);
588 pb->pb_cpu = curthread->td_oncpu;
589 mtx_unlock_spin(&sched_lock);
590 PMCDBG(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
591}
592
593/*
594 * restore the cpu binding of the current thread
595 */
596
597static void
598pmc_restore_cpu_binding(struct pmc_binding *pb)
599{
600 PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
601 curthread->td_oncpu, pb->pb_cpu);
602 mtx_lock_spin(&sched_lock);
603 if (pb->pb_bound)
604 sched_bind(curthread, pb->pb_cpu);
605 else
606 sched_unbind(curthread);
607 mtx_unlock_spin(&sched_lock);
608 PMCDBG(CPU,BND,2, "%s", "restore-cpu done");
609}
610
611/*
612 * move execution to the specified cpu and bind it there.
613 */
614
615static void
616pmc_select_cpu(int cpu)
617{
618 KASSERT(cpu >= 0 && cpu < mp_ncpus,
619 ("[pmc,%d] bad cpu number %d", __LINE__, cpu));
620
621 /* never move to a disabled CPU */
622 KASSERT(pmc_cpu_is_disabled(cpu) == 0, ("[pmc,%d] selecting "
623 "disabled CPU %d", __LINE__, cpu));
624
625 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
626 mtx_lock_spin(&sched_lock);
627 sched_bind(curthread, cpu);
628 mtx_unlock_spin(&sched_lock);
629
630 KASSERT(curthread->td_oncpu == cpu,
631 ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
632 cpu, curthread->td_oncpu));
633
634 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
635}
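
/*
 * An illustrative sketch of how the helpers above combine; the same
 * save/select/restore idiom appears in pmc_release_pmc_descriptor(),
 * pmc_start() and pmc_stop() later in this file:
 *
 *	struct pmc_binding pb;
 *
 *	pmc_save_cpu_binding(&pb);
 *	pmc_select_cpu(cpu);
 *	... program, read or stop the PMC hardware on 'cpu' ...
 *	pmc_restore_cpu_binding(&pb);
 */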
636
637/*
638 * Force a context switch.
639 *
640 * We do this by tsleep'ing for 1 tick -- invoking mi_switch() is not
641 * guaranteed to force a context switch.
642 */
643
644static void
645pmc_force_context_switch(void)
646{
647 u_char curpri;
648
649 mtx_lock_spin(&sched_lock);
650 curpri = curthread->td_priority;
651 mtx_unlock_spin(&sched_lock);
652
653 (void) tsleep((void *) pmc_force_context_switch, curpri,
654 "pmcctx", 1);
655
656}
657
658/*
659 * Get the file name for an executable. This is a simple wrapper
660 * around vn_fullpath(9).
661 */
662
663static void
664pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
665{
666 struct thread *td;
667
668 td = curthread;
669 *fullpath = "unknown";
670 *freepath = NULL;
671 vn_lock(v, LK_EXCLUSIVE | LK_RETRY, td);
672 vn_fullpath(td, v, fullpath, freepath);
673 VOP_UNLOCK(v, 0, td);
674}
675
676/*
677 * remove a process owning PMCs
678 */
679
680void
681pmc_remove_owner(struct pmc_owner *po)
682{
683 struct pmc *pm, *tmp;
684
685 sx_assert(&pmc_sx, SX_XLOCKED);
686
687 PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);
688
689 /* Remove descriptor from the owner hash table */
690 LIST_REMOVE(po, po_next);
691
692 /* release all owned PMC descriptors */
693 LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
694 PMCDBG(OWN,ORM,2, "pmc=%p", pm);
695 KASSERT(pm->pm_owner == po,
696 ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));
697
698 pmc_release_pmc_descriptor(pm); /* will unlink from the list */
699 }
700
701 KASSERT(po->po_sscount == 0,
702 ("[pmc,%d] SS count not zero", __LINE__));
703 KASSERT(LIST_EMPTY(&po->po_pmcs),
704 ("[pmc,%d] PMC list not empty", __LINE__));
705
706 /* de-configure the log file if present */
707 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
708 pmclog_deconfigure_log(po);
709}
710
711/*
712 * remove an owner process record if all conditions are met.
713 */
714
715static void
716pmc_maybe_remove_owner(struct pmc_owner *po)
717{
718
719 PMCDBG(OWN,OMR,1, "maybe-remove-owner po=%p", po);
720
721 /*
722 * Remove owner record if
723 * - this process does not own any PMCs
724 * - this process has not configured a log file
725 */
726
727 if (LIST_EMPTY(&po->po_pmcs) &&
728 ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
729 pmc_remove_owner(po);
730 pmc_destroy_owner_descriptor(po);
731 }
732}
733
734/*
735 * Add an association between a target process and a PMC.
736 */
737
738static void
739pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
740{
741 int ri;
742 struct pmc_target *pt;
743
744 sx_assert(&pmc_sx, SX_XLOCKED);
745
746 KASSERT(pm != NULL && pp != NULL,
747 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
748 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
749 ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
750 __LINE__, pm, pp->pp_proc->p_pid));
751 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < ((int) md->pmd_npmc - 1),
752 ("[pmc,%d] Illegal reference count %d for process record %p",
753 __LINE__, pp->pp_refcnt, (void *) pp));
754
755 ri = PMC_TO_ROWINDEX(pm);
756
757 PMCDBG(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
758 pm, ri, pp);
759
760#if DEBUG
761 LIST_FOREACH(pt, &pm->pm_targets, pt_next)
762 if (pt->pt_process == pp)
763 KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
764 __LINE__, pp, pm));
765#endif
766
767 MALLOC(pt, struct pmc_target *, sizeof(struct pmc_target),
768 M_PMC, M_ZERO|M_WAITOK);
769
770 pt->pt_process = pp;
771
772 LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
773
774 atomic_store_rel_ptr(&pp->pp_pmcs[ri].pp_pmc, pm);
775
776 if (pm->pm_owner->po_owner == pp->pp_proc)
777 pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;
778
779 /*
780 * Initialize the per-process values at this row index.
781 */
782 pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
783 pm->pm_sc.pm_reloadcount : 0;
784
785 pp->pp_refcnt++;
786
787}
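
/*
 * Design note (a reading of the code above, not normative): the
 * release-ordered atomic_store_rel_ptr() publishes the pp_pmc
 * pointer only after the pmc_target linkage has been fully
 * initialized, so readers that scan pp_pmcs[] without holding
 * pmc_sx (e.g. the context-switch paths below) never see a
 * half-constructed association.
 */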
788
789/*
790 * Removes the association between a target process and a PMC.
791 */
792
793static void
794pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
795{
796 int ri;
797 struct proc *p;
798 struct pmc_target *ptgt;
799
800 sx_assert(&pmc_sx, SX_XLOCKED);
801
802 KASSERT(pm != NULL && pp != NULL,
803 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
804
805 KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt < (int) md->pmd_npmc,
806 ("[pmc,%d] Illegal ref count %d on process record %p",
807 __LINE__, pp->pp_refcnt, (void *) pp));
808
809 ri = PMC_TO_ROWINDEX(pm);
810
811 PMCDBG(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
812 pm, ri, pp);
813
814 KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
815 ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
816 ri, pm, pp->pp_pmcs[ri].pp_pmc));
817
818 pp->pp_pmcs[ri].pp_pmc = NULL;
819 pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;
820
821 /* Remove owner-specific flags */
822 if (pm->pm_owner->po_owner == pp->pp_proc) {
823 pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
824 pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
825 }
826
827 pp->pp_refcnt--;
828
829 /* Remove the target process from the PMC structure */
830 LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
831 if (ptgt->pt_process == pp)
832 break;
833
834 KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
835 "in pmc %p", __LINE__, pp->pp_proc, pp, pm));
836
837 LIST_REMOVE(ptgt, pt_next);
838 FREE(ptgt, M_PMC);
839
840 /* if the PMC now lacks targets, send the owner a SIGIO */
841 if (LIST_EMPTY(&pm->pm_targets)) {
842 p = pm->pm_owner->po_owner;
843 PROC_LOCK(p);
844 psignal(p, SIGIO);
845 PROC_UNLOCK(p);
846
847 PMCDBG(PRC,SIG,2, "signalling proc=%p signal=%d", p,
848 SIGIO);
849 }
850}
851
852/*
853 * Check if PMC 'pm' may be attached to target process 't'.
854 */
855
856static int
857pmc_can_attach(struct pmc *pm, struct proc *t)
858{
859 struct proc *o; /* pmc owner */
860 struct ucred *oc, *tc; /* owner, target credentials */
861 int decline_attach, i;
862
863 /*
864 * A PMC's owner can always attach that PMC to itself.
865 */
866
867 if ((o = pm->pm_owner->po_owner) == t)
868 return 0;
869
870 PROC_LOCK(o);
871 oc = o->p_ucred;
872 crhold(oc);
873 PROC_UNLOCK(o);
874
875 PROC_LOCK(t);
876 tc = t->p_ucred;
877 crhold(tc);
878 PROC_UNLOCK(t);
879
880 /*
881 * The effective uid of the PMC owner should match at least one
882 * of the {effective,real,saved} uids of the target process.
883 */
884
885 decline_attach = oc->cr_uid != tc->cr_uid &&
886 oc->cr_uid != tc->cr_svuid &&
887 oc->cr_uid != tc->cr_ruid;
888
889 /*
890 * Every one of the target's group ids must be in the owner's
891 * group list.
892 */
893 for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
894 decline_attach = !groupmember(tc->cr_groups[i], oc);
895
896 /* check the real and saved gids too */
897 if (decline_attach == 0)
898 decline_attach = !groupmember(tc->cr_rgid, oc) ||
899 !groupmember(tc->cr_svgid, oc);
900
901 crfree(tc);
902 crfree(oc);
903
904 return decline_attach ? EPERM : 0;
905}
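
/*
 * Worked example (illustrative): an owner whose effective uid is 100
 * may attach its PMC to a target whose effective, real or saved uid
 * is 100, provided that every group id in the target's credential
 * (including the real and saved gids) is also in the owner's group
 * list; any mismatch declines the attach.
 */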
906
907/*
908 * Attach a process to a PMC.
909 */
910
911static int
912pmc_attach_one_process(struct proc *p, struct pmc *pm)
913{
914 int ri;
915 char *fullpath, *freepath;
916 struct pmc_process *pp;
917
918 sx_assert(&pmc_sx, SX_XLOCKED);
919
920 PMCDBG(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
921 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
922
923 /*
924 * Locate the process descriptor corresponding to process 'p',
925 * allocating space as needed.
926 *
927 * Verify that rowindex 'pm_rowindex' is free in the process
928 * descriptor.
929 *
930 * If so, link the process descriptor and the PMC at this
931 * row index.
932 */
933 ri = PMC_TO_ROWINDEX(pm);
934
935 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL)
936 return ENOMEM;
937
938 if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */
939 return EEXIST;
940
941 if (pp->pp_pmcs[ri].pp_pmc != NULL)
942 return EBUSY;
943
944 pmc_link_target_process(pm, pp);
945
946 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
947 (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
948 pm->pm_flags |= PMC_F_NEEDS_LOGFILE;
949
950 pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */
951
952 /* issue an attach event to a configured log file */
953 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
954 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
955 pmclog_process_pmcattach(pm, p->p_pid, fullpath);
956 if (freepath)
957 FREE(freepath, M_TEMP);
958 }
959 /* mark process as using HWPMCs */
960 PROC_LOCK(p);
961 p->p_flag |= P_HWPMC;
962 PROC_UNLOCK(p);
963
964 return 0;
965}
966
967/*
968 * Attach a process and optionally its children
969 */
970
971static int
972pmc_attach_process(struct proc *p, struct pmc *pm)
973{
974 int error;
975 struct proc *top;
976
977 sx_assert(&pmc_sx, SX_XLOCKED);
978
979 PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
980 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
981
982
983 /*
984 * If this PMC successfully allowed a GETMSR operation
985 * in the past, disallow further ATTACHes.
986 */
987
988 if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
989 return EPERM;
990
991 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
992 return pmc_attach_one_process(p, pm);
993
994 /*
995 * Traverse all child processes, attaching them to
996 * this PMC.
997 */
998
999 sx_slock(&proctree_lock);
1000
1001 top = p;
1002
1003 for (;;) {
1004 if ((error = pmc_attach_one_process(p, pm)) != 0)
1005 break;
1006 if (!LIST_EMPTY(&p->p_children))
1007 p = LIST_FIRST(&p->p_children);
1008 else for (;;) {
1009 if (p == top)
1010 goto done;
1011 if (LIST_NEXT(p, p_sibling)) {
1012 p = LIST_NEXT(p, p_sibling);
1013 break;
1014 }
1015 p = p->p_pptr;
1016 }
1017 }
1018
1019 if (error)
1020 (void) pmc_detach_process(top, pm);
1021
1022 done:
1023 sx_sunlock(&proctree_lock);
1024 return error;
1025}
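
/*
 * A note on the loop above: it performs an iterative pre-order walk
 * of the process tree rooted at 'top': visit a process, descend into
 * p_children first, then advance along p_sibling, climbing back
 * through p_pptr once a subtree is exhausted.  pmc_detach_process()
 * below uses the same shape.
 */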
1026
1027/*
1028 * Detach a process from a PMC. If there are no other PMCs tracking
1029 * this process, remove the process structure from its hash table. If
1030 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
1031 */
1032
1033static int
1034pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
1035{
1036 int ri;
1037 struct pmc_process *pp;
1038
1039 sx_assert(&pmc_sx, SX_XLOCKED);
1040
1041 KASSERT(pm != NULL,
1042 ("[pmc,%d] null pm pointer", __LINE__));
1043
1044 ri = PMC_TO_ROWINDEX(pm);
1045
1046 PMCDBG(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
1047 pm, ri, p, p->p_pid, p->p_comm, flags);
1048
1049 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
1050 return ESRCH;
1051
1052 if (pp->pp_pmcs[ri].pp_pmc != pm)
1053 return EINVAL;
1054
1055 pmc_unlink_target_process(pm, pp);
1056
1057 /* Issue a detach entry if a log file is configured */
1058 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
1059 pmclog_process_pmcdetach(pm, p->p_pid);
1060
1061 /*
1062 * If there are no PMCs targeting this process, we remove its
1063 * descriptor from the target hash table and unset the P_HWPMC
1064 * flag in the struct proc.
1065 */
1066 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
1067 ("[pmc,%d] Illegal refcnt %d for process struct %p",
1068 __LINE__, pp->pp_refcnt, pp));
1069
1070 if (pp->pp_refcnt != 0) /* still a target of some PMC */
1071 return 0;
1072
1073 pmc_remove_process_descriptor(pp);
1074
1075 if (flags & PMC_FLAG_REMOVE)
1076 FREE(pp, M_PMC);
1077
1078 PROC_LOCK(p);
1079 p->p_flag &= ~P_HWPMC;
1080 PROC_UNLOCK(p);
1081
1082 return 0;
1083}
1084
1085/*
1086 * Detach a process and optionally its descendants from a PMC.
1087 */
1088
1089static int
1090pmc_detach_process(struct proc *p, struct pmc *pm)
1091{
1092 struct proc *top;
1093
1094 sx_assert(&pmc_sx, SX_XLOCKED);
1095
1096 PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
1097 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1098
1099 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1100 return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1101
1102 /*
1103 * Traverse all children, detaching them from this PMC. We
1104 * ignore errors since we could be detaching a PMC from a
1105 * partially attached proc tree.
1106 */
1107
1108 sx_slock(&proctree_lock);
1109
1110 top = p;
1111
1112 for (;;) {
1113 (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1114
1115 if (!LIST_EMPTY(&p->p_children))
1116 p = LIST_FIRST(&p->p_children);
1117 else for (;;) {
1118 if (p == top)
1119 goto done;
1120 if (LIST_NEXT(p, p_sibling)) {
1121 p = LIST_NEXT(p, p_sibling);
1122 break;
1123 }
1124 p = p->p_pptr;
1125 }
1126 }
1127
1128 done:
1129 sx_sunlock(&proctree_lock);
1130
1131 if (LIST_EMPTY(&pm->pm_targets))
1132 pm->pm_flags &= ~PMC_F_ATTACH_DONE;
1133
1134 return 0;
1135}
1136
1137
1138/*
1139 * Thread context switch IN
1140 */
1141
1142static void
1143pmc_process_csw_in(struct thread *td)
1144{
1145 int cpu;
1146 unsigned int ri;
1147 struct pmc *pm;
1148 struct proc *p;
1149 struct pmc_cpu *pc;
1150 struct pmc_hw *phw;
1151 struct pmc_process *pp;
1152 pmc_value_t newvalue;
1153
1154 p = td->td_proc;
1155
1156 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
1157 return;
1158
1159 KASSERT(pp->pp_proc == td->td_proc,
1160 ("[pmc,%d] not my thread state", __LINE__));
1161
1162 critical_enter(); /* no preemption from this point */
1163
1164 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1165
1166 PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1167 p->p_pid, p->p_comm, pp);
1168
1169 KASSERT(cpu >= 0 && cpu < mp_ncpus,
1170 ("[pmc,%d] wierd CPU id %d", __LINE__, cpu));
1171
1172 pc = pmc_pcpu[cpu];
1173
1174 for (ri = 0; ri < md->pmd_npmc; ri++) {
1175
1176 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
1177 continue;
1178
1179 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
1180 ("[pmc,%d] Target PMC in non-virtual mode (%d)",
1181 __LINE__, PMC_TO_MODE(pm)));
1182
1183 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1184 ("[pmc,%d] Row index mismatch pmc %d != ri %d",
1185 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1186
1187 /*
1188 * Only PMCs that are marked as 'RUNNING' need
1189 * be placed on hardware.
1190 */
1191
1192 if (pm->pm_state != PMC_STATE_RUNNING)
1193 continue;
1194
1195 /* increment PMC runcount */
1196 atomic_add_rel_32(&pm->pm_runcount, 1);
1197
1198 /* configure the HWPMC we are going to use. */
1199 md->pmd_config_pmc(cpu, ri, pm);
1200
1201 phw = pc->pc_hwpmcs[ri];
1202
1203 KASSERT(phw != NULL,
1204 ("[pmc,%d] null hw pointer", __LINE__));
1205
1206 KASSERT(phw->phw_pmc == pm,
1207 ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
1208 phw->phw_pmc, pm));
1209
1210 /*
1211 * Write out saved value and start the PMC.
1212 *
1213 * Sampling PMCs use a per-process value, while
1214 * counting mode PMCs use a per-pmc value that is
1215 * inherited across descendants.
1216 */
1217 if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
1218 mtx_pool_lock_spin(pmc_mtxpool, pm);
1219 newvalue = PMC_PCPU_SAVED(cpu,ri) =
1220 pp->pp_pmcs[ri].pp_pmcval;
1221 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1222 } else {
1223 KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
1224 ("[pmc,%d] illegal mode=%d", __LINE__,
1225 PMC_TO_MODE(pm)));
1226 mtx_pool_lock_spin(pmc_mtxpool, pm);
1227 newvalue = PMC_PCPU_SAVED(cpu, ri) =
1228 pm->pm_gv.pm_savedvalue;
1229 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1230 }
1231
1232 PMCDBG(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);
1233
1234 md->pmd_write_pmc(cpu, ri, newvalue);
1235 md->pmd_start_pmc(cpu, ri);
1236 }
1237
1238 /*
1239 * perform any other architecture/cpu dependent thread
1240 * switch-in actions.
1241 */
1242
1243 (void) (*md->pmd_switch_in)(pc, pp);
1244
1245 critical_exit();
1246
1247}
1248
1249/*
1250 * Thread context switch OUT.
1251 */
1252
1253static void
1254pmc_process_csw_out(struct thread *td)
1255{
1256 int cpu;
1257 enum pmc_mode mode;
1258 unsigned int ri;
1259 struct pmc *pm;
1260 struct proc *p;
1261 struct pmc_cpu *pc;
1262 struct pmc_process *pp;
1263 int64_t tmp;
1264 pmc_value_t newvalue;
1265
1266 /*
1267 * Locate our process descriptor; this may be NULL if
1268 * this process is exiting and we have already removed
1269 * the process from the target process table.
1270 *
1271 * Note that due to kernel preemption, multiple
1272 * context switches may happen while the process is
1273 * exiting.
1274 *
1275 * Note also that if the target process cannot be
1276 * found we still need to deconfigure any PMCs that
1277 * are currently running on hardware.
1278 */
1279
1280 p = td->td_proc;
1281 pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
1282
1283 /*
1284 * save PMCs
1285 */
1286
1287 critical_enter();
1288
1289 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1290
1291 PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1292 p->p_pid, p->p_comm, pp);
1293
1294 KASSERT(cpu >= 0 && cpu < mp_ncpus,
1295 ("[pmc,%d wierd CPU id %d", __LINE__, cpu));
1296
1297 pc = pmc_pcpu[cpu];
1298
1299 /*
1300 * When a PMC gets unlinked from a target process, it will
1301 * be removed from the target's pp_pmcs[] array.
1302 *
1303 * However, on a MP system, the target could have been
1304 * executing on another CPU at the time of the unlink.
1305 * So, at context switch OUT time, we need to look at
1306 * the hardware to determine if a PMC is scheduled on
1307 * it.
1308 */
1309
1310 for (ri = 0; ri < md->pmd_npmc; ri++) {
1311
1312 pm = NULL;
1313 (void) (*md->pmd_get_config)(cpu, ri, &pm);
1314
1315 if (pm == NULL) /* nothing at this row index */
1316 continue;
1317
1318 mode = PMC_TO_MODE(pm);
1319 if (!PMC_IS_VIRTUAL_MODE(mode))
1320 continue; /* not a process virtual PMC */
1321
1322 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1323 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
1324 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1325
1326 /* Stop hardware if not already stopped */
1327 if ((pm->pm_flags & PMC_F_IS_STALLED) == 0)
1327 if (pm->pm_stalled == 0)
1328 md->pmd_stop_pmc(cpu, ri);
1329
1330 /* reduce this PMC's runcount */
1331 atomic_subtract_rel_32(&pm->pm_runcount, 1);
1332
1333 /*
1334 * If this PMC is associated with this process,
1335 * save the reading.
1336 */
1337
1338 if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) {
1339
1340 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
1341 ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
1342 pm, ri, pp->pp_pmcs[ri].pp_pmc));
1343
1344 KASSERT(pp->pp_refcnt > 0,
1345 ("[pmc,%d] pp refcnt = %d", __LINE__,
1346 pp->pp_refcnt));
1347
1348 md->pmd_read_pmc(cpu, ri, &newvalue);
1349
1350 tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);
1351
1352 PMCDBG(CSW,SWI,1,"cpu=%d ri=%d tmp=%jd", cpu, ri,
1353 tmp);
1354
1355 if (mode == PMC_MODE_TS) {
1356
1357 /*
1358 * For sampling process-virtual PMCs,
1359 * we expect the count to be
1360 * decreasing as the 'value'
1361 * programmed into the PMC is the
1362 * number of events to be seen till
1363 * the next sampling interrupt.
1364 */
1365 if (tmp < 0)
1366 tmp += pm->pm_sc.pm_reloadcount;
1367 mtx_pool_lock_spin(pmc_mtxpool, pm);
1368 pp->pp_pmcs[ri].pp_pmcval -= tmp;
1369 if ((int64_t) pp->pp_pmcs[ri].pp_pmcval < 0)
1370 pp->pp_pmcs[ri].pp_pmcval +=
1371 pm->pm_sc.pm_reloadcount;
1372 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1373
1374 } else {
1375
1376 /*
1377 * For counting process-virtual PMCs,
1378 * we expect the count to be
1379 * increasing monotonically, modulo a 64
1380 * bit wraparound.
1381 */
1382 KASSERT((int64_t) tmp >= 0,
1383 ("[pmc,%d] negative increment cpu=%d "
1384 "ri=%d newvalue=%jx saved=%jx "
1385 "incr=%jx", __LINE__, cpu, ri,
1386 newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));
1387
1388 mtx_pool_lock_spin(pmc_mtxpool, pm);
1389 pm->pm_gv.pm_savedvalue += tmp;
1390 pp->pp_pmcs[ri].pp_pmcval += tmp;
1391 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1392
1393 if (pm->pm_flags & PMC_F_LOG_PROCCSW)
1394 pmclog_process_proccsw(pm, pp, tmp);
1395 }
1396 }
1397
1398 /* mark hardware as free */
1399 md->pmd_config_pmc(cpu, ri, NULL);
1400 }
1401
1402 /*
1403 * perform any other architecture/cpu dependent thread
1404 * switch out functions.
1405 */
1406
1407 (void) (*md->pmd_switch_out)(pc, pp);
1408
1409 critical_exit();
1410}
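
/*
 * Worked example (illustrative numbers) for the counting
 * (PMC_MODE_TC) branch above: if the PMC was switched in with a
 * saved value of 5000 and the hardware reads 5750 at switch-out
 * time, then tmp = 5750 - 5000 = 750 events, and 750 is added to
 * both the PMC-wide pm_gv.pm_savedvalue and the per-process
 * pp_pmcval accumulator.
 */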
1411
1412/*
1413 * The 'hook' invoked from the kernel proper
1414 */
1415
1416
1417#if DEBUG
1418const char *pmc_hooknames[] = {
1419 "",
1420 "EXIT",
1421 "EXEC",
1422 "FORK",
1423 "CSW-IN",
1424 "CSW-OUT",
1425 "SAMPLE"
1426};
1427#endif
1428
1429static int
1430pmc_hook_handler(struct thread *td, int function, void *arg)
1431{
1432
1433 PMCDBG(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
1434 pmc_hooknames[function], arg);
1435
1436 switch (function)
1437 {
1438
1439 /*
1440 * Process exec()
1441 */
1442
1443 case PMC_FN_PROCESS_EXEC:
1444 {
1445 char *fullpath, *freepath;
1446 unsigned int ri;
1447 int is_using_hwpmcs;
1448 struct pmc *pm;
1449 struct proc *p;
1450 struct pmc_owner *po;
1451 struct pmc_process *pp;
1452 struct pmckern_procexec *pk;
1453
1454 sx_assert(&pmc_sx, SX_XLOCKED);
1455
1456 p = td->td_proc;
1457 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
1458
1459 pk = (struct pmckern_procexec *) arg;
1460
1461 /* Inform owners of SS mode PMCs of the exec event. */
1462 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1463 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1464 pmclog_process_procexec(po, PMC_ID_INVALID,
1465 p->p_pid, pk->pm_entryaddr, fullpath);
1466
1467 PROC_LOCK(p);
1468 is_using_hwpmcs = p->p_flag & P_HWPMC;
1469 PROC_UNLOCK(p);
1470
1471 if (!is_using_hwpmcs) {
1472 if (freepath)
1473 FREE(freepath, M_TEMP);
1474 break;
1475 }
1476
1477 /*
1478 * PMCs are not inherited across an exec(): remove any
1479 * PMCs that this process is the owner of.
1480 */
1481
1482 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
1483 pmc_remove_owner(po);
1484 pmc_destroy_owner_descriptor(po);
1485 }
1486
1487 /*
1488 * If this process is the target of a PMC, check if the new
1489 * credentials are compatible with the owner's permissions.
1490 */
1491
1492 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
1493 break;
1494
1495 /*
1496 * Log the exec event to all monitoring owners. Skip
1497 * owners who have already received the event because
1498 * they have system sampling PMCs active.
1499 */
1500 for (ri = 0; ri < md->pmd_npmc; ri++)
1501 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
1502 po = pm->pm_owner;
1503 if (po->po_sscount == 0 &&
1504 po->po_flags & PMC_PO_OWNS_LOGFILE)
1505 pmclog_process_procexec(po, pm->pm_id,
1506 p->p_pid, pk->pm_entryaddr,
1507 fullpath);
1508 }
1509
1510 if (freepath)
1511 FREE(freepath, M_TEMP);
1512
1513
1514 PMCDBG(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
1515 p, p->p_pid, p->p_comm, pk->pm_credentialschanged);
1516
1517 if (pk->pm_credentialschanged == 0) /* no change */
1518 break;
1519
1520 /*
1521 * If the newly exec()'ed process has a different credential
1522 * than before, allow it to be the target of a PMC only if
1523 * the PMC's owner has sufficient privilege.
1524 */
1525
1526 for (ri = 0; ri < md->pmd_npmc; ri++)
1527 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
1528 if (pmc_can_attach(pm, td->td_proc) != 0)
1529 pmc_detach_one_process(td->td_proc,
1530 pm, PMC_FLAG_NONE);
1531
1532 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt < (int) md->pmd_npmc,
1533 ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
1534 pp->pp_refcnt, pp));
1535
1536 /*
1537 * If this process is no longer the target of any
1538 * PMCs, we can remove the process entry and free
1539 * up space.
1540 */
1541
1542 if (pp->pp_refcnt == 0) {
1543 pmc_remove_process_descriptor(pp);
1544 FREE(pp, M_PMC);
1545 break;
1546 }
1547
1548 }
1549 break;
1550
1551 case PMC_FN_CSW_IN:
1552 pmc_process_csw_in(td);
1553 break;
1554
1555 case PMC_FN_CSW_OUT:
1556 pmc_process_csw_out(td);
1557 break;
1558
1559 /*
1560 * Process accumulated PC samples.
1561 *
1562 * This function is expected to be called by hardclock() for
1563 * each CPU that has accumulated PC samples.
1564 *
1565 * This function is to be executed on the CPU whose samples
1566 * are being processed.
1567 */
1568 case PMC_FN_DO_SAMPLES:
1569
1570 /*
1571 * Clear the cpu specific bit in the CPU mask before
1572 * doing the rest of the processing. If the NMI handler
1573 * gets invoked after the "atomic_clear_int()" call
1574 * below but before "pmc_process_samples()" gets
1575 * around to processing the interrupt, then we will
1576 * come back here at the next hardclock() tick (and
1577 * may find nothing to do if "pmc_process_samples()"
1578 * had already processed the interrupt). We don't
1579 * lose the interrupt sample.
1580 */
1581 atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
1582 pmc_process_samples(PCPU_GET(cpuid));
1583 break;
1584
1585 default:
1586#if DEBUG
1587 KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
1588#endif
1589 break;
1590
1591 }
1592
1593 return 0;
1594}
1595
1596/*
1597 * allocate a 'struct pmc_owner' descriptor in the owner hash table.
1598 */
1599
1600static struct pmc_owner *
1601pmc_allocate_owner_descriptor(struct proc *p)
1602{
1603 uint32_t hindex;
1604 struct pmc_owner *po;
1605 struct pmc_ownerhash *poh;
1606
1607 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
1608 poh = &pmc_ownerhash[hindex];
1609
1610 /* allocate space for an owner descriptor */
1611 MALLOC(po, struct pmc_owner *, sizeof(struct pmc_owner),
1612 M_PMC, M_ZERO|M_WAITOK);
1613
1614 po->po_sscount = po->po_error = po->po_flags = 0;
1615 po->po_file = NULL;
1616 po->po_owner = p;
1617 po->po_kthread = NULL;
1618 LIST_INIT(&po->po_pmcs);
1619 LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
1620
1621 TAILQ_INIT(&po->po_logbuffers);
1622 mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc", MTX_SPIN);
1623
1624 PMCDBG(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
1625 p, p->p_pid, p->p_comm, po);
1626
1627 return po;
1628}
1629
1630static void
1631pmc_destroy_owner_descriptor(struct pmc_owner *po)
1632{
1633
1634 PMCDBG(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
1635 po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);
1636
1637 mtx_destroy(&po->po_mtx);
1638 FREE(po, M_PMC);
1639}
1640
1641/*
1642 * find the descriptor corresponding to process 'p', adding or removing it
1643 * as specified by 'mode'.
1644 */
1645
1646static struct pmc_process *
1647pmc_find_process_descriptor(struct proc *p, uint32_t mode)
1648{
1649 uint32_t hindex;
1650 struct pmc_process *pp, *ppnew;
1651 struct pmc_processhash *pph;
1652
1653 hindex = PMC_HASH_PTR(p, pmc_processhashmask);
1654 pph = &pmc_processhash[hindex];
1655
1656 ppnew = NULL;
1657
1658 /*
1659 * Pre-allocate memory in the FIND_ALLOCATE case since we
1660 * cannot call malloc(9) once we hold a spin lock.
1661 */
1662
1663 if (mode & PMC_FLAG_ALLOCATE) {
1664 /* allocate additional space for 'n' pmc pointers */
1665 MALLOC(ppnew, struct pmc_process *,
1666 sizeof(struct pmc_process) + md->pmd_npmc *
1667 sizeof(struct pmc_targetstate), M_PMC, M_ZERO|M_WAITOK);
1668 }
1669
1670 mtx_lock_spin(&pmc_processhash_mtx);
1671 LIST_FOREACH(pp, pph, pp_next)
1672 if (pp->pp_proc == p)
1673 break;
1674
1675 if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
1676 LIST_REMOVE(pp, pp_next);
1677
1678 if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
1679 ppnew != NULL) {
1680 ppnew->pp_proc = p;
1681 LIST_INSERT_HEAD(pph, ppnew, pp_next);
1682 pp = ppnew;
1683 ppnew = NULL;
1684 }
1685 mtx_unlock_spin(&pmc_processhash_mtx);
1686
1687 if (pp != NULL && ppnew != NULL)
1688 FREE(ppnew, M_PMC);
1689
1690 return pp;
1691}
1692
1693/*
1694 * remove a process descriptor from the process hash table.
1695 */
1696
1697static void
1698pmc_remove_process_descriptor(struct pmc_process *pp)
1699{
1700 KASSERT(pp->pp_refcnt == 0,
1701 ("[pmc,%d] Removing process descriptor %p with count %d",
1702 __LINE__, pp, pp->pp_refcnt));
1703
1704 mtx_lock_spin(&pmc_processhash_mtx);
1705 LIST_REMOVE(pp, pp_next);
1706 mtx_unlock_spin(&pmc_processhash_mtx);
1707}
1708
1709
1710/*
1711 * find an owner descriptor corresponding to proc 'p'
1712 */
1713
1714static struct pmc_owner *
1715pmc_find_owner_descriptor(struct proc *p)
1716{
1717 uint32_t hindex;
1718 struct pmc_owner *po;
1719 struct pmc_ownerhash *poh;
1720
1721 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
1722 poh = &pmc_ownerhash[hindex];
1723
1724 po = NULL;
1725 LIST_FOREACH(po, poh, po_next)
1726 if (po->po_owner == p)
1727 break;
1728
1729 PMCDBG(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
1730 "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
1731
1732 return po;
1733}
1734
1735/*
1736 * pmc_allocate_pmc_descriptor
1737 *
1738 * Allocate a pmc descriptor and initialize its
1739 * fields.
1740 */
1741
1742static struct pmc *
1743pmc_allocate_pmc_descriptor(void)
1744{
1745 struct pmc *pmc;
1746
1747 MALLOC(pmc, struct pmc *, sizeof(struct pmc), M_PMC, M_ZERO|M_WAITOK);
1748
1749 if (pmc != NULL) {
1750 pmc->pm_owner = NULL;
1751 LIST_INIT(&pmc->pm_targets);
1752 }
1753
1754 PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
1755
1756 return pmc;
1757}
1758
1759/*
1760 * Destroy a pmc descriptor.
1761 */
1762
1763static void
1764pmc_destroy_pmc_descriptor(struct pmc *pm)
1765{
1766 (void) pm;
1767
1768#if DEBUG
1769 KASSERT(pm->pm_state == PMC_STATE_DELETED ||
1770 pm->pm_state == PMC_STATE_FREE,
1771 ("[pmc,%d] destroying non-deleted PMC", __LINE__));
1772 KASSERT(LIST_EMPTY(&pm->pm_targets),
1773 ("[pmc,%d] destroying pmc with targets", __LINE__));
1774 KASSERT(pm->pm_owner == NULL,
1775 ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
1776 KASSERT(pm->pm_runcount == 0,
1777 ("[pmc,%d] pmc has non-zero run count %d", __LINE__,
1778 pm->pm_runcount));
1779#endif
1780}
1781
1782static void
1783pmc_wait_for_pmc_idle(struct pmc *pm)
1784{
1785#if DEBUG
1786 volatile int maxloop;
1787
1788 maxloop = 100 * mp_ncpus;
1789#endif
1790
1791 /*
1792 * Loop (with a forced context switch) till the PMC's runcount
1793 * comes down to zero.
1794 */
1795 while (atomic_load_acq_32(&pm->pm_runcount) > 0) {
1796#if DEBUG
1797 maxloop--;
1798 KASSERT(maxloop > 0,
1799 ("[pmc,%d] (ri%d, rc%d) waiting too long for "
1800 "pmc to be free", __LINE__,
1801 PMC_TO_ROWINDEX(pm), pm->pm_runcount));
1802#endif
1803 pmc_force_context_switch();
1804 }
1805}
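
/*
 * Design note (a reading of the code above): pm_runcount is
 * decremented when a target is context-switched out (see
 * pmc_process_csw_out() above), so yielding the CPU for a tick via
 * pmc_force_context_switch() gives the PMC's targets a chance to be
 * switched out on their CPUs and lets the runcount drain to zero.
 */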
1806
1807/*
1808 * This function does the following things:
1809 *
1810 * - detaches the PMC from hardware
1811 * - unlinks all target threads that were attached to it
1812 * - removes the PMC from its owner's list
1813 * - destroys the PMC private mutex
1814 *
1815 * Once this function completes, the given pmc pointer can be safely
1816 * FREE'd by the caller.
1817 */
1818
1819static void
1820pmc_release_pmc_descriptor(struct pmc *pm)
1821{
1822 u_int ri, cpu;
1823 enum pmc_mode mode;
1824 struct pmc_hw *phw;
1825 struct pmc_owner *po;
1826 struct pmc_process *pp;
1827 struct pmc_target *ptgt, *tmp;
1828 struct pmc_binding pb;
1829
1830 sx_assert(&pmc_sx, SX_XLOCKED);
1831
1832 KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
1833
1834 ri = PMC_TO_ROWINDEX(pm);
1835 mode = PMC_TO_MODE(pm);
1836
1837 PMCDBG(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
1838 mode);
1839
1840 /*
1841 * First, we take the PMC off hardware.
1842 */
1843 cpu = 0;
1844 if (PMC_IS_SYSTEM_MODE(mode)) {
1845
1846 /*
1847 * A system mode PMC runs on a specific CPU. Switch
1848 * to this CPU and turn hardware off.
1849 */
1850 pmc_save_cpu_binding(&pb);
1851
1852 cpu = PMC_TO_CPU(pm);
1853
1854 pmc_select_cpu(cpu);
1855
1856 /* switch off non-stalled CPUs */
1857 if (pm->pm_state == PMC_STATE_RUNNING &&
1858 (pm->pm_flags & PMC_F_IS_STALLED) == 0) {
1858 pm->pm_stalled == 0) {
1859
1860 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
1861
1862 KASSERT(phw->phw_pmc == pm,
1863 ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
1864 __LINE__, ri, phw->phw_pmc, pm));
1865 PMCDBG(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
1866
1867 critical_enter();
1868 md->pmd_stop_pmc(cpu, ri);
1869 critical_exit();
1870 }
1871
1872 PMCDBG(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
1873
1874 critical_enter();
1875 md->pmd_config_pmc(cpu, ri, NULL);
1876 critical_exit();
1877
1878 /* adjust the global and process count of SS mode PMCs */
1879 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
1880 po = pm->pm_owner;
1881 po->po_sscount--;
1882 if (po->po_sscount == 0) {
1883 atomic_subtract_rel_int(&pmc_ss_count, 1);
1884 LIST_REMOVE(po, po_ssnext);
1885 }
1886 }
1887
1888 pm->pm_state = PMC_STATE_DELETED;
1889
1890 pmc_restore_cpu_binding(&pb);
1891
1892 /*
1893 * We could have references to this PMC structure in
1894 * the per-cpu sample queues. Wait for the queue to
1895 * drain.
1896 */
1897 pmc_wait_for_pmc_idle(pm);
1898
1899 } else if (PMC_IS_VIRTUAL_MODE(mode)) {
1900
1901 /*
1902 * A virtual PMC could be running on multiple CPUs at
1903 * a given instant.
1904 *
1905 * By marking its state as DELETED, we ensure that
1906 * this PMC is never further scheduled on hardware.
1907 *
1908 * Then we wait till all CPUs are done with this PMC.
1909 */
1910 pm->pm_state = PMC_STATE_DELETED;
1911
1912
1913 /* Wait for the PMC's runcount to come to zero. */
1914 pmc_wait_for_pmc_idle(pm);
1915
1916 /*
1917 * At this point the PMC is off all CPUs and cannot be
1918 * freshly scheduled onto a CPU. It is now safe to
1919 * unlink all targets from this PMC. If a
1920 * process-record's refcount falls to zero, we remove
1921 * it from the hash table. The module-wide SX lock
1922 * protects us from races.
1923 */
1924 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
1925 pp = ptgt->pt_process;
1926 pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
1927
1928 PMCDBG(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
1929
1930 /*
1931 * If the target process record shows that no
1932 * PMCs are attached to it, reclaim its space.
1933 */
1934
1935 if (pp->pp_refcnt == 0) {
1936 pmc_remove_process_descriptor(pp);
1937 FREE(pp, M_PMC);
1938 }
1939 }
1940
1941 cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
1942
1943 }
1944
1945 /*
1946 * Release any MD resources
1947 */
1948
1949 (void) md->pmd_release_pmc(cpu, ri, pm);
1950
1951 /*
1952 * Update row disposition
1953 */
1954
1955 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
1956 PMC_UNMARK_ROW_STANDALONE(ri);
1957 else
1958 PMC_UNMARK_ROW_THREAD(ri);
1959
1960 /* unlink from the owner's list */
1961 if (pm->pm_owner) {
1962 LIST_REMOVE(pm, pm_next);
1963 pm->pm_owner = NULL;
1964 }
1965
1966 pmc_destroy_pmc_descriptor(pm);
1967}
1968
1969/*
1970 * Register an owner and a pmc.
1971 */
1972
1973static int
1974pmc_register_owner(struct proc *p, struct pmc *pmc)
1975{
1976 struct pmc_owner *po;
1977
1978 sx_assert(&pmc_sx, SX_XLOCKED);
1979
1980 if ((po = pmc_find_owner_descriptor(p)) == NULL)
1981 if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
1982 return ENOMEM;
1983
1984 KASSERT(pmc->pm_owner == NULL,
1985 ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
1986 pmc->pm_owner = po;
1987
1988 LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
1989
1990 PROC_LOCK(p);
1991 p->p_flag |= P_HWPMC;
1992 PROC_UNLOCK(p);
1993
1994 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1995 pmclog_process_pmcallocate(pmc);
1996
1997 PMCDBG(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
1998 po, pmc);
1999
2000 return 0;
2001}
2002
2003/*
2004 * Return the current row disposition:
2005 * == 0 => FREE
2006 * > 0 => PROCESS MODE
2007 * < 0 => SYSTEM MODE
2008 */
2009
2010int
2011pmc_getrowdisp(int ri)
2012{
2013 return pmc_pmcdisp[ri];
2014}
2015
2016/*
2017 * Check if a PMC at row index 'ri' can be allocated to the current
2018 * process.
2019 *
2020 * Allocation can fail if:
2021 * - the current process is already being profiled by a PMC at index 'ri',
2022 * attached to it via OP_PMCATTACH.
2023 * - the current process has already allocated a PMC at index 'ri'
2024 * via OP_ALLOCATE.
2025 */
2026
2027static int
2028pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
2029{
2030 enum pmc_mode mode;
2031 struct pmc *pm;
2032 struct pmc_owner *po;
2033 struct pmc_process *pp;
2034
2035 PMCDBG(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
2036 "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
2037
2038 /*
2039 * We shouldn't have already allocated a process-mode PMC at
2040 * row index 'ri'.
2041 *
2042 * We shouldn't have allocated a system-wide PMC on the same
2043 * CPU and same RI.
2044 */
2045 if ((po = pmc_find_owner_descriptor(p)) != NULL)
2046 LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2047 if (PMC_TO_ROWINDEX(pm) == ri) {
2048 mode = PMC_TO_MODE(pm);
2049 if (PMC_IS_VIRTUAL_MODE(mode))
2050 return EEXIST;
2051 if (PMC_IS_SYSTEM_MODE(mode) &&
2052 (int) PMC_TO_CPU(pm) == cpu)
2053 return EEXIST;
2054 }
2055 }
2056
2057 /*
2058 * We also shouldn't be the target of any PMC at this index
2059 * since otherwise a PMC_ATTACH to ourselves will fail.
2060 */
2061 if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
2062 if (pp->pp_pmcs[ri].pp_pmc)
2063 return EEXIST;
2064
2065 PMCDBG(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
2066 p, p->p_pid, p->p_comm, ri);
2067
2068 return 0;
2069}
2070
2071/*
2072 * Check if a given PMC at row index 'ri' can be currently used in
2073 * mode 'mode'.
2074 */
2075
2076static int
2077pmc_can_allocate_row(int ri, enum pmc_mode mode)
2078{
2079 enum pmc_disp disp;
2080
2081 sx_assert(&pmc_sx, SX_XLOCKED);
2082
2083 PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
2084
2085 if (PMC_IS_SYSTEM_MODE(mode))
2086 disp = PMC_DISP_STANDALONE;
2087 else
2088 disp = PMC_DISP_THREAD;
2089
2090 /*
2091 * check disposition for PMC row 'ri':
2092 *
2093 * Expected disposition Row-disposition Result
2094 *
2095 * STANDALONE STANDALONE or FREE proceed
2096 * STANDALONE THREAD fail
2097 * THREAD THREAD or FREE proceed
2098 * THREAD STANDALONE fail
2099 */
2100
2101 if (!PMC_ROW_DISP_IS_FREE(ri) &&
2102 !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
2103 !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
2104 return EBUSY;
2105
2106 /*
2107 * All OK
2108 */
2109
2110 PMCDBG(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
2111
2112 return 0;
2113
2114}
2115
2116/*
2117 * Find a PMC descriptor with user handle 'pmcid' for thread 'td'.
2118 */
2119
2120static struct pmc *
2121pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
2122{
2123 struct pmc *pm;
2124
2125 KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
2126 ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
2127 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
2128
2129 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2130 if (pm->pm_id == pmcid)
2131 return pm;
2132
2133 return NULL;
2134}
2135
2136static int
2137pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
2138{
2139
2140 struct pmc *pm;
2141 struct pmc_owner *po;
2142
2143 PMCDBG(PMC,FND,1, "find-pmc id=%d", pmcid);
2144
2145 if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL)
2146 return ESRCH;
2147
2148 if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
2149 return EINVAL;
2150
2151 PMCDBG(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
2152
2153 *pmc = pm;
2154 return 0;
2155}
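
/*
 * An illustrative sketch of typical use (names follow this file's
 * conventions): a syscall handler resolves a user-supplied PMC
 * handle before operating on it:
 *
 *	struct pmc *pm;
 *	int error;
 *
 *	if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
 *		return error;
 *	... operate on 'pm'; it is owned by the calling process ...
 */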
2156
2157/*
2158 * Start a PMC.
2159 */
2160
2161static int
2162pmc_start(struct pmc *pm)
2163{
2164 int error, cpu, ri;
2165 enum pmc_mode mode;
2166 struct pmc_owner *po;
2167 struct pmc_binding pb;
2168
2169 KASSERT(pm != NULL,
2170 ("[pmc,%d] null pm", __LINE__));
2171
2172 mode = PMC_TO_MODE(pm);
2173 ri = PMC_TO_ROWINDEX(pm);
2174 error = 0;
2175
2176 PMCDBG(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
2177
2178 po = pm->pm_owner;
2179
2180 if (PMC_IS_VIRTUAL_MODE(mode)) {
2181
2182 /*
2183 * If a PMCATTACH has never been done on this PMC,
2184 * attach it to its owner process.
2185 */
2186
2187 if (LIST_EMPTY(&pm->pm_targets))
2188 error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
2189 pmc_attach_process(po->po_owner, pm);
2190
2191 /*
2192 * Disallow PMCSTART if a logfile is required but has not
2193 * been configured yet.
2194 */
2195
2196 if (error == 0 && (pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
2197 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
2198 error = EDOOFUS;
2199
2200 /*
2201 * If the PMC is attached to its owner, then force a context
2202 * switch to ensure that the MD state gets set correctly.
2203 */
2204
2205 if (error == 0) {
2206 pm->pm_state = PMC_STATE_RUNNING;
2207 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
2208 pmc_force_context_switch();
2209 }
2210
2211 return error;
2212 }
2213
2214
2215 /*
2216 * A system-wide PMC.
2217 */
2218
2219 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
2220 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
2221 return EDOOFUS; /* programming error */
2222
2223 /*
2224 * Add the owner to the global list if this is a system-wide
2225 * sampling PMC.
2226 */
2227
2228 if (mode == PMC_MODE_SS) {
2229 if (po->po_sscount == 0) {
2230 LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
2231 atomic_add_rel_int(&pmc_ss_count, 1);
2232 PMCDBG(PMC,OPS,1, "po=%p in global list", po);
2233 }
2234 po->po_sscount++;
2235 }
2236
2237 /*
2238 * Move to the CPU associated with this
2239 * PMC, and start the hardware.
2240 */
2241
2242 pmc_save_cpu_binding(&pb);
2243
2244 cpu = PMC_TO_CPU(pm);
2245
2246 if (pmc_cpu_is_disabled(cpu))
2247 return ENXIO;
2248
2249 pmc_select_cpu(cpu);
2250
2251 /*
2252 * global PMCs are configured at allocation time
2253 * so write out the initial value and start the PMC.
2254 */
2255
2256 pm->pm_state = PMC_STATE_RUNNING;
2257
2258 critical_enter();
2259 if ((error = md->pmd_write_pmc(cpu, ri,
2260 PMC_IS_SAMPLING_MODE(mode) ?
2261 pm->pm_sc.pm_reloadcount :
2262 pm->pm_sc.pm_initial)) == 0)
2263 error = md->pmd_start_pmc(cpu, ri);
2264 critical_exit();
2265
2266 pmc_restore_cpu_binding(&pb);
2267
2268 return error;
2269}
2270
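/*
 * A minimal userland sketch of reaching pmc_start() through the
 * syscall interface below, using the pmc(3) wrappers.  The event
 * name is a CPU-dependent placeholder, and the wrapper signatures
 * are assumed from the pmc(3) manual page rather than taken from
 * this file:
 *
 *	#include <err.h>
 *	#include <pmc.h>
 *
 *	pmc_id_t id;
 *	pmc_value_t v;
 *
 *	if (pmc_init() < 0)
 *		err(1, "pmc_init");
 *	if (pmc_allocate("k8-dc-misses", PMC_MODE_TC, 0,
 *	    PMC_CPU_ANY, &id) < 0)
 *		err(1, "pmc_allocate");
 *	if (pmc_start(id) < 0)
 *		err(1, "pmc_start");
 *	... run the code being measured ...
 *	if (pmc_stop(id) < 0 || pmc_read(id, &v) < 0)
 *		err(1, "pmc_stop/pmc_read");
 */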
2271/*
2272 * Stop a PMC.
2273 */
2274
2275static int
2276pmc_stop(struct pmc *pm)
2277{
2278 int cpu, error, ri;
2279 struct pmc_owner *po;
2280 struct pmc_binding pb;
2281
2282 KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
2283
2284 PMCDBG(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
2285 PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
2286
2287 pm->pm_state = PMC_STATE_STOPPED;
2288
2289 /*
2290 * If the PMC is a virtual mode one, changing the state to
2291 * non-RUNNING is enough to ensure that the PMC never gets
2292 * scheduled.
2293 *
2294	 * If this PMC is currently running on a CPU, then it will be
2295	 * handled correctly at the time its target process is context
2296 * switched out.
2297 */
2298
2299 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
2300 return 0;
2301
2302 /*
2303 * A system-mode PMC. Move to the CPU associated with
2304 * this PMC, and stop the hardware. We update the
2305 * 'initial count' so that a subsequent PMCSTART will
2306 * resume counting from the current hardware count.
2307 */
2308
2309 pmc_save_cpu_binding(&pb);
2310
2311 cpu = PMC_TO_CPU(pm);
2312
2313 KASSERT(cpu >= 0 && cpu < mp_ncpus,
2314 ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
2315
2316 if (pmc_cpu_is_disabled(cpu))
2317 return ENXIO;
2318
2319 pmc_select_cpu(cpu);
2320
2321 ri = PMC_TO_ROWINDEX(pm);
2322
2323 critical_enter();
2324 if ((error = md->pmd_stop_pmc(cpu, ri)) == 0)
2325 error = md->pmd_read_pmc(cpu, ri, &pm->pm_sc.pm_initial);
2326 critical_exit();
2327
2328 pmc_restore_cpu_binding(&pb);
2329
2330 po = pm->pm_owner;
2331
2332 /* remove this owner from the global list of SS PMC owners */
2333 if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
2334 po->po_sscount--;
2335 if (po->po_sscount == 0) {
2336 atomic_subtract_rel_int(&pmc_ss_count, 1);
2337 LIST_REMOVE(po, po_ssnext);
2338 PMCDBG(PMC,OPS,2,"po=%p removed from global list", po);
2339 }
2340 }
2341
2342 return error;
2343}
2344
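/*
 * Note that because pmc_stop() folds the current hardware count back
 * into pm_sc.pm_initial, stop/start pairs do not lose events for
 * counting-mode PMCs.  As an illustration: if a system-wide counting
 * PMC reads 1000 at the time it is stopped, a subsequent pmc_start()
 * writes 1000 back to the hardware, so a later read of 1500 means
 * that 500 further events occurred after the restart.
 */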
2345
2346#if DEBUG
2347static const char *pmc_op_to_name[] = {
2348#undef __PMC_OP
2349#define __PMC_OP(N, D) #N ,
2350 __PMC_OPS()
2351 NULL
2352};
2353#endif
2354
2355/*
2356 * The syscall interface
2357 */
2358
2359#define PMC_GET_SX_XLOCK(...) do { \
2360 sx_xlock(&pmc_sx); \
2361 if (pmc_hook == NULL) { \
2362 sx_xunlock(&pmc_sx); \
2363 return __VA_ARGS__; \
2364 } \
2365} while (0)
2366
2367#define PMC_DOWNGRADE_SX() do { \
2368 sx_downgrade(&pmc_sx); \
2369 is_sx_downgraded = 1; \
2370} while (0)
2371
2372static int
2373pmc_syscall_handler(struct thread *td, void *syscall_args)
2374{
2375 int error, is_sx_downgraded, op;
2376 struct pmc_syscall_args *c;
2377 void *arg;
2378
2379 PMC_GET_SX_XLOCK(ENOSYS);
2380
2381 DROP_GIANT();
2382
2383 is_sx_downgraded = 0;
2384
2385 c = (struct pmc_syscall_args *) syscall_args;
2386
2387 op = c->pmop_code;
2388 arg = c->pmop_data;
2389
2390 PMCDBG(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
2391 pmc_op_to_name[op], arg);
2392
2393 error = 0;
2394 atomic_add_int(&pmc_stats.pm_syscalls, 1);
2395
2396 switch(op)
2397 {
2398
2399
2400 /*
2401 * Configure a log file.
2402 *
2403 * XXX This OP will be reworked.
2404 */
2405
2406 case PMC_OP_CONFIGURELOG:
2407 {
2408 struct pmc_owner *po;
2409 struct pmc_op_configurelog cl;
2410 struct proc *p;
2411
2412 sx_assert(&pmc_sx, SX_XLOCKED);
2413
2414 if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
2415 break;
2416
2417 /* mark this process as owning a log file */
2418 p = td->td_proc;
2419 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2420 if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
2421 error = ENOMEM;
2422 break;
2423 }
2424
2425 /*
2426 * If a valid fd was passed in, try to configure that,
2427 * otherwise if 'fd' was less than zero and there was
2428 * a log file configured, flush its buffers and
2429 * de-configure it.
2430 */
2431 if (cl.pm_logfd >= 0)
2432 error = pmclog_configure_log(po, cl.pm_logfd);
2433 else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
2434 pmclog_process_closelog(po);
2435 error = pmclog_flush(po);
2436 if (error == 0)
2437 error = pmclog_deconfigure_log(po);
2438 } else
2439 error = EINVAL;
2440 }
2441 break;
2442
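	/*
	 * A hedged userland sketch of driving this operation; the
	 * wrapper name is assumed from the pmc(3) manual page, not
	 * taken from this file:
	 *
	 *	int fd = open("/tmp/sample.pmclog",
	 *	    O_WRONLY | O_CREAT | O_TRUNC, 0600);
	 *	if (fd < 0 || pmc_configure_logfile(fd) < 0)
	 *		err(1, "cannot configure log file");
	 *
	 * Passing a negative file descriptor in 'cl.pm_logfd' selects
	 * the de-configure path handled above.
	 */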
2443
2444 /*
2445 * Flush a log file.
2446 */
2447
2448 case PMC_OP_FLUSHLOG:
2449 {
2450 struct pmc_owner *po;
2451
2452 sx_assert(&pmc_sx, SX_XLOCKED);
2453
2454 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
2455 error = EINVAL;
2456 break;
2457 }
2458
2459 error = pmclog_flush(po);
2460 }
2461 break;
2462
2463 /*
2464 * Retrieve hardware configuration.
2465 */
2466
2467 case PMC_OP_GETCPUINFO: /* CPU information */
2468 {
2469 struct pmc_op_getcpuinfo gci;
2470
2471 gci.pm_cputype = md->pmd_cputype;
2472 gci.pm_ncpu = mp_ncpus;
2473 gci.pm_npmc = md->pmd_npmc;
2474 gci.pm_nclass = md->pmd_nclass;
2475 bcopy(md->pmd_classes, &gci.pm_classes,
2476 sizeof(gci.pm_classes));
2477 error = copyout(&gci, arg, sizeof(gci));
2478 }
2479 break;
2480
2481
2482 /*
2483 * Get module statistics
2484 */
2485
2486 case PMC_OP_GETDRIVERSTATS:
2487 {
2488 struct pmc_op_getdriverstats gms;
2489
2490 bcopy(&pmc_stats, &gms, sizeof(gms));
2491 error = copyout(&gms, arg, sizeof(gms));
2492 }
2493 break;
2494
2495
2496 /*
2497 * Retrieve module version number
2498 */
2499
2500 case PMC_OP_GETMODULEVERSION:
2501 {
2502 uint32_t cv, modv;
2503
2504 /* retrieve the client's idea of the ABI version */
2505 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
2506 break;
2507 /* don't service clients newer than our driver */
2508 modv = PMC_VERSION;
2509 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
2510 error = EPROGMISMATCH;
2511 break;
2512 }
2513 error = copyout(&modv, arg, sizeof(int));
2514 }
2515 break;
2516
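	/*
	 * Assuming the usual encoding in <sys/pmc.h>, where
	 * PMC_VERSION packs the major number in bits 24-31 and the
	 * minor number in bits 16-23, the 0xFFFF0000 mask above
	 * compares the (major, minor) pair and ignores the patch
	 * level: a client built against 2.1.x is refused by a 2.0.x
	 * driver with EPROGMISMATCH, while a 2.0.5 client is still
	 * serviced by a 2.0.9 driver.
	 */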
2517
2518 /*
2519 * Retrieve the state of all the PMCs on a given
2520 * CPU.
2521 */
2522
2523 case PMC_OP_GETPMCINFO:
2524 {
2525 uint32_t cpu, n, npmc;
2526 size_t pmcinfo_size;
2527 struct pmc *pm;
2528 struct pmc_info *p, *pmcinfo;
2529 struct pmc_op_getpmcinfo *gpi;
2530 struct pmc_owner *po;
2531 struct pmc_binding pb;
2532
2533 PMC_DOWNGRADE_SX();
2534
2535 gpi = (struct pmc_op_getpmcinfo *) arg;
2536
2537 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
2538 break;
2539
2540 if (cpu >= (unsigned int) mp_ncpus) {
2541 error = EINVAL;
2542 break;
2543 }
2544
2545 if (pmc_cpu_is_disabled(cpu)) {
2546 error = ENXIO;
2547 break;
2548 }
2549
2550 /* switch to CPU 'cpu' */
2551 pmc_save_cpu_binding(&pb);
2552 pmc_select_cpu(cpu);
2553
2554 npmc = md->pmd_npmc;
2555
2556 pmcinfo_size = npmc * sizeof(struct pmc_info);
2557 MALLOC(pmcinfo, struct pmc_info *, pmcinfo_size, M_PMC,
2558 M_WAITOK);
2559
2560 p = pmcinfo;
2561
2562 for (n = 0; n < md->pmd_npmc; n++, p++) {
2563
2564 if ((error = md->pmd_describe(cpu, n, p, &pm)) != 0)
2565 break;
2566
2567 if (PMC_ROW_DISP_IS_STANDALONE(n))
2568 p->pm_rowdisp = PMC_DISP_STANDALONE;
2569 else if (PMC_ROW_DISP_IS_THREAD(n))
2570 p->pm_rowdisp = PMC_DISP_THREAD;
2571 else
2572 p->pm_rowdisp = PMC_DISP_FREE;
2573
2574 p->pm_ownerpid = -1;
2575
2576 if (pm == NULL) /* no PMC associated */
2577 continue;
2578
2579 po = pm->pm_owner;
2580
2581 KASSERT(po->po_owner != NULL,
2582 ("[pmc,%d] pmc_owner had a null proc pointer",
2583 __LINE__));
2584
2585 p->pm_ownerpid = po->po_owner->p_pid;
2586 p->pm_mode = PMC_TO_MODE(pm);
2587 p->pm_event = pm->pm_event;
2588 p->pm_flags = pm->pm_flags;
2589
2590 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
2591 p->pm_reloadcount =
2592 pm->pm_sc.pm_reloadcount;
2593 }
2594
2595 pmc_restore_cpu_binding(&pb);
2596
2597 /* now copy out the PMC info collected */
2598 if (error == 0)
2599 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
2600
2601 FREE(pmcinfo, M_PMC);
2602 }
2603 break;
2604
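	/*
	 * A hedged sketch of consuming this operation from userland;
	 * the wrapper names and the exact result layout are assumed
	 * from the pmc(3) manual page:
	 *
	 *	struct pmc_pmcinfo *pi;
	 *	int n, npmc;
	 *
	 *	if ((npmc = pmc_npmc(0)) < 0 || pmc_pmcinfo(0, &pi) < 0)
	 *		err(1, "pmc_pmcinfo");
	 *	for (n = 0; n < npmc; n++)
	 *		printf("row %d: disp %d owner %d\n", n,
	 *		    pi->pm_pmcs[n].pm_rowdisp,
	 *		    pi->pm_pmcs[n].pm_ownerpid);
	 */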
2605
2606 /*
2607	 * Set the administrative state of a PMC, i.e., whether
2608	 * the PMC is to be used or not.
2609 */
2610
2611 case PMC_OP_PMCADMIN:
2612 {
2613 int cpu, ri;
2614 enum pmc_state request;
2615 struct pmc_cpu *pc;
2616 struct pmc_hw *phw;
2617 struct pmc_op_pmcadmin pma;
2618 struct pmc_binding pb;
2619
2620 sx_assert(&pmc_sx, SX_XLOCKED);
2621
2622 KASSERT(td == curthread,
2623 ("[pmc,%d] td != curthread", __LINE__));
2624
2625 if (suser(td) || jailed(td->td_ucred)) {
2626 error = EPERM;
2627 break;
2628 }
2629
2630 if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
2631 break;
2632
2633 cpu = pma.pm_cpu;
2634
2635 if (cpu < 0 || cpu >= mp_ncpus) {
2636 error = EINVAL;
2637 break;
2638 }
2639
2640 if (pmc_cpu_is_disabled(cpu)) {
2641 error = ENXIO;
2642 break;
2643 }
2644
2645 request = pma.pm_state;
2646
2647 if (request != PMC_STATE_DISABLED &&
2648 request != PMC_STATE_FREE) {
2649 error = EINVAL;
2650 break;
2651 }
2652
2653 ri = pma.pm_pmc; /* pmc id == row index */
2654 if (ri < 0 || ri >= (int) md->pmd_npmc) {
2655 error = EINVAL;
2656 break;
2657 }
2658
2659 /*
2660 * We can't disable a PMC with a row-index allocated
2661 * for process virtual PMCs.
2662 */
2663
2664 if (PMC_ROW_DISP_IS_THREAD(ri) &&
2665 request == PMC_STATE_DISABLED) {
2666 error = EBUSY;
2667 break;
2668 }
2669
2670 /*
2671 * otherwise, this PMC on this CPU is either free or
2672 * in system-wide mode.
2673 */
2674
2675 pmc_save_cpu_binding(&pb);
2676 pmc_select_cpu(cpu);
2677
2678 pc = pmc_pcpu[cpu];
2679 phw = pc->pc_hwpmcs[ri];
2680
2681 /*
2682 * XXX do we need some kind of 'forced' disable?
2683 */
2684
2685 if (phw->phw_pmc == NULL) {
2686 if (request == PMC_STATE_DISABLED &&
2687 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
2688 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
2689 PMC_MARK_ROW_STANDALONE(ri);
2690 } else if (request == PMC_STATE_FREE &&
2691 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
2692 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
2693 PMC_UNMARK_ROW_STANDALONE(ri);
2694 }
2695 /* other cases are a no-op */
2696 } else
2697 error = EBUSY;
2698
2699 pmc_restore_cpu_binding(&pb);
2700 }
2701 break;
2702
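	/*
	 * The request structure this case consumes is typically built
	 * by pmccontrol(8); stripped to essentials it is just (an
	 * illustrative sketch using the fields read above):
	 *
	 *	struct pmc_op_pmcadmin pma;
	 *
	 *	pma.pm_cpu = 0;
	 *	pma.pm_pmc = 2;
	 *	pma.pm_state = PMC_STATE_DISABLED;
	 *
	 * followed by the PMC_OP_PMCADMIN call.  Only the DISABLED
	 * and FREE states are accepted, and only for rows that are
	 * not reserved for process-virtual PMCs.
	 */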
2703
2704 /*
2705 * Allocate a PMC.
2706 */
2707
2708 case PMC_OP_PMCALLOCATE:
2709 {
2710 uint32_t caps;
2711 u_int cpu;
2712 int n;
2713 enum pmc_mode mode;
2714 struct pmc *pmc;
2715 struct pmc_hw *phw;
2716 struct pmc_op_pmcallocate pa;
2717 struct pmc_binding pb;
2718
2719 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
2720 break;
2721
2722 caps = pa.pm_caps;
2723 mode = pa.pm_mode;
2724 cpu = pa.pm_cpu;
2725
2726 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
2727 mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
2728 (cpu != (u_int) PMC_CPU_ANY && cpu >= (u_int) mp_ncpus)) {
2729 error = EINVAL;
2730 break;
2731 }
2732
2733 /*
2734 * Virtual PMCs should only ask for a default CPU.
2735 * System mode PMCs need to specify a non-default CPU.
2736 */
2737
2738 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
2739 (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
2740 error = EINVAL;
2741 break;
2742 }
2743
2744 /*
2745 * Check that a disabled CPU is not being asked for.
2746 */
2747
2748 if (PMC_IS_SYSTEM_MODE(mode) && pmc_cpu_is_disabled(cpu)) {
2749 error = ENXIO;
2750 break;
2751 }
2752
2753 /*
2754 * Refuse an allocation for a system-wide PMC if this
2755 * process has been jailed, or if this process lacks
2756 * super-user credentials and the sysctl tunable
2757 * 'security.bsd.unprivileged_syspmcs' is zero.
2758 */
2759
2760 if (PMC_IS_SYSTEM_MODE(mode)) {
2761 if (jailed(curthread->td_ucred))
2762 error = EPERM;
2763 else if (suser(curthread) &&
2764 (pmc_unprivileged_syspmcs == 0))
2765 error = EPERM;
2766 }
2767
2768 if (error)
2769 break;
2770
2771 /*
2772 * Look for valid values for 'pm_flags'
2773 */
2774
2775 if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
2776 PMC_F_LOG_PROCEXIT)) != 0) {
2777 error = EINVAL;
2778 break;
2779 }
2780
2781 /* process logging options are not allowed for system PMCs */
2782 if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
2783 (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
2784 error = EINVAL;
2785 break;
2786 }
2787
2788 /*
2789 * All sampling mode PMCs need to be able to interrupt the
2790 * CPU.
2791 */
2792
2793 if (PMC_IS_SAMPLING_MODE(mode))
2794 caps |= PMC_CAP_INTERRUPT;
2795
2796 PMCDBG(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
2797 pa.pm_ev, caps, mode, cpu);
2798
2799 pmc = pmc_allocate_pmc_descriptor();
2800 pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
2801 PMC_ID_INVALID);
2802 pmc->pm_event = pa.pm_ev;
2803 pmc->pm_state = PMC_STATE_FREE;
2804 pmc->pm_caps = caps;
2805 pmc->pm_flags = pa.pm_flags;
2806
2807 /* switch thread to CPU 'cpu' */
2808 pmc_save_cpu_binding(&pb);
2809
2810#define PMC_IS_SHAREABLE_PMC(cpu, n) \
2811 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
2812 PMC_PHW_FLAG_IS_SHAREABLE)
2813#define PMC_IS_UNALLOCATED(cpu, n) \
2814 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
2815
2816 if (PMC_IS_SYSTEM_MODE(mode)) {
2817 pmc_select_cpu(cpu);
2818 for (n = 0; n < (int) md->pmd_npmc; n++)
2819 if (pmc_can_allocate_row(n, mode) == 0 &&
2820 pmc_can_allocate_rowindex(
2821 curthread->td_proc, n, cpu) == 0 &&
2822 (PMC_IS_UNALLOCATED(cpu, n) ||
2823 PMC_IS_SHAREABLE_PMC(cpu, n)) &&
2824 md->pmd_allocate_pmc(cpu, n, pmc,
2825 &pa) == 0)
2826 break;
2827 } else {
2828 /* Process virtual mode */
2829 for (n = 0; n < (int) md->pmd_npmc; n++) {
2830 if (pmc_can_allocate_row(n, mode) == 0 &&
2831 pmc_can_allocate_rowindex(
2832 curthread->td_proc, n,
2833 PMC_CPU_ANY) == 0 &&
2834 md->pmd_allocate_pmc(curthread->td_oncpu,
2835 n, pmc, &pa) == 0)
2836 break;
2837 }
2838 }
2839
2840#undef PMC_IS_UNALLOCATED
2841#undef PMC_IS_SHAREABLE_PMC
2842
2843 pmc_restore_cpu_binding(&pb);
2844
2845 if (n == (int) md->pmd_npmc) {
2846 pmc_destroy_pmc_descriptor(pmc);
2847 FREE(pmc, M_PMC);
2848 pmc = NULL;
2849 error = EINVAL;
2850 break;
2851 }
2852
2853 /* Fill in the correct value in the ID field */
2854 pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
2855
2856 PMCDBG(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
2857 pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
2858
2859 /* Process mode PMCs with logging enabled need log files */
2860 if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
2861 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
2862
2863 /* All system mode sampling PMCs require a log file */
2864 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
2865 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
2866
2867 /*
2868	 * Configure global PMCs immediately
2869 */
2870
2871 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
2872
2873 pmc_save_cpu_binding(&pb);
2874 pmc_select_cpu(cpu);
2875
2876 phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
2877
2878 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
2879 (error = md->pmd_config_pmc(cpu, n, pmc)) != 0) {
2880 (void) md->pmd_release_pmc(cpu, n, pmc);
2881 pmc_destroy_pmc_descriptor(pmc);
2882 FREE(pmc, M_PMC);
2883 pmc = NULL;
2884 pmc_restore_cpu_binding(&pb);
2885 error = EPERM;
2886 break;
2887 }
2888
2889 pmc_restore_cpu_binding(&pb);
2890 }
2891
2892 pmc->pm_state = PMC_STATE_ALLOCATED;
2893
2894 /*
2895 * mark row disposition
2896 */
2897
2898 if (PMC_IS_SYSTEM_MODE(mode))
2899 PMC_MARK_ROW_STANDALONE(n);
2900 else
2901 PMC_MARK_ROW_THREAD(n);
2902
2903 /*
2904 * Register this PMC with the current thread as its owner.
2905 */
2906
2907 if ((error =
2908 pmc_register_owner(curthread->td_proc, pmc)) != 0) {
2909 pmc_release_pmc_descriptor(pmc);
2910 FREE(pmc, M_PMC);
2911 pmc = NULL;
2912 break;
2913 }
2914
2915 /*
2916 * Return the allocated index.
2917 */
2918
2919 pa.pm_pmcid = pmc->pm_id;
2920
2921 error = copyout(&pa, arg, sizeof(pa));
2922 }
2923 break;
2924
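	/*
	 * Two hedged allocation sketches using the pmc(3) wrapper;
	 * the event specifier is a CPU-dependent placeholder and the
	 * wrapper signature is assumed from the manual page:
	 *
	 *	pmc_id_t id;
	 *
	 *	a process-virtual counting PMC:
	 *	pmc_allocate("k8-dc-misses", PMC_MODE_TC, 0,
	 *	    PMC_CPU_ANY, &id);
	 *
	 *	a system-wide sampling PMC bound to CPU 1, which will
	 *	also need a configured log file (PMC_F_NEEDS_LOGFILE):
	 *	pmc_allocate("k8-dc-misses", PMC_MODE_SS, 0, 1, &id);
	 */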
2925
2926 /*
2927 * Attach a PMC to a process.
2928 */
2929
2930 case PMC_OP_PMCATTACH:
2931 {
2932 struct pmc *pm;
2933 struct proc *p;
2934 struct pmc_op_pmcattach a;
2935
2936 sx_assert(&pmc_sx, SX_XLOCKED);
2937
2938 if ((error = copyin(arg, &a, sizeof(a))) != 0)
2939 break;
2940
2941 if (a.pm_pid < 0) {
2942 error = EINVAL;
2943 break;
2944 } else if (a.pm_pid == 0)
2945 a.pm_pid = td->td_proc->p_pid;
2946
2947 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
2948 break;
2949
2950 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
2951 error = EINVAL;
2952 break;
2953 }
2954
2955 /* PMCs may be (re)attached only when allocated or stopped */
2956 if (pm->pm_state == PMC_STATE_RUNNING) {
2957 error = EBUSY;
2958 break;
2959 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
2960 pm->pm_state != PMC_STATE_STOPPED) {
2961 error = EINVAL;
2962 break;
2963 }
2964
2965 /* lookup pid */
2966 if ((p = pfind(a.pm_pid)) == NULL) {
2967 error = ESRCH;
2968 break;
2969 }
2970
2971 /*
2972	 * Ignore processes that are in the process of exiting.
2973 */
2974 if (p->p_flag & P_WEXIT) {
2975 error = ESRCH;
2976 PROC_UNLOCK(p); /* pfind() returns a locked process */
2977 break;
2978 }
2979
2980 /*
2981 * we are allowed to attach a PMC to a process if
2982 * we can debug it.
2983 */
2984 error = p_candebug(curthread, p);
2985
2986 PROC_UNLOCK(p);
2987
2988 if (error == 0)
2989 error = pmc_attach_process(p, pm);
2990 }
2991 break;
2992
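	/*
	 * The matching userland call is pmc_attach(); a hedged sketch
	 * (signature assumed from the pmc(3) manual page):
	 *
	 *	if (pmc_attach(id, target_pid) < 0)
	 *		err(1, "pmc_attach");
	 *
	 * A pid of 0 attaches the PMC to the calling process itself,
	 * per the a.pm_pid == 0 convention above.
	 */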
2993
2994 /*
2995 * Detach an attached PMC from a process.
2996 */
2997
2998 case PMC_OP_PMCDETACH:
2999 {
3000 struct pmc *pm;
3001 struct proc *p;
3002 struct pmc_op_pmcattach a;
3003
3004 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3005 break;
3006
3007 if (a.pm_pid < 0) {
3008 error = EINVAL;
3009 break;
3010 } else if (a.pm_pid == 0)
3011 a.pm_pid = td->td_proc->p_pid;
3012
3013 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3014 break;
3015
3016 if ((p = pfind(a.pm_pid)) == NULL) {
3017 error = ESRCH;
3018 break;
3019 }
3020
3021 /*
3022 * Treat processes that are in the process of exiting
3023 * as if they were not present.
3024 */
3025
3026 if (p->p_flag & P_WEXIT)
3027 error = ESRCH;
3028
3029 PROC_UNLOCK(p); /* pfind() returns a locked process */
3030
3031 if (error == 0)
3032 error = pmc_detach_process(p, pm);
3033 }
3034 break;
3035
3036
3037 /*
3038 * Retrieve the MSR number associated with the counter
3039 * 'pmc_id'. This allows processes to directly use RDPMC
3040 * instructions to read their PMCs, without the overhead of a
3041 * system call.
3042 */
3043
3044 case PMC_OP_PMCGETMSR:
3045 {
3046 int ri;
3047 struct pmc *pm;
3048 struct pmc_target *pt;
3049 struct pmc_op_getmsr gm;
3050
3051 PMC_DOWNGRADE_SX();
3052
3053 /* CPU has no 'GETMSR' support */
3054 if (md->pmd_get_msr == NULL) {
3055 error = ENOSYS;
3056 break;
3057 }
3058
3059 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
3060 break;
3061
3062 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
3063 break;
3064
3065 /*
3066 * The allocated PMC has to be a process virtual PMC,
3067 * i.e., of type MODE_T[CS]. Global PMCs can only be
3068 * read using the PMCREAD operation since they may be
3069 * allocated on a different CPU than the one we could
3070 * be running on at the time of the RDPMC instruction.
3071 *
3072 * The GETMSR operation is not allowed for PMCs that
3073 * are inherited across processes.
3074 */
3075
3076 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
3077 (pm->pm_flags & PMC_F_DESCENDANTS)) {
3078 error = EINVAL;
3079 break;
3080 }
3081
3082 /*
3083	 * It only makes sense to use an RDPMC (or its
3084 * equivalent instruction on non-x86 architectures) on
3085 * a process that has allocated and attached a PMC to
3086 * itself. Conversely the PMC is only allowed to have
3087 * one process attached to it -- its owner.
3088 */
3089
3090 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
3091 LIST_NEXT(pt, pt_next) != NULL ||
3092 pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
3093 error = EINVAL;
3094 break;
3095 }
3096
3097 ri = PMC_TO_ROWINDEX(pm);
3098
3099 if ((error = (*md->pmd_get_msr)(ri, &gm.pm_msr)) < 0)
3100 break;
3101
3102 if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
3103 break;
3104
3105 /*
3106 * Mark our process as using MSRs. Update machine
3107 * state using a forced context switch.
3108 */
3109
3110 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
3111 pmc_force_context_switch();
3112
3113 }
3114 break;
3115
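	/*
	 * Once the MSR number is known, an i386/amd64 process can
	 * read its counter without a system call.  A hedged sketch;
	 * RDPMC takes the counter number in %ecx and returns the
	 * value in %edx:%eax:
	 *
	 *	uint32_t lo, hi;
	 *	uint64_t value;
	 *
	 *	__asm __volatile("rdpmc"
	 *	    : "=a" (lo), "=d" (hi)
	 *	    : "c" (gm.pm_msr));
	 *	value = ((uint64_t) hi << 32) | lo;
	 *
	 * The forced context switch below lets the MD layer refresh
	 * the CPU's user-RDPMC enable state (CR4.PCE on x86) before
	 * the process issues RDPMC.
	 */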
3116 /*
3117 * Release an allocated PMC
3118 */
3119
3120 case PMC_OP_PMCRELEASE:
3121 {
3122 pmc_id_t pmcid;
3123 struct pmc *pm;
3124 struct pmc_owner *po;
3125 struct pmc_op_simple sp;
3126
3127 /*
3128 * Find PMC pointer for the named PMC.
3129 *
3130 * Use pmc_release_pmc_descriptor() to switch off the
3131 * PMC, remove all its target threads, and remove the
3132 * PMC from its owner's list.
3133 *
3134 * Remove the owner record if this is the last PMC
3135 * owned.
3136 *
3137 * Free up space.
3138 */
3139
3140 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3141 break;
3142
3143 pmcid = sp.pm_pmcid;
3144
3145 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3146 break;
3147
3148 po = pm->pm_owner;
3149 pmc_release_pmc_descriptor(pm);
3150 pmc_maybe_remove_owner(po);
3151
3152 FREE(pm, M_PMC);
3153 }
3154 break;
3155
3156
3157 /*
3158 * Read and/or write a PMC.
3159 */
3160
3161 case PMC_OP_PMCRW:
3162 {
3163 uint32_t cpu, ri;
3164 struct pmc *pm;
3165 struct pmc_op_pmcrw *pprw;
3166 struct pmc_op_pmcrw prw;
3167 struct pmc_binding pb;
3168 pmc_value_t oldvalue;
3169
3170 PMC_DOWNGRADE_SX();
3171
3172 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
3173 break;
3174
3175 ri = 0;
3176 PMCDBG(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
3177 prw.pm_flags);
3178
3179 /* must have at least one flag set */
3180 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
3181 error = EINVAL;
3182 break;
3183 }
3184
3185 /* locate pmc descriptor */
3186 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
3187 break;
3188
3189		/* Only allocated, stopped or running PMCs may be read or written. */
3190 if (pm->pm_state != PMC_STATE_ALLOCATED &&
3191 pm->pm_state != PMC_STATE_STOPPED &&
3192 pm->pm_state != PMC_STATE_RUNNING) {
3193 error = EINVAL;
3194 break;
3195 }
3196
3197 /* writing a new value is allowed only for 'STOPPED' pmcs */
3198 if (pm->pm_state == PMC_STATE_RUNNING &&
3199 (prw.pm_flags & PMC_F_NEWVALUE)) {
3200 error = EBUSY;
3201 break;
3202 }
3203
3204 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
3205
3206 /*
3207 * If this PMC is attached to its owner (i.e.,
3208 * the process requesting this operation) and
3209 * is running, then attempt to get an
3210			 * up-to-date reading from hardware for a READ.
3211 * Writes are only allowed when the PMC is
3212 * stopped, so only update the saved value
3213 * field.
3214 *
3215 * If the PMC is not running, or is not
3216 * attached to its owner, read/write to the
3217 * savedvalue field.
3218 */
3219
3220 ri = PMC_TO_ROWINDEX(pm);
3221
3222 mtx_pool_lock_spin(pmc_mtxpool, pm);
3223 cpu = curthread->td_oncpu;
3224
3225 if (prw.pm_flags & PMC_F_OLDVALUE) {
3226 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
3227 (pm->pm_state == PMC_STATE_RUNNING))
3228 error = (*md->pmd_read_pmc)(cpu, ri,
3229 &oldvalue);
3230 else
3231 oldvalue = pm->pm_gv.pm_savedvalue;
3232 }
3233 if (prw.pm_flags & PMC_F_NEWVALUE)
3234 pm->pm_gv.pm_savedvalue = prw.pm_value;
3235
3236 mtx_pool_unlock_spin(pmc_mtxpool, pm);
3237
3238 } else { /* System mode PMCs */
3239 cpu = PMC_TO_CPU(pm);
3240 ri = PMC_TO_ROWINDEX(pm);
3241
3242 if (pmc_cpu_is_disabled(cpu)) {
3243 error = ENXIO;
3244 break;
3245 }
3246
3247 /* move this thread to CPU 'cpu' */
3248 pmc_save_cpu_binding(&pb);
3249 pmc_select_cpu(cpu);
3250
3251 critical_enter();
3252 /* save old value */
3253 if (prw.pm_flags & PMC_F_OLDVALUE)
3254 if ((error = (*md->pmd_read_pmc)(cpu, ri,
3255 &oldvalue)))
3256 goto error;
3257 /* write out new value */
3258 if (prw.pm_flags & PMC_F_NEWVALUE)
3259 error = (*md->pmd_write_pmc)(cpu, ri,
3260 prw.pm_value);
3261 error:
3262 critical_exit();
3263 pmc_restore_cpu_binding(&pb);
3264 if (error)
3265 break;
3266 }
3267
3268 pprw = (struct pmc_op_pmcrw *) arg;
3269
3270#if DEBUG
3271 if (prw.pm_flags & PMC_F_NEWVALUE)
3272 PMCDBG(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
3273 ri, prw.pm_value, oldvalue);
3274 else
3275 PMCDBG(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
3276#endif
3277
3278 /* return old value if requested */
3279 if (prw.pm_flags & PMC_F_OLDVALUE)
3280 if ((error = copyout(&oldvalue, &pprw->pm_value,
3281 sizeof(prw.pm_value))))
3282 break;
3283
3284 }
3285 break;
3286
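	/*
	 * A hedged userland view of this read/write path (wrapper
	 * names assumed from the pmc(3) manual page):
	 *
	 *	pmc_read(id, &v);	PMC_F_OLDVALUE only
	 *	pmc_write(id, 0);	PMC_F_NEWVALUE only; the PMC
	 *				must not be running
	 *	pmc_rw(id, 0, &old);	both flags, read-then-replace
	 */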
3287
3288 /*
3289 * Set the sampling rate for a sampling mode PMC and the
3290 * initial count for a counting mode PMC.
3291 */
3292
3293 case PMC_OP_PMCSETCOUNT:
3294 {
3295 struct pmc *pm;
3296 struct pmc_op_pmcsetcount sc;
3297
3298 PMC_DOWNGRADE_SX();
3299
3300 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
3301 break;
3302
3303 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
3304 break;
3305
3306 if (pm->pm_state == PMC_STATE_RUNNING) {
3307 error = EBUSY;
3308 break;
3309 }
3310
3311 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3312 pm->pm_sc.pm_reloadcount = sc.pm_count;
3313 else
3314 pm->pm_sc.pm_initial = sc.pm_count;
3315 }
3316 break;
3317
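	/*
	 * For a sampling PMC the count set here is the reload value,
	 * i.e. the number of events between interrupts: for example,
	 * pmc_set(id, 65536) (wrapper name assumed from pmc(3))
	 * yields roughly one sample per 65536 events.  For a counting
	 * PMC the same call merely sets the value counting starts
	 * from.
	 */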
3318
3319 /*
3320 * Start a PMC.
3321 */
3322
3323 case PMC_OP_PMCSTART:
3324 {
3325 pmc_id_t pmcid;
3326 struct pmc *pm;
3327 struct pmc_op_simple sp;
3328
3329 sx_assert(&pmc_sx, SX_XLOCKED);
3330
3331 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3332 break;
3333
3334 pmcid = sp.pm_pmcid;
3335
3336 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3337 break;
3338
3339 KASSERT(pmcid == pm->pm_id,
3340 ("[pmc,%d] pmcid %x != id %x", __LINE__,
3341 pm->pm_id, pmcid));
3342
3343 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
3344 break;
3345 else if (pm->pm_state != PMC_STATE_STOPPED &&
3346 pm->pm_state != PMC_STATE_ALLOCATED) {
3347 error = EINVAL;
3348 break;
3349 }
3350
3351 error = pmc_start(pm);
3352 }
3353 break;
3354
3355
3356 /*
3357 * Stop a PMC.
3358 */
3359
3360 case PMC_OP_PMCSTOP:
3361 {
3362 pmc_id_t pmcid;
3363 struct pmc *pm;
3364 struct pmc_op_simple sp;
3365
3366 PMC_DOWNGRADE_SX();
3367
3368 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3369 break;
3370
3371 pmcid = sp.pm_pmcid;
3372
3373 /*
3374 * Mark the PMC as inactive and invoke the MD stop
3375 * routines if needed.
3376 */
3377
3378 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3379 break;
3380
3381 KASSERT(pmcid == pm->pm_id,
3382 ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
3383 pm->pm_id, pmcid));
3384
3385 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
3386 break;
3387 else if (pm->pm_state != PMC_STATE_RUNNING) {
3388 error = EINVAL;
3389 break;
3390 }
3391
3392 error = pmc_stop(pm);
3393 }
3394 break;
3395
3396
3397 /*
2043 * CPU and same RI.
2044 */
2045 if ((po = pmc_find_owner_descriptor(p)) != NULL)
2046 LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2047 if (PMC_TO_ROWINDEX(pm) == ri) {
2048 mode = PMC_TO_MODE(pm);
2049 if (PMC_IS_VIRTUAL_MODE(mode))
2050 return EEXIST;
2051 if (PMC_IS_SYSTEM_MODE(mode) &&
2052 (int) PMC_TO_CPU(pm) == cpu)
2053 return EEXIST;
2054 }
2055 }
2056
2057 /*
2058 * We also shouldn't be the target of any PMC at this index
2059 * since otherwise a PMC_ATTACH to ourselves will fail.
2060 */
2061 if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
2062 if (pp->pp_pmcs[ri].pp_pmc)
2063 return EEXIST;
2064
2065 PMCDBG(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
2066 p, p->p_pid, p->p_comm, ri);
2067
2068 return 0;
2069}
2070
2071/*
2072 * Check if a given PMC at row index 'ri' can be currently used in
2073 * mode 'mode'.
2074 */
2075
2076static int
2077pmc_can_allocate_row(int ri, enum pmc_mode mode)
2078{
2079 enum pmc_disp disp;
2080
2081 sx_assert(&pmc_sx, SX_XLOCKED);
2082
2083 PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
2084
2085 if (PMC_IS_SYSTEM_MODE(mode))
2086 disp = PMC_DISP_STANDALONE;
2087 else
2088 disp = PMC_DISP_THREAD;
2089
2090 /*
2091 * check disposition for PMC row 'ri':
2092 *
2093 * Expected disposition Row-disposition Result
2094 *
2095 * STANDALONE STANDALONE or FREE proceed
2096 * STANDALONE THREAD fail
2097 * THREAD THREAD or FREE proceed
2098 * THREAD STANDALONE fail
2099 */
2100
2101 if (!PMC_ROW_DISP_IS_FREE(ri) &&
2102 !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
2103 !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
2104 return EBUSY;
2105
2106 /*
2107 * All OK
2108 */
2109
2110 PMCDBG(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
2111
2112 return 0;
2113
2114}
2115
2116/*
2117 * Find a PMC descriptor with user handle 'pmcid' for thread 'td'.
2118 */
2119
2120static struct pmc *
2121pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
2122{
2123 struct pmc *pm;
2124
2125 KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
2126 ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
2127 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
2128
2129 LIST_FOREACH(pm, &po->po_pmcs, pm_next)
2130 if (pm->pm_id == pmcid)
2131 return pm;
2132
2133 return NULL;
2134}
2135
2136static int
2137pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
2138{
2139
2140 struct pmc *pm;
2141 struct pmc_owner *po;
2142
2143 PMCDBG(PMC,FND,1, "find-pmc id=%d", pmcid);
2144
2145 if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL)
2146 return ESRCH;
2147
2148 if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
2149 return EINVAL;
2150
2151 PMCDBG(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
2152
2153 *pmc = pm;
2154 return 0;
2155}
2156
2157/*
2158 * Start a PMC.
2159 */
2160
2161static int
2162pmc_start(struct pmc *pm)
2163{
2164 int error, cpu, ri;
2165 enum pmc_mode mode;
2166 struct pmc_owner *po;
2167 struct pmc_binding pb;
2168
2169 KASSERT(pm != NULL,
2170 ("[pmc,%d] null pm", __LINE__));
2171
2172 mode = PMC_TO_MODE(pm);
2173 ri = PMC_TO_ROWINDEX(pm);
2174 error = 0;
2175
2176 PMCDBG(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
2177
2178 po = pm->pm_owner;
2179
2180 if (PMC_IS_VIRTUAL_MODE(mode)) {
2181
2182 /*
2183 * If a PMCATTACH has never been done on this PMC,
2184 * attach it to its owner process.
2185 */
2186
2187 if (LIST_EMPTY(&pm->pm_targets))
2188 error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
2189 pmc_attach_process(po->po_owner, pm);
2190
2191 /*
2192 * Disallow PMCSTART if a logfile is required but has not
2193 * been configured yet.
2194 */
2195
2196 if (error == 0 && (pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
2197 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
2198 error = EDOOFUS;
2199
2200 /*
2201 * If the PMC is attached to its owner, then force a context
2202 * switch to ensure that the MD state gets set correctly.
2203 */
2204
2205 if (error == 0) {
2206 pm->pm_state = PMC_STATE_RUNNING;
2207 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
2208 pmc_force_context_switch();
2209 }
2210
2211 return error;
2212 }
2213
2214
2215 /*
2216 * A system-wide PMC.
2217 */
2218
2219 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
2220 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
2221 return EDOOFUS; /* programming error */
2222
2223 /*
2224 * Add the owner to the global list if this is a system-wide
2225 * sampling PMC.
2226 */
2227
2228 if (mode == PMC_MODE_SS) {
2229 if (po->po_sscount == 0) {
2230 LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
2231 atomic_add_rel_int(&pmc_ss_count, 1);
2232 PMCDBG(PMC,OPS,1, "po=%p in global list", po);
2233 }
2234 po->po_sscount++;
2235 }
2236
2237 /*
2238 * Move to the CPU associated with this
2239 * PMC, and start the hardware.
2240 */
2241
2242 pmc_save_cpu_binding(&pb);
2243
2244 cpu = PMC_TO_CPU(pm);
2245
2246 if (pmc_cpu_is_disabled(cpu))
2247 return ENXIO;
2248
2249 pmc_select_cpu(cpu);
2250
2251 /*
2252 * global PMCs are configured at allocation time
2253 * so write out the initial value and start the PMC.
2254 */
2255
2256 pm->pm_state = PMC_STATE_RUNNING;
2257
2258 critical_enter();
2259 if ((error = md->pmd_write_pmc(cpu, ri,
2260 PMC_IS_SAMPLING_MODE(mode) ?
2261 pm->pm_sc.pm_reloadcount :
2262 pm->pm_sc.pm_initial)) == 0)
2263 error = md->pmd_start_pmc(cpu, ri);
2264 critical_exit();
2265
2266 pmc_restore_cpu_binding(&pb);
2267
2268 return error;
2269}
2270
2271/*
2272 * Stop a PMC.
2273 */
2274
2275static int
2276pmc_stop(struct pmc *pm)
2277{
2278 int cpu, error, ri;
2279 struct pmc_owner *po;
2280 struct pmc_binding pb;
2281
2282 KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
2283
2284 PMCDBG(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
2285 PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
2286
2287 pm->pm_state = PMC_STATE_STOPPED;
2288
2289 /*
2290 * If the PMC is a virtual mode one, changing the state to
2291 * non-RUNNING is enough to ensure that the PMC never gets
2292 * scheduled.
2293 *
2294 * If this PMC is current running on a CPU, then it will
2295 * handled correctly at the time its target process is context
2296 * switched out.
2297 */
2298
2299 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
2300 return 0;
2301
2302 /*
2303 * A system-mode PMC. Move to the CPU associated with
2304 * this PMC, and stop the hardware. We update the
2305 * 'initial count' so that a subsequent PMCSTART will
2306 * resume counting from the current hardware count.
2307 */
2308
2309 pmc_save_cpu_binding(&pb);
2310
2311 cpu = PMC_TO_CPU(pm);
2312
2313 KASSERT(cpu >= 0 && cpu < mp_ncpus,
2314 ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
2315
2316 if (pmc_cpu_is_disabled(cpu))
2317 return ENXIO;
2318
2319 pmc_select_cpu(cpu);
2320
2321 ri = PMC_TO_ROWINDEX(pm);
2322
2323 critical_enter();
2324 if ((error = md->pmd_stop_pmc(cpu, ri)) == 0)
2325 error = md->pmd_read_pmc(cpu, ri, &pm->pm_sc.pm_initial);
2326 critical_exit();
2327
2328 pmc_restore_cpu_binding(&pb);
2329
2330 po = pm->pm_owner;
2331
2332 /* remove this owner from the global list of SS PMC owners */
2333 if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
2334 po->po_sscount--;
2335 if (po->po_sscount == 0) {
2336 atomic_subtract_rel_int(&pmc_ss_count, 1);
2337 LIST_REMOVE(po, po_ssnext);
2338 PMCDBG(PMC,OPS,2,"po=%p removed from global list", po);
2339 }
2340 }
2341
2342 return error;
2343}
2344
2345
2346#if DEBUG
2347static const char *pmc_op_to_name[] = {
2348#undef __PMC_OP
2349#define __PMC_OP(N, D) #N ,
2350 __PMC_OPS()
2351 NULL
2352};
2353#endif
2354
2355/*
2356 * The syscall interface
2357 */
2358
2359#define PMC_GET_SX_XLOCK(...) do { \
2360 sx_xlock(&pmc_sx); \
2361 if (pmc_hook == NULL) { \
2362 sx_xunlock(&pmc_sx); \
2363 return __VA_ARGS__; \
2364 } \
2365} while (0)
2366
2367#define PMC_DOWNGRADE_SX() do { \
2368 sx_downgrade(&pmc_sx); \
2369 is_sx_downgraded = 1; \
2370} while (0)
2371
2372static int
2373pmc_syscall_handler(struct thread *td, void *syscall_args)
2374{
2375 int error, is_sx_downgraded, op;
2376 struct pmc_syscall_args *c;
2377 void *arg;
2378
2379 PMC_GET_SX_XLOCK(ENOSYS);
2380
2381 DROP_GIANT();
2382
2383 is_sx_downgraded = 0;
2384
2385 c = (struct pmc_syscall_args *) syscall_args;
2386
2387 op = c->pmop_code;
2388 arg = c->pmop_data;
2389
2390 PMCDBG(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
2391 pmc_op_to_name[op], arg);
2392
2393 error = 0;
2394 atomic_add_int(&pmc_stats.pm_syscalls, 1);
2395
2396 switch(op)
2397 {
2398
2399
2400 /*
2401 * Configure a log file.
2402 *
2403 * XXX This OP will be reworked.
2404 */
2405
2406 case PMC_OP_CONFIGURELOG:
2407 {
2408 struct pmc_owner *po;
2409 struct pmc_op_configurelog cl;
2410 struct proc *p;
2411
2412 sx_assert(&pmc_sx, SX_XLOCKED);
2413
2414 if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
2415 break;
2416
2417 /* mark this process as owning a log file */
2418 p = td->td_proc;
2419 if ((po = pmc_find_owner_descriptor(p)) == NULL)
2420 if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
2421 error = ENOMEM;
2422 break;
2423 }
2424
2425 /*
2426 * If a valid fd was passed in, try to configure that,
2427 * otherwise if 'fd' was less than zero and there was
2428 * a log file configured, flush its buffers and
2429 * de-configure it.
2430 */
2431 if (cl.pm_logfd >= 0)
2432 error = pmclog_configure_log(po, cl.pm_logfd);
2433 else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
2434 pmclog_process_closelog(po);
2435 error = pmclog_flush(po);
2436 if (error == 0)
2437 error = pmclog_deconfigure_log(po);
2438 } else
2439 error = EINVAL;
2440 }
2441 break;
2442
2443
2444 /*
2445 * Flush a log file.
2446 */
2447
2448 case PMC_OP_FLUSHLOG:
2449 {
2450 struct pmc_owner *po;
2451
2452 sx_assert(&pmc_sx, SX_XLOCKED);
2453
2454 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
2455 error = EINVAL;
2456 break;
2457 }
2458
2459 error = pmclog_flush(po);
2460 }
2461 break;
2462
2463 /*
2464 * Retrieve hardware configuration.
2465 */
2466
2467 case PMC_OP_GETCPUINFO: /* CPU information */
2468 {
2469 struct pmc_op_getcpuinfo gci;
2470
2471 gci.pm_cputype = md->pmd_cputype;
2472 gci.pm_ncpu = mp_ncpus;
2473 gci.pm_npmc = md->pmd_npmc;
2474 gci.pm_nclass = md->pmd_nclass;
2475 bcopy(md->pmd_classes, &gci.pm_classes,
2476 sizeof(gci.pm_classes));
2477 error = copyout(&gci, arg, sizeof(gci));
2478 }
2479 break;
2480
2481
2482 /*
2483 * Get module statistics
2484 */
2485
2486 case PMC_OP_GETDRIVERSTATS:
2487 {
2488 struct pmc_op_getdriverstats gms;
2489
2490 bcopy(&pmc_stats, &gms, sizeof(gms));
2491 error = copyout(&gms, arg, sizeof(gms));
2492 }
2493 break;
2494
2495
2496 /*
2497 * Retrieve module version number
2498 */
2499
2500 case PMC_OP_GETMODULEVERSION:
2501 {
2502 uint32_t cv, modv;
2503
2504 /* retrieve the client's idea of the ABI version */
2505 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
2506 break;
2507 /* don't service clients newer than our driver */
2508 modv = PMC_VERSION;
2509 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
2510 error = EPROGMISMATCH;
2511 break;
2512 }
2513 error = copyout(&modv, arg, sizeof(int));
2514 }
2515 break;
2516
2517
2518 /*
2519 * Retrieve the state of all the PMCs on a given
2520 * CPU.
2521 */
2522
2523 case PMC_OP_GETPMCINFO:
2524 {
2525 uint32_t cpu, n, npmc;
2526 size_t pmcinfo_size;
2527 struct pmc *pm;
2528 struct pmc_info *p, *pmcinfo;
2529 struct pmc_op_getpmcinfo *gpi;
2530 struct pmc_owner *po;
2531 struct pmc_binding pb;
2532
2533 PMC_DOWNGRADE_SX();
2534
2535 gpi = (struct pmc_op_getpmcinfo *) arg;
2536
2537 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
2538 break;
2539
2540 if (cpu >= (unsigned int) mp_ncpus) {
2541 error = EINVAL;
2542 break;
2543 }
2544
2545 if (pmc_cpu_is_disabled(cpu)) {
2546 error = ENXIO;
2547 break;
2548 }
2549
2550 /* switch to CPU 'cpu' */
2551 pmc_save_cpu_binding(&pb);
2552 pmc_select_cpu(cpu);
2553
2554 npmc = md->pmd_npmc;
2555
2556 pmcinfo_size = npmc * sizeof(struct pmc_info);
2557 MALLOC(pmcinfo, struct pmc_info *, pmcinfo_size, M_PMC,
2558 M_WAITOK);
2559
2560 p = pmcinfo;
2561
2562 for (n = 0; n < md->pmd_npmc; n++, p++) {
2563
2564 if ((error = md->pmd_describe(cpu, n, p, &pm)) != 0)
2565 break;
2566
2567 if (PMC_ROW_DISP_IS_STANDALONE(n))
2568 p->pm_rowdisp = PMC_DISP_STANDALONE;
2569 else if (PMC_ROW_DISP_IS_THREAD(n))
2570 p->pm_rowdisp = PMC_DISP_THREAD;
2571 else
2572 p->pm_rowdisp = PMC_DISP_FREE;
2573
2574 p->pm_ownerpid = -1;
2575
2576 if (pm == NULL) /* no PMC associated */
2577 continue;
2578
2579 po = pm->pm_owner;
2580
2581 KASSERT(po->po_owner != NULL,
2582 ("[pmc,%d] pmc_owner had a null proc pointer",
2583 __LINE__));
2584
2585 p->pm_ownerpid = po->po_owner->p_pid;
2586 p->pm_mode = PMC_TO_MODE(pm);
2587 p->pm_event = pm->pm_event;
2588 p->pm_flags = pm->pm_flags;
2589
2590 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
2591 p->pm_reloadcount =
2592 pm->pm_sc.pm_reloadcount;
2593 }
2594
2595 pmc_restore_cpu_binding(&pb);
2596
2597 /* now copy out the PMC info collected */
2598 if (error == 0)
2599 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
2600
2601 FREE(pmcinfo, M_PMC);
2602 }
2603 break;
2604
2605
2606 /*
2607 * Set the administrative state of a PMC. I.e. whether
2608 * the PMC is to be used or not.
2609 */
2610
2611 case PMC_OP_PMCADMIN:
2612 {
2613 int cpu, ri;
2614 enum pmc_state request;
2615 struct pmc_cpu *pc;
2616 struct pmc_hw *phw;
2617 struct pmc_op_pmcadmin pma;
2618 struct pmc_binding pb;
2619
2620 sx_assert(&pmc_sx, SX_XLOCKED);
2621
2622 KASSERT(td == curthread,
2623 ("[pmc,%d] td != curthread", __LINE__));
2624
2625 if (suser(td) || jailed(td->td_ucred)) {
2626 error = EPERM;
2627 break;
2628 }
2629
2630 if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
2631 break;
2632
2633 cpu = pma.pm_cpu;
2634
2635 if (cpu < 0 || cpu >= mp_ncpus) {
2636 error = EINVAL;
2637 break;
2638 }
2639
2640 if (pmc_cpu_is_disabled(cpu)) {
2641 error = ENXIO;
2642 break;
2643 }
2644
2645 request = pma.pm_state;
2646
2647 if (request != PMC_STATE_DISABLED &&
2648 request != PMC_STATE_FREE) {
2649 error = EINVAL;
2650 break;
2651 }
2652
2653 ri = pma.pm_pmc; /* pmc id == row index */
2654 if (ri < 0 || ri >= (int) md->pmd_npmc) {
2655 error = EINVAL;
2656 break;
2657 }
2658
2659 /*
2660 * We can't disable a PMC with a row-index allocated
2661 * for process virtual PMCs.
2662 */
2663
2664 if (PMC_ROW_DISP_IS_THREAD(ri) &&
2665 request == PMC_STATE_DISABLED) {
2666 error = EBUSY;
2667 break;
2668 }
2669
2670 /*
2671 * otherwise, this PMC on this CPU is either free or
2672 * in system-wide mode.
2673 */
2674
2675 pmc_save_cpu_binding(&pb);
2676 pmc_select_cpu(cpu);
2677
2678 pc = pmc_pcpu[cpu];
2679 phw = pc->pc_hwpmcs[ri];
2680
2681 /*
2682 * XXX do we need some kind of 'forced' disable?
2683 */
2684
2685 if (phw->phw_pmc == NULL) {
2686 if (request == PMC_STATE_DISABLED &&
2687 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
2688 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
2689 PMC_MARK_ROW_STANDALONE(ri);
2690 } else if (request == PMC_STATE_FREE &&
2691 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
2692 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
2693 PMC_UNMARK_ROW_STANDALONE(ri);
2694 }
2695 /* other cases are a no-op */
2696 } else
2697 error = EBUSY;
2698
2699 pmc_restore_cpu_binding(&pb);
2700 }
2701 break;
2702
2703
2704 /*
2705 * Allocate a PMC.
2706 */
2707
2708 case PMC_OP_PMCALLOCATE:
2709 {
2710 uint32_t caps;
2711 u_int cpu;
2712 int n;
2713 enum pmc_mode mode;
2714 struct pmc *pmc;
2715 struct pmc_hw *phw;
2716 struct pmc_op_pmcallocate pa;
2717 struct pmc_binding pb;
2718
2719 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
2720 break;
2721
2722 caps = pa.pm_caps;
2723 mode = pa.pm_mode;
2724 cpu = pa.pm_cpu;
2725
2726 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
2727 mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
2728 (cpu != (u_int) PMC_CPU_ANY && cpu >= (u_int) mp_ncpus)) {
2729 error = EINVAL;
2730 break;
2731 }
2732
2733 /*
2734 * Virtual PMCs should only ask for a default CPU.
2735 * System mode PMCs need to specify a non-default CPU.
2736 */
2737
2738 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
2739 (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
2740 error = EINVAL;
2741 break;
2742 }
2743
2744 /*
2745 * Check that a disabled CPU is not being asked for.
2746 */
2747
2748 if (PMC_IS_SYSTEM_MODE(mode) && pmc_cpu_is_disabled(cpu)) {
2749 error = ENXIO;
2750 break;
2751 }
2752
2753 /*
2754 * Refuse an allocation for a system-wide PMC if this
2755 * process has been jailed, or if this process lacks
2756 * super-user credentials and the sysctl tunable
2757 * 'security.bsd.unprivileged_syspmcs' is zero.
2758 */
2759
2760 if (PMC_IS_SYSTEM_MODE(mode)) {
2761 if (jailed(curthread->td_ucred))
2762 error = EPERM;
2763 else if (suser(curthread) &&
2764 (pmc_unprivileged_syspmcs == 0))
2765 error = EPERM;
2766 }
2767
2768 if (error)
2769 break;
2770
2771 /*
2772 * Look for valid values for 'pm_flags'
2773 */
2774
2775 if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
2776 PMC_F_LOG_PROCEXIT)) != 0) {
2777 error = EINVAL;
2778 break;
2779 }
2780
2781 /* process logging options are not allowed for system PMCs */
2782 if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
2783 (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
2784 error = EINVAL;
2785 break;
2786 }
2787
2788 /*
2789 * All sampling mode PMCs need to be able to interrupt the
2790 * CPU.
2791 */
2792
2793 if (PMC_IS_SAMPLING_MODE(mode))
2794 caps |= PMC_CAP_INTERRUPT;
2795
2796 PMCDBG(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
2797 pa.pm_ev, caps, mode, cpu);
2798
2799 pmc = pmc_allocate_pmc_descriptor();
2800 pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
2801 PMC_ID_INVALID);
2802 pmc->pm_event = pa.pm_ev;
2803 pmc->pm_state = PMC_STATE_FREE;
2804 pmc->pm_caps = caps;
2805 pmc->pm_flags = pa.pm_flags;
2806
2807 /* switch thread to CPU 'cpu' */
2808 pmc_save_cpu_binding(&pb);
2809
2810#define PMC_IS_SHAREABLE_PMC(cpu, n) \
2811 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
2812 PMC_PHW_FLAG_IS_SHAREABLE)
2813#define PMC_IS_UNALLOCATED(cpu, n) \
2814 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
2815
2816 if (PMC_IS_SYSTEM_MODE(mode)) {
2817 pmc_select_cpu(cpu);
2818 for (n = 0; n < (int) md->pmd_npmc; n++)
2819 if (pmc_can_allocate_row(n, mode) == 0 &&
2820 pmc_can_allocate_rowindex(
2821 curthread->td_proc, n, cpu) == 0 &&
2822 (PMC_IS_UNALLOCATED(cpu, n) ||
2823 PMC_IS_SHAREABLE_PMC(cpu, n)) &&
2824 md->pmd_allocate_pmc(cpu, n, pmc,
2825 &pa) == 0)
2826 break;
2827 } else {
2828 /* Process virtual mode */
2829 for (n = 0; n < (int) md->pmd_npmc; n++) {
2830 if (pmc_can_allocate_row(n, mode) == 0 &&
2831 pmc_can_allocate_rowindex(
2832 curthread->td_proc, n,
2833 PMC_CPU_ANY) == 0 &&
2834 md->pmd_allocate_pmc(curthread->td_oncpu,
2835 n, pmc, &pa) == 0)
2836 break;
2837 }
2838 }
2839
2840#undef PMC_IS_UNALLOCATED
2841#undef PMC_IS_SHAREABLE_PMC
2842
2843 pmc_restore_cpu_binding(&pb);
2844
2845 if (n == (int) md->pmd_npmc) {
2846 pmc_destroy_pmc_descriptor(pmc);
2847 FREE(pmc, M_PMC);
2848 pmc = NULL;
2849 error = EINVAL;
2850 break;
2851 }
2852
2853 /* Fill in the correct value in the ID field */
2854 pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
2855
2856 PMCDBG(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
2857 pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
2858
2859 /* Process mode PMCs with logging enabled need log files */
2860 if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
2861 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
2862
2863 /* All system mode sampling PMCs require a log file */
2864 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
2865 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
2866
2867 /*
2868 * Configure global pmc's immediately
2869 */
2870
2871 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
2872
2873 pmc_save_cpu_binding(&pb);
2874 pmc_select_cpu(cpu);
2875
2876 phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
2877
2878 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
2879 (error = md->pmd_config_pmc(cpu, n, pmc)) != 0) {
2880 (void) md->pmd_release_pmc(cpu, n, pmc);
2881 pmc_destroy_pmc_descriptor(pmc);
2882 FREE(pmc, M_PMC);
2883 pmc = NULL;
2884 pmc_restore_cpu_binding(&pb);
2885 error = EPERM;
2886 break;
2887 }
2888
2889 pmc_restore_cpu_binding(&pb);
2890 }
2891
2892 pmc->pm_state = PMC_STATE_ALLOCATED;
2893
2894 /*
2895 * mark row disposition
2896 */
2897
2898 if (PMC_IS_SYSTEM_MODE(mode))
2899 PMC_MARK_ROW_STANDALONE(n);
2900 else
2901 PMC_MARK_ROW_THREAD(n);
2902
2903 /*
2904 * Register this PMC with the current thread as its owner.
2905 */
2906
2907 if ((error =
2908 pmc_register_owner(curthread->td_proc, pmc)) != 0) {
2909 pmc_release_pmc_descriptor(pmc);
2910 FREE(pmc, M_PMC);
2911 pmc = NULL;
2912 break;
2913 }
2914
2915 /*
2916 * Return the allocated index.
2917 */
2918
2919 pa.pm_pmcid = pmc->pm_id;
2920
2921 error = copyout(&pa, arg, sizeof(pa));
2922 }
2923 break;
2924
2925
2926 /*
2927 * Attach a PMC to a process.
2928 */
2929
2930 case PMC_OP_PMCATTACH:
2931 {
2932 struct pmc *pm;
2933 struct proc *p;
2934 struct pmc_op_pmcattach a;
2935
2936 sx_assert(&pmc_sx, SX_XLOCKED);
2937
2938 if ((error = copyin(arg, &a, sizeof(a))) != 0)
2939 break;
2940
2941 if (a.pm_pid < 0) {
2942 error = EINVAL;
2943 break;
2944 } else if (a.pm_pid == 0)
2945 a.pm_pid = td->td_proc->p_pid;
2946
2947 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
2948 break;
2949
2950 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
2951 error = EINVAL;
2952 break;
2953 }
2954
2955 /* PMCs may be (re)attached only when allocated or stopped */
2956 if (pm->pm_state == PMC_STATE_RUNNING) {
2957 error = EBUSY;
2958 break;
2959 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
2960 pm->pm_state != PMC_STATE_STOPPED) {
2961 error = EINVAL;
2962 break;
2963 }
2964
2965 /* lookup pid */
2966 if ((p = pfind(a.pm_pid)) == NULL) {
2967 error = ESRCH;
2968 break;
2969 }
2970
2971 /*
2972 * Ignore processes that are working on exiting.
2973 */
2974 if (p->p_flag & P_WEXIT) {
2975 error = ESRCH;
2976 PROC_UNLOCK(p); /* pfind() returns a locked process */
2977 break;
2978 }
2979
2980 /*
2981 * we are allowed to attach a PMC to a process if
2982 * we can debug it.
2983 */
2984 error = p_candebug(curthread, p);
2985
2986 PROC_UNLOCK(p);
2987
2988 if (error == 0)
2989 error = pmc_attach_process(p, pm);
2990 }
2991 break;
2992
2993
2994 /*
2995 * Detach an attached PMC from a process.
2996 */
2997
2998 case PMC_OP_PMCDETACH:
2999 {
3000 struct pmc *pm;
3001 struct proc *p;
3002 struct pmc_op_pmcattach a;
3003
3004 if ((error = copyin(arg, &a, sizeof(a))) != 0)
3005 break;
3006
3007 if (a.pm_pid < 0) {
3008 error = EINVAL;
3009 break;
3010 } else if (a.pm_pid == 0)
3011 a.pm_pid = td->td_proc->p_pid;
3012
3013 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
3014 break;
3015
3016 if ((p = pfind(a.pm_pid)) == NULL) {
3017 error = ESRCH;
3018 break;
3019 }
3020
3021 /*
3022 * Treat processes that are in the process of exiting
3023 * as if they were not present.
3024 */
3025
3026 if (p->p_flag & P_WEXIT)
3027 error = ESRCH;
3028
3029 PROC_UNLOCK(p); /* pfind() returns a locked process */
3030
3031 if (error == 0)
3032 error = pmc_detach_process(p, pm);
3033 }
3034 break;
3035
3036
3037 /*
3038 * Retrieve the MSR number associated with the counter
3039 * 'pmc_id'. This allows processes to directly use RDPMC
3040 * instructions to read their PMCs, without the overhead of a
3041 * system call.
3042 */
3043
3044 case PMC_OP_PMCGETMSR:
3045 {
3046 int ri;
3047 struct pmc *pm;
3048 struct pmc_target *pt;
3049 struct pmc_op_getmsr gm;
3050
3051 PMC_DOWNGRADE_SX();
3052
3053 /* CPU has no 'GETMSR' support */
3054 if (md->pmd_get_msr == NULL) {
3055 error = ENOSYS;
3056 break;
3057 }
3058
3059 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
3060 break;
3061
3062 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
3063 break;
3064
3065 /*
3066 * The allocated PMC has to be a process virtual PMC,
3067 * i.e., of type MODE_T[CS]. Global PMCs can only be
3068 * read using the PMCREAD operation since they may be
3069 * allocated on a different CPU than the one we could
3070 * be running on at the time of the RDPMC instruction.
3071 *
3072 * The GETMSR operation is not allowed for PMCs that
3073 * are inherited across processes.
3074 */
3075
3076 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
3077 (pm->pm_flags & PMC_F_DESCENDANTS)) {
3078 error = EINVAL;
3079 break;
3080 }
3081
3082 /*
3083 * It only makes sense to use a RDPMC (or its
3084 * equivalent instruction on non-x86 architectures) on
3085 * a process that has allocated and attached a PMC to
3086 * itself. Conversely the PMC is only allowed to have
3087 * one process attached to it -- its owner.
3088 */
3089
3090 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
3091 LIST_NEXT(pt, pt_next) != NULL ||
3092 pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
3093 error = EINVAL;
3094 break;
3095 }
3096
3097 ri = PMC_TO_ROWINDEX(pm);
3098
3099 if ((error = (*md->pmd_get_msr)(ri, &gm.pm_msr)) < 0)
3100 break;
3101
3102 if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
3103 break;
3104
3105 /*
3106 * Mark our process as using MSRs. Update machine
3107 * state using a forced context switch.
3108 */
3109
3110 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
3111 pmc_force_context_switch();
3112
3113 }
3114 break;
3115
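/*
 * Editor's note: a hedged sketch of what a process can do with the
 * MSR number returned by GETMSR above -- read its own counter with
 * a direct RDPMC, avoiding a system call.  Assumes an x86 CPU and
 * that 'pm_msr' is the counter index RDPMC expects in %ecx.
 */
#if 0
static __inline uint64_t
read_pmc_direct(uint32_t msr)
{
	uint32_t hi, lo;

	__asm __volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (msr));
	return (((uint64_t) hi << 32) | lo);
}
#endif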
3116 /*
3117 * Release an allocated PMC
3118 */
3119
3120 case PMC_OP_PMCRELEASE:
3121 {
3122 pmc_id_t pmcid;
3123 struct pmc *pm;
3124 struct pmc_owner *po;
3125 struct pmc_op_simple sp;
3126
3127 /*
3128 * Find PMC pointer for the named PMC.
3129 *
3130 * Use pmc_release_pmc_descriptor() to switch off the
3131 * PMC, remove all its target threads, and remove the
3132 * PMC from its owner's list.
3133 *
3134 * Remove the owner record if this is the last PMC
3135 * owned.
3136 *
3137 * Free up space.
3138 */
3139
3140 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3141 break;
3142
3143 pmcid = sp.pm_pmcid;
3144
3145 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3146 break;
3147
3148 po = pm->pm_owner;
3149 pmc_release_pmc_descriptor(pm);
3150 pmc_maybe_remove_owner(po);
3151
3152 FREE(pm, M_PMC);
3153 }
3154 break;
3155
3156
3157 /*
3158 * Read and/or write a PMC.
3159 */
3160
3161 case PMC_OP_PMCRW:
3162 {
3163 uint32_t cpu, ri;
3164 struct pmc *pm;
3165 struct pmc_op_pmcrw *pprw;
3166 struct pmc_op_pmcrw prw;
3167 struct pmc_binding pb;
3168 pmc_value_t oldvalue;
3169
3170 PMC_DOWNGRADE_SX();
3171
3172 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
3173 break;
3174
3175 ri = 0;
3176 PMCDBG(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
3177 prw.pm_flags);
3178
3179 /* must have at least one flag set */
3180 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
3181 error = EINVAL;
3182 break;
3183 }
3184
3185 /* locate pmc descriptor */
3186 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
3187 break;
3188
3189 /* Can't read a PMC that hasn't been started. */
3190 if (pm->pm_state != PMC_STATE_ALLOCATED &&
3191 pm->pm_state != PMC_STATE_STOPPED &&
3192 pm->pm_state != PMC_STATE_RUNNING) {
3193 error = EINVAL;
3194 break;
3195 }
3196
3197 /* writing a new value is allowed only for 'STOPPED' pmcs */
3198 if (pm->pm_state == PMC_STATE_RUNNING &&
3199 (prw.pm_flags & PMC_F_NEWVALUE)) {
3200 error = EBUSY;
3201 break;
3202 }
3203
3204 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
3205
3206 /*
3207 * If this PMC is attached to its owner (i.e.,
3208 * the process requesting this operation) and
3209 * is running, then attempt to get an
3210 			 * up-to-date reading from hardware for a READ.
3211 * Writes are only allowed when the PMC is
3212 * stopped, so only update the saved value
3213 * field.
3214 *
3215 * If the PMC is not running, or is not
3216 * attached to its owner, read/write to the
3217 * savedvalue field.
3218 */
3219
3220 ri = PMC_TO_ROWINDEX(pm);
3221
3222 mtx_pool_lock_spin(pmc_mtxpool, pm);
3223 cpu = curthread->td_oncpu;
3224
3225 if (prw.pm_flags & PMC_F_OLDVALUE) {
3226 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
3227 (pm->pm_state == PMC_STATE_RUNNING))
3228 error = (*md->pmd_read_pmc)(cpu, ri,
3229 &oldvalue);
3230 else
3231 oldvalue = pm->pm_gv.pm_savedvalue;
3232 }
3233 if (prw.pm_flags & PMC_F_NEWVALUE)
3234 pm->pm_gv.pm_savedvalue = prw.pm_value;
3235
3236 mtx_pool_unlock_spin(pmc_mtxpool, pm);
3237
3238 } else { /* System mode PMCs */
3239 cpu = PMC_TO_CPU(pm);
3240 ri = PMC_TO_ROWINDEX(pm);
3241
3242 if (pmc_cpu_is_disabled(cpu)) {
3243 error = ENXIO;
3244 break;
3245 }
3246
3247 /* move this thread to CPU 'cpu' */
3248 pmc_save_cpu_binding(&pb);
3249 pmc_select_cpu(cpu);
3250
3251 critical_enter();
3252 /* save old value */
3253 if (prw.pm_flags & PMC_F_OLDVALUE)
3254 if ((error = (*md->pmd_read_pmc)(cpu, ri,
3255 &oldvalue)))
3256 goto error;
3257 /* write out new value */
3258 if (prw.pm_flags & PMC_F_NEWVALUE)
3259 error = (*md->pmd_write_pmc)(cpu, ri,
3260 prw.pm_value);
3261 error:
3262 critical_exit();
3263 pmc_restore_cpu_binding(&pb);
3264 if (error)
3265 break;
3266 }
3267
3268 pprw = (struct pmc_op_pmcrw *) arg;
3269
3270#if DEBUG
3271 if (prw.pm_flags & PMC_F_NEWVALUE)
3272 PMCDBG(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
3273 ri, prw.pm_value, oldvalue);
3274 else
3275 PMCDBG(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
3276#endif
3277
3278 /* return old value if requested */
3279 if (prw.pm_flags & PMC_F_OLDVALUE)
3280 if ((error = copyout(&oldvalue, &pprw->pm_value,
3281 sizeof(prw.pm_value))))
3282 break;
3283
3284 }
3285 break;
3286
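/*
 * Editor's note: a sketch of the request structure a userland
 * caller fills in for the read/write operation above (libpmc's
 * pmc_read(3) and pmc_write(3) wrap it); values are illustrative.
 */
#if 0
	struct pmc_op_pmcrw prw;

	prw.pm_pmcid = pmcid;		/* PMC to operate on */
	prw.pm_flags = PMC_F_OLDVALUE;	/* read the current value ... */
	prw.pm_value = 0;		/* ... without writing a new one */
	/* On return, PMC_OP_PMCRW has copied the old value to pm_value. */
#endif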
3287
3288 /*
3289 * Set the sampling rate for a sampling mode PMC and the
3290 * initial count for a counting mode PMC.
3291 */
3292
3293 case PMC_OP_PMCSETCOUNT:
3294 {
3295 struct pmc *pm;
3296 struct pmc_op_pmcsetcount sc;
3297
3298 PMC_DOWNGRADE_SX();
3299
3300 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
3301 break;
3302
3303 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
3304 break;
3305
3306 if (pm->pm_state == PMC_STATE_RUNNING) {
3307 error = EBUSY;
3308 break;
3309 }
3310
3311 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3312 pm->pm_sc.pm_reloadcount = sc.pm_count;
3313 else
3314 pm->pm_sc.pm_initial = sc.pm_count;
3315 }
3316 break;
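/*
 * Editor's note: the single 'pm_count' field thus carries two
 * meanings depending on the PMC's mode.  A sketch with an
 * illustrative value:
 */
#if 0
	struct pmc_op_pmcsetcount sc;

	sc.pm_pmcid = pmcid;
	sc.pm_count = 65536;	/* sampling PMC: one interrupt per 65536
				 * events; counting PMC: the counter's
				 * initial value */
#endif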
3317
3318
3319 /*
3320 * Start a PMC.
3321 */
3322
3323 case PMC_OP_PMCSTART:
3324 {
3325 pmc_id_t pmcid;
3326 struct pmc *pm;
3327 struct pmc_op_simple sp;
3328
3329 sx_assert(&pmc_sx, SX_XLOCKED);
3330
3331 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3332 break;
3333
3334 pmcid = sp.pm_pmcid;
3335
3336 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3337 break;
3338
3339 KASSERT(pmcid == pm->pm_id,
3340 ("[pmc,%d] pmcid %x != id %x", __LINE__,
3341 pm->pm_id, pmcid));
3342
3343 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
3344 break;
3345 else if (pm->pm_state != PMC_STATE_STOPPED &&
3346 pm->pm_state != PMC_STATE_ALLOCATED) {
3347 error = EINVAL;
3348 break;
3349 }
3350
3351 error = pmc_start(pm);
3352 }
3353 break;
3354
3355
3356 /*
3357 * Stop a PMC.
3358 */
3359
3360 case PMC_OP_PMCSTOP:
3361 {
3362 pmc_id_t pmcid;
3363 struct pmc *pm;
3364 struct pmc_op_simple sp;
3365
3366 PMC_DOWNGRADE_SX();
3367
3368 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
3369 break;
3370
3371 pmcid = sp.pm_pmcid;
3372
3373 /*
3374 * Mark the PMC as inactive and invoke the MD stop
3375 * routines if needed.
3376 */
3377
3378 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
3379 break;
3380
3381 KASSERT(pmcid == pm->pm_id,
3382 ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
3383 pm->pm_id, pmcid));
3384
3385 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
3386 break;
3387 else if (pm->pm_state != PMC_STATE_RUNNING) {
3388 error = EINVAL;
3389 break;
3390 }
3391
3392 error = pmc_stop(pm);
3393 }
3394 break;
3395
3396
3397 /*
3398 * Flush the per-owner log file and Write a user-entry to the
3399 * log file.
3398 * Write a user supplied value to the log file.
3400 */
3401
3402 case PMC_OP_WRITELOG:
3403 {
3404 struct pmc_op_writelog wl;
3405 struct pmc_owner *po;
3406
3407 PMC_DOWNGRADE_SX();
3408
3409 if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
3410 break;
3411
3412 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3413 error = EINVAL;
3414 break;
3415 }
3416
3417 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
3418 error = EINVAL;
3419 break;
3420 }
3421
3422 error = pmclog_process_userlog(po, &wl);
3423 }
3424 break;
3425
3426
3427 default:
3428 error = EINVAL;
3429 break;
3430 }
3431
3432 if (is_sx_downgraded)
3433 sx_sunlock(&pmc_sx);
3434 else
3435 sx_xunlock(&pmc_sx);
3436
3437 if (error)
3438 atomic_add_int(&pmc_stats.pm_syscall_errors, 1);
3439
3440 PICKUP_GIANT();
3441
3442 return error;
3443}
3444
3445/*
3446 * Helper functions
3447 */
3448
3449
3450/*
3451 * Interrupt processing.
3452 *
3453 * Find a free slot in the per-cpu array of PC samples and write the
3454 * current (PMC,PID,PC) triple to it. If an event was successfully
3455 * added, a bit is set in mask 'pmc_cpumask' denoting that the
3456 * DO_SAMPLES hook needs to be invoked from the clock handler.
3457 *
3458 * This function is meant to be called from an NMI handler. It cannot
3459 * use any of the locking primitives supplied by the OS.
3460 */
3461
3462int
3463pmc_process_interrupt(int cpu, struct pmc *pm, uintfptr_t pc, int usermode)
3464{
3465 int error, ri;
3466 struct thread *td;
3467 struct pmc_sample *ps;
3468 struct pmc_samplebuffer *psb;
3469
3470 error = 0;
3471 ri = PMC_TO_ROWINDEX(pm);
3472
3473 psb = pmc_pcpu[cpu]->pc_sb;
3474
3475 ps = psb->ps_write;
3476 if (ps->ps_pc) { /* in use, reader hasn't caught up */
3477 		atomic_add_int(&pmc_stats.pm_intr_bufferfull, 1);
3478 		atomic_set_int(&pm->pm_flags, PMC_F_IS_STALLED);
3476 		pm->pm_stalled = 1;
3477 		atomic_add_int(&pmc_stats.pm_intr_bufferfull, 1);
3479 PMCDBG(SAM,INT,1,"(spc) cpu=%d pm=%p pc=%jx um=%d wr=%d rd=%d",
3480 cpu, pm, (uint64_t) pc, usermode,
3481 (int) (psb->ps_write - psb->ps_samples),
3482 (int) (psb->ps_read - psb->ps_samples));
3483 error = ENOMEM;
3484 goto done;
3485 }
3486
3487 /* fill in entry */
3488 PMCDBG(SAM,INT,1,"cpu=%d pm=%p pc=%jx um=%d wr=%d rd=%d", cpu, pm,
3489 (uint64_t) pc, usermode,
3490 (int) (psb->ps_write - psb->ps_samples),
3491 (int) (psb->ps_read - psb->ps_samples));
3492
3493 atomic_add_rel_32(&pm->pm_runcount, 1); /* hold onto PMC */
3494 ps->ps_pmc = pm;
3495 if ((td = curthread) && td->td_proc)
3496 ps->ps_pid = td->td_proc->p_pid;
3497 else
3498 ps->ps_pid = -1;
3499 ps->ps_usermode = usermode;
3500 ps->ps_pc = pc; /* mark entry as in use */
3501
3502 /* increment write pointer, modulo ring buffer size */
3503 ps++;
3504 if (ps == psb->ps_fence)
3505 psb->ps_write = psb->ps_samples;
3506 else
3507 psb->ps_write = ps;
3508
3509 done:
3510 /* mark CPU as needing processing */
3511 atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
3512
3513 return error;
3514}
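/*
 * Editor's note: the sample ring above uses 'ps_pc != 0' as its
 * occupancy sentinel instead of a shared count, which is what lets
 * the NMI-side producer run without taking locks.  A minimal sketch
 * of the same single-producer convention, assuming a PC of zero
 * never occurs in practice:
 */
#if 0
struct slot { uintptr_t pc; };	/* pc == 0 marks the slot free */

static int
ring_put(struct slot **wrp, struct slot *base, struct slot *fence,
    uintptr_t pc)
{
	struct slot *wr = *wrp;

	if (wr->pc != 0)
		return (ENOMEM);	/* reader has not caught up */
	wr->pc = pc;			/* publish: slot now in use */
	*wrp = (wr + 1 == fence) ? base : wr + 1;
	return (0);
}
#endif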
3515
3516
3517/*
3518 * Process saved PC samples.
3519 */
3520
3521static void
3522pmc_process_samples(int cpu)
3523{
3524 int n, ri;
3525 struct pmc *pm;
3526 struct thread *td;
3527 struct pmc_owner *po;
3528 struct pmc_sample *ps;
3529 struct pmc_samplebuffer *psb;
3530
3531 KASSERT(PCPU_GET(cpuid) == cpu,
3532 ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
3533 PCPU_GET(cpuid), cpu));
3534
3535 psb = pmc_pcpu[cpu]->pc_sb;
3536
3537 for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */
3538
3539 ps = psb->ps_read;
3540 if (ps->ps_pc == (uintfptr_t) 0) /* no data */
3541 break;
3542
3543 pm = ps->ps_pmc;
3544 po = pm->pm_owner;
3545
3546 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
3547 ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
3548 pm, PMC_TO_MODE(pm)));
3549
3550 /* Ignore PMCs that have been switched off */
3551 if (pm->pm_state != PMC_STATE_RUNNING)
3552 goto entrydone;
3553
3554 PMCDBG(SAM,OPS,1,"cpu=%d pm=%p pc=%jx um=%d wr=%d rd=%d", cpu,
3555 pm, (uint64_t) ps->ps_pc, ps->ps_usermode,
3556 (int) (psb->ps_write - psb->ps_samples),
3557 (int) (psb->ps_read - psb->ps_samples));
3558
3559 /*
3560 * If this is a process-mode PMC that is attached to
3561 * its owner, and if the PC is in user mode, update
3562 * profiling statistics like timer-based profiling
3563 * would have done.
3564 */
3565 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
3566 if (ps->ps_usermode) {
3567 td = FIRST_THREAD_IN_PROC(po->po_owner);
3568 addupc_intr(td, ps->ps_pc, 1);
3569 }
3570 goto entrydone;
3571 }
3572
3573 /*
3574 * Otherwise, this is either a sampling mode PMC that
3575 * is attached to a different process than its owner,
3576 * or a system-wide sampling PMC. Dispatch a log
3577 * entry to the PMC's owner process.
3578 */
3579
3580 pmclog_process_pcsample(pm, ps);
3581
3582 entrydone:
3583 ps->ps_pc = (uintfptr_t) 0; /* mark entry as free */
3584 atomic_subtract_rel_32(&pm->pm_runcount, 1);
3585
3586 /* increment read pointer, modulo sample size */
3587 if (++ps == psb->ps_fence)
3588 psb->ps_read = psb->ps_samples;
3589 else
3590 psb->ps_read = ps;
3591 }
3592
3593 atomic_add_int(&pmc_stats.pm_log_sweeps, 1);
3594
3595 /* Do not re-enable stalled PMCs if we failed to process any samples */
3596 if (n == 0)
3597 return;
3598
3599 /*
3600 * Restart any stalled sampling PMCs on this CPU.
3601 *
3602 * If the NMI handler sets PMC_F_IS_STALLED on a PMC after the
3603 * check below, we'll end up processing the stalled PMC at the
3604 * next hardclock tick.
3601 * If the NMI handler sets the pm_stalled field of a PMC after
3602 * the check below, we'll end up processing the stalled PMC at
3603 * the next hardclock tick.
3605 */
3606 for (n = 0; n < md->pmd_npmc; n++) {
3607 (void) (*md->pmd_get_config)(cpu,n,&pm);
3608 if (pm == NULL || /* !cfg'ed */
3609 pm->pm_state != PMC_STATE_RUNNING || /* !active */
3610 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
3611 (pm->pm_flags & PMC_F_IS_STALLED) == 0) /* !stalled */
3610 pm->pm_stalled == 0) /* !stalled */
3612 continue;
3613
3614 pm->pm_flags &= ~PMC_F_IS_STALLED;
3613 pm->pm_stalled = 0;
3615 ri = PMC_TO_ROWINDEX(pm);
3616 (*md->pmd_start_pmc)(cpu, ri);
3617 }
3618}
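/*
 * Editor's note: the loop above is bounded by 'pmc_nsamples'
 * because the ring can never hold more busy slots than that, so
 * this hardclock-driven path cannot spin indefinitely even while
 * the NMI handler keeps producing; any residue is simply picked up
 * on the next tick, as the stall comment above notes.
 */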
3619
3620/*
3621 * Event handlers.
3622 */
3623
3624/*
3625 * Handle a process exit.
3626 *
3627 * Remove this process from all hash tables. If this process
3628 * owned any PMCs, turn off those PMCs and deallocate them,
3629 * removing any associations with target processes.
3630 *
3631 * This function will be called by the last 'thread' of a
3632 * process.
3633 *
3634 * XXX This eventhandler gets called early in the exit process.
3635 * Consider using a 'hook' invocation from thread_exit() or equivalent
3636 * spot. Another negative is that kse_exit doesn't seem to call
3637 * exit1() [??].
3638 *
3639 */
3640
3641static void
3642pmc_process_exit(void *arg __unused, struct proc *p)
3643{
3644 int is_using_hwpmcs;
3645 int cpu;
3646 unsigned int ri;
3647 struct pmc *pm;
3648 struct pmc_process *pp;
3649 struct pmc_owner *po;
3650 pmc_value_t newvalue, tmp;
3651
3652 PROC_LOCK(p);
3653 is_using_hwpmcs = p->p_flag & P_HWPMC;
3654 PROC_UNLOCK(p);
3655
3656 /*
3657 * Log a sysexit event to all SS PMC owners.
3658 */
3659 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
3660 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
3661 pmclog_process_sysexit(po, p->p_pid);
3662
3663 if (!is_using_hwpmcs)
3664 return;
3665
3666 PMC_GET_SX_XLOCK();
3667 PMCDBG(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
3668 p->p_comm);
3669
3670 /*
3671 * Since this code is invoked by the last thread in an exiting
3672 * process, we would have context switched IN at some prior
3673 * point. However, with PREEMPTION, kernel mode context
3674 * switches may happen any time, so we want to disable a
3675 	 * context switch OUT until we get any PMCs targeting this
3676 * process off the hardware.
3677 *
3678 * We also need to atomically remove this process'
3679 * entry from our target process hash table, using
3680 * PMC_FLAG_REMOVE.
3681 */
3682 PMCDBG(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid,
3683 p->p_comm);
3684
3685 critical_enter(); /* no preemption */
3686
3687 cpu = curthread->td_oncpu;
3688
3689 if ((pp = pmc_find_process_descriptor(p,
3690 PMC_FLAG_REMOVE)) != NULL) {
3691
3692 PMCDBG(PRC,EXT,2,
3693 "process-exit proc=%p pmc-process=%p", p, pp);
3694
3695 /*
3696 		 * The exiting process could be the target of
3697 		 * some PMCs which will be running on the
3698 		 * currently executing CPU.
3699 *
3700 * We need to turn these PMCs off like we
3701 * would do at context switch OUT time.
3702 */
3703 for (ri = 0; ri < md->pmd_npmc; ri++) {
3704
3705 /*
3706 * Pick up the pmc pointer from hardware
3707 * state similar to the CSW_OUT code.
3708 */
3709 pm = NULL;
3710 (void) (*md->pmd_get_config)(cpu, ri, &pm);
3711
3712 PMCDBG(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
3713
3714 if (pm == NULL ||
3715 !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
3716 continue;
3717
3718 PMCDBG(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
3719 "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
3720 pm, pm->pm_state);
3721
3722 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
3723 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
3724 __LINE__, PMC_TO_ROWINDEX(pm), ri));
3725
3726 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
3727 ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
3728 __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));
3729
3730 (void) md->pmd_stop_pmc(cpu, ri);
3731
3732 KASSERT(pm->pm_runcount > 0,
3733 ("[pmc,%d] bad runcount ri %d rc %d",
3734 __LINE__, ri, pm->pm_runcount));
3735
3736 			/* Stopped the hardware only if it is actually on */
3737 			if (pm->pm_state == PMC_STATE_RUNNING &&
3738 			    (pm->pm_flags & PMC_F_IS_STALLED) == 0) {
3735 			/* Stop hardware only if it is actually running */
3736 			if (pm->pm_state == PMC_STATE_RUNNING &&
3737 			    pm->pm_stalled == 0) {
3739 md->pmd_read_pmc(cpu, ri, &newvalue);
3740 tmp = newvalue -
3741 PMC_PCPU_SAVED(cpu,ri);
3742
3743 mtx_pool_lock_spin(pmc_mtxpool, pm);
3744 pm->pm_gv.pm_savedvalue += tmp;
3745 pp->pp_pmcs[ri].pp_pmcval += tmp;
3746 mtx_pool_unlock_spin(pmc_mtxpool, pm);
3747 }
3748
3749 atomic_subtract_rel_32(&pm->pm_runcount,1);
3750
3751 KASSERT((int) pm->pm_runcount >= 0,
3752 ("[pmc,%d] runcount is %d", __LINE__, ri));
3753
3754 (void) md->pmd_config_pmc(cpu, ri, NULL);
3755 }
3756
3757 /*
3758 * Inform the MD layer of this pseudo "context switch
3759 * out"
3760 */
3761 (void) md->pmd_switch_out(pmc_pcpu[cpu], pp);
3762
3763 critical_exit(); /* ok to be pre-empted now */
3764
3765 /*
3766 * Unlink this process from the PMCs that are
3767 		 * targeting it. This will send a signal to
3768 		 * all PMC owners whose PMCs are orphaned.
3769 *
3770 * Log PMC value at exit time if requested.
3771 */
3772 for (ri = 0; ri < md->pmd_npmc; ri++)
3773 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
3774 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE)
3773 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
3774 PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
3775 pmclog_process_procexit(pm, pp);
3776 pmc_unlink_target_process(pm, pp);
3777 }
3778 FREE(pp, M_PMC);
3779
3780 } else
3781 critical_exit(); /* pp == NULL */
3782
3783
3784 /*
3785 * If the process owned PMCs, free them up and free up
3786 * memory.
3787 */
3788 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
3789 pmc_remove_owner(po);
3790 pmc_destroy_owner_descriptor(po);
3791 }
3792
3793 sx_xunlock(&pmc_sx);
3794}
3795
3796/*
3797 * Handle a process fork.
3798 *
3799 * If the parent process 'p1' is under HWPMC monitoring, then copy
3800 * over any attached PMCs that have 'do_descendants' semantics.
3801 */
3802
3803static void
3804pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
3805 int flags)
3806{
3807 int is_using_hwpmcs;
3808 unsigned int ri;
3809 uint32_t do_descendants;
3810 struct pmc *pm;
3811 struct pmc_owner *po;
3812 struct pmc_process *ppnew, *ppold;
3813
3814 (void) flags; /* unused parameter */
3815
3816 PROC_LOCK(p1);
3817 is_using_hwpmcs = p1->p_flag & P_HWPMC;
3818 PROC_UNLOCK(p1);
3819
3820 /*
3821 * If there are system-wide sampling PMCs active, we need to
3822 	 * log all fork events to their owners' logs.
3823 */
3824
3825 LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
3826 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
3827 pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
3828
3829 if (!is_using_hwpmcs)
3830 return;
3831
3832 PMC_GET_SX_XLOCK();
3833 PMCDBG(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
3834 p1->p_pid, p1->p_comm, newproc);
3835
3836 /*
3837 * If the parent process (curthread->td_proc) is a
3838 * target of any PMCs, look for PMCs that are to be
3839 * inherited, and link these into the new process
3840 * descriptor.
3841 */
3842 if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
3843 PMC_FLAG_NONE)) == NULL)
3844 goto done; /* nothing to do */
3845
3846 do_descendants = 0;
3847 for (ri = 0; ri < md->pmd_npmc; ri++)
3848 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
3849 do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
3850 if (do_descendants == 0) /* nothing to do */
3851 goto done;
3852
3853 /* allocate a descriptor for the new process */
3854 if ((ppnew = pmc_find_process_descriptor(newproc,
3855 PMC_FLAG_ALLOCATE)) == NULL)
3856 goto done;
3857
3858 /*
3859 * Run through all PMCs that were targeting the old process
3860 * and which specified F_DESCENDANTS and attach them to the
3861 * new process.
3862 *
3863 * Log the fork event to all owners of PMCs attached to this
3864 * process, if not already logged.
3865 */
3866 for (ri = 0; ri < md->pmd_npmc; ri++)
3867 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
3868 (pm->pm_flags & PMC_F_DESCENDANTS)) {
3869 pmc_link_target_process(pm, ppnew);
3870 po = pm->pm_owner;
3871 if (po->po_sscount == 0 &&
3872 po->po_flags & PMC_PO_OWNS_LOGFILE)
3873 pmclog_process_procfork(po, p1->p_pid,
3874 newproc->p_pid);
3875 }
3876
3877 /*
3878 * Now mark the new process as being tracked by this driver.
3879 */
3880 PROC_LOCK(newproc);
3881 newproc->p_flag |= P_HWPMC;
3882 PROC_UNLOCK(newproc);
3883
3884 done:
3885 sx_xunlock(&pmc_sx);
3886}
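/*
 * Editor's note: PMC_F_DESCENDANTS is what triggers the copying
 * above.  A hedged userland sketch (libpmc interface of this era
 * assumed; the event spec is only an example):
 */
#if 0
	pmc_id_t pmcid;
	int error;

	/* Count in the target process and everything it forks. */
	error = pmc_allocate("k8-dc-accesses", PMC_MODE_TC,
	    PMC_F_DESCENDANTS, PMC_CPU_ANY, &pmcid);
#endif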
3887
3888
3889/*
3890 * initialization
3891 */
3892
3893static const char *pmc_name_of_pmcclass[] = {
3894#undef __PMC_CLASS
3895#define __PMC_CLASS(N) #N ,
3896 __PMC_CLASSES()
3897};
3898
3899static int
3900pmc_initialize(void)
3901{
3902 int cpu, error, n;
3903 struct pmc_binding pb;
3904 struct pmc_samplebuffer *sb;
3905
3906 md = NULL;
3907 error = 0;
3908
3909#if DEBUG
3910 /* parse debug flags first */
3911 if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
3912 pmc_debugstr, sizeof(pmc_debugstr)))
3913 pmc_debugflags_parse(pmc_debugstr,
3914 pmc_debugstr+strlen(pmc_debugstr));
3915#endif
3916
3917 PMCDBG(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
3918
3919 /*
3920 * check sysctl parameters
3921 */
3922
3923 if (pmc_hashsize <= 0) {
3924 (void) printf("hwpmc: tunable hashsize=%d must be greater "
3925 "than zero.\n", pmc_hashsize);
3926 pmc_hashsize = PMC_HASH_SIZE;
3927 }
3928
3929 if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
3930 (void) printf("hwpmc: tunable nsamples=%d out of range.\n", pmc_nsamples);
3931 pmc_nsamples = PMC_NSAMPLES;
3932 }
3933
3934 md = pmc_md_initialize();
3935
3936 if (md == NULL || md->pmd_init == NULL)
3937 return ENOSYS;
3938
3939 /* allocate space for the per-cpu array */
3940 MALLOC(pmc_pcpu, struct pmc_cpu **, mp_ncpus * sizeof(struct pmc_cpu *),
3941 M_PMC, M_WAITOK|M_ZERO);
3942
3943 /* per-cpu 'saved values' for managing process-mode PMCs */
3944 MALLOC(pmc_pcpu_saved, pmc_value_t *,
3945 sizeof(pmc_value_t) * mp_ncpus * md->pmd_npmc, M_PMC, M_WAITOK);
3946
3947 /* perform cpu dependent initialization */
3948 pmc_save_cpu_binding(&pb);
3949 for (cpu = 0; cpu < mp_ncpus; cpu++) {
3950 if (pmc_cpu_is_disabled(cpu))
3951 continue;
3952 pmc_select_cpu(cpu);
3953 if ((error = md->pmd_init(cpu)) != 0)
3954 break;
3955 }
3956 pmc_restore_cpu_binding(&pb);
3957
3958 if (error != 0)
3959 return error;
3960
3961 /* allocate space for the sample array */
3962 for (cpu = 0; cpu < mp_ncpus; cpu++) {
3963 if (pmc_cpu_is_disabled(cpu))
3964 continue;
3965 MALLOC(sb, struct pmc_samplebuffer *,
3966 sizeof(struct pmc_samplebuffer) +
3967 pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
3968 M_WAITOK|M_ZERO);
3969
3970 sb->ps_read = sb->ps_write = sb->ps_samples;
3971 		sb->ps_fence = sb->ps_samples + pmc_nsamples;
3973 KASSERT(pmc_pcpu[cpu] != NULL,
3974 ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
3975
3976 pmc_pcpu[cpu]->pc_sb = sb;
3977 }
3978
3979 /* allocate space for the row disposition array */
3980 pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
3981 M_PMC, M_WAITOK|M_ZERO);
3982
3983 KASSERT(pmc_pmcdisp != NULL,
3984 ("[pmc,%d] pmcdisp allocation returned NULL", __LINE__));
3985
3986 /* mark all PMCs as available */
3987 for (n = 0; n < (int) md->pmd_npmc; n++)
3988 PMC_MARK_ROW_FREE(n);
3989
3990 /* allocate thread hash tables */
3991 pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
3992 &pmc_ownerhashmask);
3993
3994 pmc_processhash = hashinit(pmc_hashsize, M_PMC,
3995 &pmc_processhashmask);
3996 mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc", MTX_SPIN);
3997
3998 LIST_INIT(&pmc_ss_owners);
3999 pmc_ss_count = 0;
4000
4001 /* allocate a pool of spin mutexes */
4002 pmc_mtxpool = mtx_pool_create("pmc", pmc_mtxpool_size, MTX_SPIN);
4003
4004 PMCDBG(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
4005 "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
4006 pmc_processhash, pmc_processhashmask);
4007
4008 /* register process {exit,fork,exec} handlers */
4009 pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
4010 pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
4011 pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
4012 pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
4013
4014 /* initialize logging */
4015 pmclog_initialize();
4016
4017 /* set hook functions */
4018 pmc_intr = md->pmd_intr;
4019 pmc_hook = pmc_hook_handler;
4020
4021 if (error == 0) {
4022 printf(PMC_MODULE_NAME ":");
4023 for (n = 0; n < (int) md->pmd_nclass; n++)
4024 printf(" %s(%d)",
4025 pmc_name_of_pmcclass[md->pmd_classes[n].pm_class],
4026 md->pmd_nclasspmcs[n]);
4027 printf("\n");
4028 }
4029
4030 return error;
4031}
4032
4033/* prepare to be unloaded */
4034static void
4035pmc_cleanup(void)
4036{
4037 int cpu;
4038 struct pmc_ownerhash *ph;
4039 struct pmc_owner *po, *tmp;
4040 struct pmc_binding pb;
4041#if DEBUG
4042 struct pmc_processhash *prh;
4043#endif
4044
4045 PMCDBG(MOD,INI,0, "%s", "cleanup");
4046
4047 /* switch off sampling */
4048 atomic_store_rel_int(&pmc_cpumask, 0);
4049 pmc_intr = NULL;
4050
4051 sx_xlock(&pmc_sx);
4052 if (pmc_hook == NULL) { /* being unloaded already */
4053 sx_xunlock(&pmc_sx);
4054 return;
4055 }
4056
4057 pmc_hook = NULL; /* prevent new threads from entering module */
4058
4059 /* deregister event handlers */
4060 EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
4061 EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
4062
4063 /* send SIGBUS to all owner threads, free up allocations */
4064 if (pmc_ownerhash)
4065 for (ph = pmc_ownerhash;
4066 ph <= &pmc_ownerhash[pmc_ownerhashmask];
4067 ph++) {
4068 LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
4069 pmc_remove_owner(po);
4070
4071 /* send SIGBUS to owner processes */
4072 PMCDBG(MOD,INI,2, "cleanup signal proc=%p "
4073 "(%d, %s)", po->po_owner,
4074 po->po_owner->p_pid,
4075 po->po_owner->p_comm);
4076
4077 PROC_LOCK(po->po_owner);
4078 psignal(po->po_owner, SIGBUS);
4079 PROC_UNLOCK(po->po_owner);
4080
4081 pmc_destroy_owner_descriptor(po);
4082 }
4083 }
4084
4085 /* reclaim allocated data structures */
4086 if (pmc_mtxpool)
4087 mtx_pool_destroy(&pmc_mtxpool);
4088
4089 mtx_destroy(&pmc_processhash_mtx);
4090 if (pmc_processhash) {
4091#if DEBUG
4092 struct pmc_process *pp;
4093
4094 PMCDBG(MOD,INI,3, "%s", "destroy process hash");
4095 for (prh = pmc_processhash;
4096 prh <= &pmc_processhash[pmc_processhashmask];
4097 prh++)
4098 LIST_FOREACH(pp, prh, pp_next)
4099 PMCDBG(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
4100#endif
4101
4102 hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
4103 pmc_processhash = NULL;
4104 }
4105
4106 if (pmc_ownerhash) {
4107 PMCDBG(MOD,INI,3, "%s", "destroy owner hash");
4108 hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
4109 pmc_ownerhash = NULL;
4110 }
4111
4112 KASSERT(LIST_EMPTY(&pmc_ss_owners),
4113 ("[pmc,%d] Global SS owner list not empty", __LINE__));
4114 KASSERT(pmc_ss_count == 0,
4115 ("[pmc,%d] Global SS count not empty", __LINE__));
4116
4117 /* do processor dependent cleanup */
4118 PMCDBG(MOD,INI,3, "%s", "md cleanup");
4119 if (md) {
4120 pmc_save_cpu_binding(&pb);
4121 for (cpu = 0; cpu < mp_ncpus; cpu++) {
4122 PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
4123 cpu, pmc_pcpu[cpu]);
4124 if (pmc_cpu_is_disabled(cpu))
4125 continue;
4126 pmc_select_cpu(cpu);
4127 if (pmc_pcpu[cpu])
4128 (void) md->pmd_cleanup(cpu);
4129 }
4130 FREE(md, M_PMC);
4131 md = NULL;
4132 pmc_restore_cpu_binding(&pb);
4133 }
4134
4135 /* deallocate per-cpu structures */
4136 FREE(pmc_pcpu, M_PMC);
4137 pmc_pcpu = NULL;
4138
4139 FREE(pmc_pcpu_saved, M_PMC);
4140 pmc_pcpu_saved = NULL;
4141
4142 if (pmc_pmcdisp) {
4143 FREE(pmc_pmcdisp, M_PMC);
4144 pmc_pmcdisp = NULL;
4145 }
4146
4147 pmclog_shutdown();
4148
4149 sx_xunlock(&pmc_sx); /* we are done */
4150}
4151
4152/*
4153 * The function called at load/unload.
4154 */
4155
4156static int
4157load (struct module *module __unused, int cmd, void *arg __unused)
4158{
4159 int error;
4160
4161 error = 0;
4162
4163 switch (cmd) {
4164 case MOD_LOAD :
4165 /* initialize the subsystem */
4166 error = pmc_initialize();
4167 if (error != 0)
4168 break;
4169 PMCDBG(MOD,INI,1, "syscall=%d ncpus=%d",
4170 pmc_syscall_num, mp_ncpus);
4171 break;
4172
4173
4174 case MOD_UNLOAD :
4175 case MOD_SHUTDOWN:
4176 pmc_cleanup();
4177 PMCDBG(MOD,INI,1, "%s", "unloaded");
4178 break;
4179
4180 default :
4181 error = EINVAL; /* XXX should panic(9) */
4182 break;
4183 }
4184
4185 return error;
4186}
4187
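/*
 * Editor's note: in practice this handler runs when the module is
 * loaded or unloaded, e.g. "kldload hwpmc" / "kldunload hwpmc" from
 * the command line, or hwpmc_load="YES" in loader.conf(5) at boot.
 */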
4188/* memory pool */
4189MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");