--- hwpmc_amd.c (184205)
+++ hwpmc_amd.c (184802)
 /*-
  * Copyright (c) 2003-2008 Joseph Koshy
  * Copyright (c) 2007 The FreeBSD Foundation
  * All rights reserved.
  *
  * Portions of this software were developed by A. Joseph Koshy under
  * sponsorship from the FreeBSD Foundation and Google, Inc.
  *

--- 12 unchanged lines hidden ---

  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
- *
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/dev/hwpmc/hwpmc_amd.c 184205 2008-10-23 15:53:51Z des $");
+__FBSDID("$FreeBSD: head/sys/dev/hwpmc/hwpmc_amd.c 184802 2008-11-09 17:37:54Z jkoshy $");

 /* Support for the AMD K7 and later processors */

 #include <sys/param.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/pmc.h>

--- 17 unchanged lines hidden ---

     uint32_t pm_perfctr;    /* address of PERFCTR register */
 };

 static struct amd_descr amd_pmcdesc[AMD_NPMCS] =
 {
     {
         .pm_descr =
         {
67 .pd_name = "TSC",
68 .pd_class = PMC_CLASS_TSC,
69 .pd_caps = PMC_CAP_READ,
70 .pd_width = 64
71 },
72 .pm_evsel = MSR_TSC,
73 .pm_perfctr = 0 /* unused */
74 },
75
76 {
77 .pm_descr =
78 {
79 .pd_name = "",
80 .pd_class = -1,
81 .pd_caps = AMD_PMC_CAPS,
82 .pd_width = 48
83 },
84 .pm_evsel = AMD_PMC_EVSEL_0,
85 .pm_perfctr = AMD_PMC_PERFCTR_0
86 },

--- 166 unchanged lines hidden (view full) ---

253 { PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F }
254
255};
256
257const int amd_event_codes_size =
258 sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);
259
260/*
66 .pd_name = "",
67 .pd_class = -1,
68 .pd_caps = AMD_PMC_CAPS,
69 .pd_width = 48
70 },
71 .pm_evsel = AMD_PMC_EVSEL_0,
72 .pm_perfctr = AMD_PMC_PERFCTR_0
73 },

--- 166 unchanged lines hidden (view full) ---

240 { PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F }
241
242};
243
244const int amd_event_codes_size =
245 sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);
246
247/*
248 * Per-processor information
249 */
250
251struct amd_cpu {
252 struct pmc_hw pc_amdpmcs[AMD_NPMCS];
253};
254
255static struct amd_cpu **amd_pcpu;
256
257/*
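Aside (editorial illustration, not part of the diff): the added struct amd_cpu and amd_pcpu array give the MD driver its own per-CPU table, so the rewritten functions below index it directly instead of going through the MI pmc_pcpu[cpu]->pc_hwpmcs[ri] pointers. A minimal standalone sketch of that lookup, with simplified stand-in types; AMD_NPMCS == 4 is an assumption here:

/* Sketch only: simplified stand-in types, not the kernel's definitions. */
#define AMD_NPMCS    4

struct pmc;                              /* opaque here */
struct pmc_hw { struct pmc *phw_pmc; };  /* one hardware row */
struct amd_cpu { struct pmc_hw pc_amdpmcs[AMD_NPMCS]; };

static struct amd_cpu **amd_pcpu;        /* one slot per CPU */

/* The single-array lookup pattern used throughout the new code. */
static struct pmc *
amd_lookup_sketch(int cpu, int ri)
{
    return (amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc);
}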
 /*
  * read a pmc register
  */

 static int
 amd_read_pmc(int cpu, int ri, pmc_value_t *v)
 {
     enum pmc_mode mode;
     const struct amd_descr *pd;
     struct pmc *pm;
-    const struct pmc_hw *phw;
     pmc_value_t tmp;

     KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
         ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
     KASSERT(ri >= 0 && ri < AMD_NPMCS,
         ("[amd,%d] illegal row-index %d", __LINE__, ri));
+    KASSERT(amd_pcpu[cpu],
+        ("[amd,%d] null per-cpu, cpu %d", __LINE__, cpu));

-    phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
-    pd  = &amd_pmcdesc[ri];
-    pm  = phw->phw_pmc;
+    pm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
+    pd = &amd_pmcdesc[ri];

     KASSERT(pm != NULL,
         ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
         cpu, ri));

     mode = PMC_TO_MODE(pm);

     PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);

-    /* Reading the TSC is a special case */
-    if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
-        KASSERT(PMC_IS_COUNTING_MODE(mode),
-            ("[amd,%d] TSC counter in non-counting mode", __LINE__));
-        *v = rdtsc();
-        PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
-        return 0;
-    }
-
 #ifdef DEBUG
     KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
         ("[amd,%d] unknown PMC class (%d)", __LINE__,
         pd->pm_descr.pd_class));
 #endif

     tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
     PMCDBG(MDP,REA,2,"amd-read (pre-munge) id=%d -> %jd", ri, tmp);

--- 12 unchanged lines hidden ---

 /*
  * Write a PMC MSR.
  */

 static int
 amd_write_pmc(int cpu, int ri, pmc_value_t v)
 {
     const struct amd_descr *pd;
-    struct pmc *pm;
-    const struct pmc_hw *phw;
     enum pmc_mode mode;
+    struct pmc *pm;

     KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
         ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
     KASSERT(ri >= 0 && ri < AMD_NPMCS,
         ("[amd,%d] illegal row-index %d", __LINE__, ri));

-    phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
-    pd  = &amd_pmcdesc[ri];
-    pm  = phw->phw_pmc;
+    pm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
+    pd = &amd_pmcdesc[ri];

     KASSERT(pm != NULL,
         ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
         cpu, ri));

     mode = PMC_TO_MODE(pm);

346 if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
347 return 0;
348
349#ifdef DEBUG
350 KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
351 ("[amd,%d] unknown PMC class (%d)", __LINE__,
352 pd->pm_descr.pd_class));
353#endif
354
355 /* use 2's complement of the count for sampling mode PMCs */
356 if (PMC_IS_SAMPLING_MODE(mode))

--- 18 unchanged lines hidden (view full) ---
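Aside: the "2's complement" munging referred to above can be made concrete. A sketch under the assumption of 48-bit counters (the width recorded in the descriptor table); the helper names are invented here. A sampling PMC counts upward and interrupts on overflow, so a reload count of N is programmed as -N truncated to the counter width, and reading applies the inverse so callers always see the remaining count:

#include <stdint.h>

#define PMC_WIDTH    48
#define PMC_MASK     ((UINT64_C(1) << PMC_WIDTH) - 1)

/* Value to program into PERFCTR for a reload count of 'reload'. */
static inline uint64_t
reload_to_perfctr(uint64_t reload)
{
    return ((~reload + 1) & PMC_MASK);    /* -reload, mod 2^48 */
}

/* Remaining count recovered when the counter is read back. */
static inline uint64_t
perfctr_to_reload(uint64_t perfctr)
{
    return ((~perfctr + 1) & PMC_MASK);
}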


     PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

     KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
         ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
     KASSERT(ri >= 0 && ri < AMD_NPMCS,
         ("[amd,%d] illegal row-index %d", __LINE__, ri));

383 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
366 phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];

     KASSERT(pm == NULL || phw->phw_pmc == NULL,
         ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
         __LINE__, pm, phw->phw_pmc));

     phw->phw_pmc = pm;
     return 0;
 }

 /*
  * Retrieve a configured PMC pointer from hardware state.
  */

 static int
 amd_get_config(int cpu, int ri, struct pmc **ppm)
 {
400 *ppm = pmc_pcpu[cpu]->pc_hwpmcs[ri]->phw_pmc;
383 *ppm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;

     return 0;
 }

 /*
  * Machine dependent actions taken during the context switch in of a
  * thread.
  */

--- 60 unchanged lines hidden ---

         return EINVAL;

     caps = pm->pm_caps;

     PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);

     if ((pd->pd_caps & caps) != caps)
         return EPERM;
477 if (pd->pd_class == PMC_CLASS_TSC) {
478 /* TSC's are always allocated in system-wide counting mode */
479 if (a->pm_ev != PMC_EV_TSC_TSC ||
480 a->pm_mode != PMC_MODE_SC)
481 return EINVAL;
482 return 0;
483 }
484
460
485#ifdef DEBUG
486 KASSERT(pd->pd_class == amd_pmc_class,
487 ("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class));
488#endif
489
490 pe = a->pm_ev;
491
492 /* map ev to the correct event mask code */
493 config = allowed_unitmask = 0;
494 for (i = 0; i < amd_event_codes_size; i++)
495 if (amd_event_codes[i].pe_ev == pe) {
496 config =
497 AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);

--- 53 unchanged lines hidden (view full) ---

551
552 (void) pmc;
553
554 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
555 ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
556 KASSERT(ri >= 0 && ri < AMD_NPMCS,
557 ("[amd,%d] illegal row-index %d", __LINE__, ri));
558
461 pe = a->pm_ev;
462
463 /* map ev to the correct event mask code */
464 config = allowed_unitmask = 0;
465 for (i = 0; i < amd_event_codes_size; i++)
466 if (amd_event_codes[i].pe_ev == pe) {
467 config =
468 AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);

--- 53 unchanged lines hidden (view full) ---

522
523 (void) pmc;
524
525 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
526 ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
527 KASSERT(ri >= 0 && ri < AMD_NPMCS,
528 ("[amd,%d] illegal row-index %d", __LINE__, ri));
529
559 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
530 phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];

     KASSERT(phw->phw_pmc == NULL,
         ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

 #ifdef DEBUG
     pd = &amd_pmcdesc[ri];
     if (pd->pm_descr.pd_class == amd_pmc_class)
         KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),

--- 15 unchanged lines hidden ---

     struct pmc_hw *phw;
     const struct amd_descr *pd;

     KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
         ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
     KASSERT(ri >= 0 && ri < AMD_NPMCS,
         ("[amd,%d] illegal row-index %d", __LINE__, ri));

591 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
562 phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
592 pm = phw->phw_pmc;
593 pd = &amd_pmcdesc[ri];
594
595 KASSERT(pm != NULL,
596 ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
597 cpu, ri));
598
599 PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);
600
563 pm = phw->phw_pmc;
564 pd = &amd_pmcdesc[ri];
565
566 KASSERT(pm != NULL,
567 ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
568 cpu, ri));
569
570 PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);
571
601 if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
602 return 0; /* TSCs are always running */
603
604#ifdef DEBUG
605 KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
606 ("[amd,%d] unknown PMC class (%d)", __LINE__,
607 pd->pm_descr.pd_class));
608#endif
609
610 KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
611 ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
612 ri, cpu, pd->pm_descr.pd_name));
613
614 /* turn on the PMC ENABLE bit */
615 config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;
616
617 PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);

--- 14 unchanged lines hidden (view full) ---

632 const struct amd_descr *pd;
633 uint64_t config;
634
635 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
636 ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
637 KASSERT(ri >= 0 && ri < AMD_NPMCS,
638 ("[amd,%d] illegal row-index %d", __LINE__, ri));
639
572 KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
573 ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
574 ri, cpu, pd->pm_descr.pd_name));
575
576 /* turn on the PMC ENABLE bit */
577 config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;
578
579 PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);

--- 14 unchanged lines hidden (view full) ---

594 const struct amd_descr *pd;
595 uint64_t config;
596
597 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
598 ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
599 KASSERT(ri >= 0 && ri < AMD_NPMCS,
600 ("[amd,%d] illegal row-index %d", __LINE__, ri));
601
640 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
602 phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
641 pm = phw->phw_pmc;
642 pd = &amd_pmcdesc[ri];
643
644 KASSERT(pm != NULL,
645 ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
646 cpu, ri));
603 pm = phw->phw_pmc;
604 pd = &amd_pmcdesc[ri];
605
606 KASSERT(pm != NULL,
607 ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
608 cpu, ri));
-
-    /* can't stop a TSC */
-    if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
-        return 0;
-
-#ifdef DEBUG
-    KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
-        ("[amd,%d] unknown PMC class (%d)", __LINE__,
-        pd->pm_descr.pd_class));
-#endif
-
     KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
         ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
         __LINE__, ri, cpu, pd->pm_descr.pd_name));

     PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);

     /* turn off the PMC ENABLE bit */
     config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;

--- 6 unchanged lines hidden ---

  * interrupt was this CPU's PMCs or '0' otherwise. It is not allowed
  * to sleep or do anything a 'fast' interrupt handler is not allowed
  * to do.
  */

 static int
 amd_intr(int cpu, struct trapframe *tf)
 {
-    int i, error, retval, ri;
+    int i, error, retval;
     uint32_t config, evsel, perfctr;
     struct pmc *pm;
-    struct pmc_cpu *pc;
+    struct amd_cpu *pac;
     struct pmc_hw *phw;
     pmc_value_t v;

     KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
         ("[amd,%d] out of range CPU %d", __LINE__, cpu));

     PMCDBG(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
         TRAPF_USERMODE(tf));

     retval = 0;

-    pc = pmc_pcpu[cpu];
+    pac = amd_pcpu[cpu];

     /*
      * look for all PMCs that have interrupted:
-     * - skip over the TSC [PMC#0]
      * - look for a running, sampling PMC which has overflowed
      *   and which has a valid 'struct pmc' association
      *
      * If found, we call a helper to process the interrupt.
      *
      * If multiple PMCs interrupt at the same time, the AMD64
      * processor appears to deliver as many NMIs as there are
      * outstanding PMC interrupts.  So we process only one NMI
      * interrupt at a time.
      */

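Aside: the loop below keys off AMD_PMC_HAS_OVERFLOWED to find the counter that fired. A sketch of the likely test, under the assumption of the 48-bit sampling scheme (a counter started at a negative reload value keeps its top bit set until it wraps past zero); this mirrors, but does not quote, the real macro:

#include <stdbool.h>
#include <stdint.h>

/*
 * Assumption: bit 47 is the sign bit of a 48-bit counter. It stays
 * set while a sampling PMC counts up from its negative reload value;
 * once the counter wraps, the bit clears, flagging an overflow.
 */
static inline bool
pmc_overflowed_sketch(uint64_t perfctr)
{
    return ((perfctr & (UINT64_C(1) << 47)) == 0);
}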
711 for (i = 0; retval == 0 && i < AMD_NPMCS-1; i++) {
661 for (i = 0; retval == 0 && i < AMD_NPMCS; i++) {
712
662
-        ri = i + 1;    /* row index; TSC is at ri == 0 */
-
         if (!AMD_PMC_HAS_OVERFLOWED(i))
             continue;

718 phw = pc->pc_hwpmcs[ri];
666 phw = &pac->pc_amdpmcs[i];

         KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__));

         if ((pm = phw->phw_pmc) == NULL ||
             pm->pm_state != PMC_STATE_RUNNING ||
             !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
             continue;
         }

--- 37 unchanged lines hidden ---

     const struct amd_descr *pd;
     struct pmc_hw *phw;

     KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
         ("[amd,%d] illegal CPU %d", __LINE__, cpu));
     KASSERT(ri >= 0 && ri < AMD_NPMCS,
         ("[amd,%d] row-index %d out of range", __LINE__, ri));

772 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
720 phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
773 pd = &amd_pmcdesc[ri];
774
775 if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
776 PMC_NAME_MAX, &copied)) != 0)
777 return error;
778
779 pi->pm_class = pd->pm_descr.pd_class;
780

--- 18 unchanged lines hidden (view full) ---

799
800static int
801amd_get_msr(int ri, uint32_t *msr)
802{
803 KASSERT(ri >= 0 && ri < AMD_NPMCS,
804 ("[amd,%d] ri %d out of range", __LINE__, ri));
805
806 *msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;
721 pd = &amd_pmcdesc[ri];
722
723 if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
724 PMC_NAME_MAX, &copied)) != 0)
725 return error;
726
727 pi->pm_class = pd->pm_descr.pd_class;
728

--- 18 unchanged lines hidden (view full) ---

747
748static int
749amd_get_msr(int ri, uint32_t *msr)
750{
751 KASSERT(ri >= 0 && ri < AMD_NPMCS,
752 ("[amd,%d] ri %d out of range", __LINE__, ri));
753
754 *msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;
-    return 0;
+
+    return (0);
 }

 /*
  * processor dependent initialization.
  */

-/*
- * Per-processor data structure
- *
- * [common stuff]
- * [5 struct pmc_hw pointers]
- * [5 struct pmc_hw structures]
- */
-
-struct amd_cpu {
-    struct pmc_cpu    pc_common;
-    struct pmc_hw    *pc_hwpmcs[AMD_NPMCS];
-    struct pmc_hw    pc_amdpmcs[AMD_NPMCS];
-};
-
-
 static int
-amd_init(int cpu)
+amd_pcpu_init(struct pmc_mdep *md, int cpu)
 {
-    int n;
-    struct amd_cpu *pcs;
+    int classindex, first_ri, n;
+    struct pmc_cpu *pc;
+    struct amd_cpu *pac;
     struct pmc_hw *phw;

     KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
         ("[amd,%d] insane cpu number %d", __LINE__, cpu));

     PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);

-    pcs = malloc(sizeof(struct amd_cpu), M_PMC,
+    amd_pcpu[cpu] = pac = malloc(sizeof(struct amd_cpu), M_PMC,
         M_WAITOK|M_ZERO);

844 phw = &pcs->pc_amdpmcs[0];
845
846 /*
779 /*
847 * Initialize the per-cpu mutex and set the content of the
848 * hardware descriptors to a known state.
780 * Set the content of the hardware descriptors to a known
781 * state and initialize pointers in the MI per-cpu descriptor.
849 */
782 */
783 pc = pmc_pcpu[cpu];
784#if defined(__amd64__)
785 classindex = PMC_MDEP_CLASS_INDEX_K8;
786#elif defined(__i386__)
787 classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ?
788 PMC_MDEP_CLASS_INDEX_K8 : PMC_MDEP_CLASS_INDEX_K7;
789#endif
790 first_ri = md->pmd_classdep[classindex].pcd_ri;
850
791
851 for (n = 0; n < AMD_NPMCS; n++, phw++) {
792 KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu pointer", __LINE__));
793
794 for (n = 0, phw = pac->pc_amdpmcs; n < AMD_NPMCS; n++, phw++) {
852 phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
853 PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
854 phw->phw_pmc = NULL;
795 phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
796 PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
797 phw->phw_pmc = NULL;
855 pcs->pc_hwpmcs[n] = phw;
798 pc->pc_hwpmcs[n + first_ri] = phw;
     }

-    /* Mark the TSC as shareable */
-    pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
-
-    pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
-
-    return 0;
+    return (0);
 }


 /*
  * processor dependent cleanup prior to the KLD
  * being unloaded
  */

 static int
-amd_cleanup(int cpu)
+amd_pcpu_fini(struct pmc_mdep *md, int cpu)
 {
-    int i;
+    int classindex, first_ri, i;
     uint32_t evsel;
-    struct pmc_cpu *pcs;
+    struct pmc_cpu *pc;
+    struct amd_cpu *pac;

     KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
         ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));

     PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);

     /*
      * First, turn off all PMCs on this CPU.
      */

     for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
         evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
         evsel &= ~AMD_PMC_ENABLE;
         wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
     }

     /*
      * Next, free up allocated space.
      */
+    if ((pac = amd_pcpu[cpu]) == NULL)
+        return (0);

-    if ((pcs = pmc_pcpu[cpu]) == NULL)
-        return 0;
+    amd_pcpu[cpu] = NULL;

 #ifdef DEBUG
-    /* check the TSC */
-    KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL,
-        ("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu));
-    for (i = 1; i < AMD_NPMCS; i++) {
-        KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL,
+    for (i = 0; i < AMD_NPMCS; i++) {
+        KASSERT(pac->pc_amdpmcs[i].phw_pmc == NULL,
             ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
         KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)),
             ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
     }
 #endif

-    pmc_pcpu[cpu] = NULL;
-    free(pcs, M_PMC);
-    return 0;
+    pc = pmc_pcpu[cpu];
+    KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu state", __LINE__));
+
+#if defined(__amd64__)
+    classindex = PMC_MDEP_CLASS_INDEX_K8;
+#elif defined(__i386__)
+    classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ? PMC_MDEP_CLASS_INDEX_K8 :
+        PMC_MDEP_CLASS_INDEX_K7;
+#endif
+    first_ri = md->pmd_classdep[classindex].pcd_ri;
+
+    /*
+     * Reset pointers in the MI 'per-cpu' state.
+     */
+    for (i = 0; i < AMD_NPMCS; i++) {
+        pc->pc_hwpmcs[i + first_ri] = NULL;
+    }
+
+    free(pac, M_PMC);
+
+    return (0);
 }

 /*
  * Initialize ourselves.
  */

 struct pmc_mdep *
 pmc_amd_initialize(void)
 {
+    int classindex, error, i, nclasses, ncpus;
+    struct pmc_classdep *pcd;
     enum pmc_cputype cputype;
-    enum pmc_class class;
     struct pmc_mdep *pmc_mdep;
+    enum pmc_class class;
     char *name;
-    int i;

     /*
      * The presence of hardware performance counters on the AMD
      * Athlon, Duron or later processors, is _not_ indicated by
      * any of the processor feature flags set by the 'CPUID'
      * instruction, so we only check the 'instruction family'
      * field returned by CPUID for instruction family >= 6.
      */

     class = cputype = -1;
     name = NULL;
     switch (cpu_id & 0xF00) {
+#if defined(__i386__)
     case 0x600:        /* Athlon(tm) processor */
+        classindex = PMC_MDEP_CLASS_INDEX_K7;
         cputype = PMC_CPU_AMD_K7;
         class = PMC_CLASS_K7;
         name = "K7";
         break;
+#endif
     case 0xF00:        /* Athlon64/Opteron processor */
+        classindex = PMC_MDEP_CLASS_INDEX_K8;
         cputype = PMC_CPU_AMD_K8;
         class = PMC_CLASS_K8;
         name = "K8";
         break;
     }

     if ((int) cputype == -1) {
         (void) printf("pmc: Unknown AMD CPU.\n");
         return NULL;
     }

 #ifdef DEBUG
     amd_pmc_class = class;
 #endif

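Aside: the `cpu_id & 0xF00` switch above inspects the base-family field of the CPUID signature, since (as the comment notes) no feature flag advertises these counters. A sketch of the field extraction; the helper name is invented:

/* Base family occupies bits 8..11 of the CPUID signature word. */
static inline unsigned int
cpuid_base_family(unsigned int cpu_id)
{
    return ((cpu_id >> 8) & 0xF);    /* 0x6: Athlon/K7, 0xF: K8 */
}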
-    pmc_mdep = malloc(sizeof(struct pmc_mdep),
+    /*
+     * Allocate space for pointers to PMC HW descriptors and for
+     * the MDEP structure used by MI code.
+     */
+    amd_pcpu = malloc(sizeof(struct amd_cpu *) * pmc_cpu_max(), M_PMC,
+        M_WAITOK|M_ZERO);
+
+    /*
+     * These processors have two classes of PMCs: the TSC and
+     * programmable PMCs.
+     */
+    nclasses = 2;
+    pmc_mdep = malloc(sizeof(struct pmc_mdep) + nclasses * sizeof (struct pmc_classdep),
         M_PMC, M_WAITOK|M_ZERO);

966 pmc_mdep->pmd_cputype = cputype;
967 pmc_mdep->pmd_npmc = AMD_NPMCS;
938 pmc_mdep->pmd_cputype = cputype;
939 pmc_mdep->pmd_nclass = nclasses;
968
940
-    /* this processor has two classes of usable PMCs */
-    pmc_mdep->pmd_nclass = 2;
+    ncpus = pmc_cpu_max();

-    /* TSC */
-    pmc_mdep->pmd_classes[0].pm_class = PMC_CLASS_TSC;
-    pmc_mdep->pmd_classes[0].pm_caps  = PMC_CAP_READ;
-    pmc_mdep->pmd_classes[0].pm_width = 64;
+    /* Initialize the TSC. */
+    error = pmc_tsc_initialize(pmc_mdep, ncpus);
+    if (error)
+        goto error;

-    /* AMD K7/K8 PMCs */
-    pmc_mdep->pmd_classes[1].pm_class = class;
-    pmc_mdep->pmd_classes[1].pm_caps  = AMD_PMC_CAPS;
-    pmc_mdep->pmd_classes[1].pm_width = 48;
+    /* Initialize AMD K7 and K8 PMC handling. */
+    pcd = &pmc_mdep->pmd_classdep[classindex];

982 pmc_mdep->pmd_nclasspmcs[0] = 1;
983 pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1);
951 pcd->pcd_caps = AMD_PMC_CAPS;
952 pcd->pcd_class = class;
953 pcd->pcd_num = AMD_NPMCS;
954 pcd->pcd_ri = pmc_mdep->pmd_npmc;
955 pcd->pcd_width = 48;

     /* fill in the correct pmc name and class */
986 for (i = 1; i < AMD_NPMCS; i++) {
958 for (i = 0; i < AMD_NPMCS; i++) {
         (void) snprintf(amd_pmcdesc[i].pm_descr.pd_name,
             sizeof(amd_pmcdesc[i].pm_descr.pd_name), "%s-%d",
             name, i-1);
         amd_pmcdesc[i].pm_descr.pd_class = class;
     }

993 pmc_mdep->pmd_init = amd_init;
994 pmc_mdep->pmd_cleanup = amd_cleanup;
995 pmc_mdep->pmd_switch_in = amd_switch_in;
996 pmc_mdep->pmd_switch_out = amd_switch_out;
997 pmc_mdep->pmd_read_pmc = amd_read_pmc;
998 pmc_mdep->pmd_write_pmc = amd_write_pmc;
999 pmc_mdep->pmd_config_pmc = amd_config_pmc;
1000 pmc_mdep->pmd_get_config = amd_get_config;
1001 pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc;
1002 pmc_mdep->pmd_release_pmc = amd_release_pmc;
1003 pmc_mdep->pmd_start_pmc = amd_start_pmc;
1004 pmc_mdep->pmd_stop_pmc = amd_stop_pmc;
1005 pmc_mdep->pmd_intr = amd_intr;
1006 pmc_mdep->pmd_describe = amd_describe;
1007 pmc_mdep->pmd_get_msr = amd_get_msr; /* i386 */
965 pcd->pcd_allocate_pmc = amd_allocate_pmc;
966 pcd->pcd_config_pmc = amd_config_pmc;
967 pcd->pcd_describe = amd_describe;
968 pcd->pcd_get_config = amd_get_config;
969 pcd->pcd_get_msr = amd_get_msr;
970 pcd->pcd_pcpu_fini = amd_pcpu_fini;
971 pcd->pcd_pcpu_init = amd_pcpu_init;
972 pcd->pcd_read_pmc = amd_read_pmc;
973 pcd->pcd_release_pmc = amd_release_pmc;
974 pcd->pcd_start_pmc = amd_start_pmc;
975 pcd->pcd_stop_pmc = amd_stop_pmc;
976 pcd->pcd_write_pmc = amd_write_pmc;
1008
977
978 pmc_mdep->pmd_pcpu_init = NULL;
979 pmc_mdep->pmd_pcpu_fini = NULL;
980 pmc_mdep->pmd_intr = amd_intr;
981 pmc_mdep->pmd_switch_in = amd_switch_in;
982 pmc_mdep->pmd_switch_out = amd_switch_out;
983
984 pmc_mdep->pmd_npmc += AMD_NPMCS;
985
     PMCDBG(MDP,INI,0,"%s","amd-initialize");

-    return pmc_mdep;
+    return (pmc_mdep);
+
+  error:
+    if (error) {
+        free(pmc_mdep, M_PMC);
+        pmc_mdep = NULL;
+    }
+
+    return (NULL);
 }
+
+/*
+ * Finalization code for AMD CPUs.
+ */
+
+void
+pmc_amd_finalize(struct pmc_mdep *md)
+{
+#if defined(INVARIANTS)
+    int classindex, i, ncpus, pmcclass;
+#endif
+
+    pmc_tsc_finalize(md);
+
+    KASSERT(amd_pcpu != NULL, ("[amd,%d] NULL per-cpu array pointer",
+        __LINE__));
+
+#if defined(INVARIANTS)
+    switch (md->pmd_cputype) {
+#if defined(__i386__)
+    case PMC_CPU_AMD_K7:
+        classindex = PMC_MDEP_CLASS_INDEX_K7;
+        pmcclass = PMC_CLASS_K7;
+        break;
+#endif
+    default:
+        classindex = PMC_MDEP_CLASS_INDEX_K8;
+        pmcclass = PMC_CLASS_K8;
+    }
+
+    KASSERT(md->pmd_classdep[classindex].pcd_class == pmcclass,
+        ("[amd,%d] pmc class mismatch", __LINE__));
+
+    ncpus = pmc_cpu_max();
+
+    for (i = 0; i < ncpus; i++)
+        KASSERT(amd_pcpu[i] == NULL, ("[amd,%d] non-null pcpu",
+            __LINE__));
+#endif
+
+    free(amd_pcpu, M_PMC);
+    amd_pcpu = NULL;
+}