hwpmc_piv.c (184214) | hwpmc_piv.c (184802) |
---|---|
1/*- 2 * Copyright (c) 2003-2007 Joseph Koshy 3 * Copyright (c) 2007 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by A. Joseph Koshy under 7 * sponsorship from the FreeBSD Foundation and Google, Inc. 8 * --- 15 unchanged lines hidden (view full) --- 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31#include <sys/cdefs.h> | 1/*- 2 * Copyright (c) 2003-2007 Joseph Koshy 3 * Copyright (c) 2007 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by A. Joseph Koshy under 7 * sponsorship from the FreeBSD Foundation and Google, Inc. 8 * --- 15 unchanged lines hidden (view full) --- 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31#include <sys/cdefs.h> |
32__FBSDID("$FreeBSD: head/sys/dev/hwpmc/hwpmc_piv.c 184214 2008-10-23 20:26:15Z des $"); | 32__FBSDID("$FreeBSD: head/sys/dev/hwpmc/hwpmc_piv.c 184802 2008-11-09 17:37:54Z jkoshy $"); |
33 34#include <sys/param.h> 35#include <sys/lock.h> 36#include <sys/mutex.h> 37#include <sys/pmc.h> 38#include <sys/pmckern.h> 39#include <sys/smp.h> 40#include <sys/systm.h> --- 107 unchanged lines hidden (view full) --- 148 * '-' : point in time where a thread is taken off a CPU. 149 * 150 * Handling HTT CONFIG 151 * 152 * Different processes attached to the same PMC may get scheduled on 153 * the two logical processors in the package. We keep track of config 154 * and de-config operations using the CFGFLAGS fields of the per-physical 155 * cpu state. | 33 34#include <sys/param.h> 35#include <sys/lock.h> 36#include <sys/mutex.h> 37#include <sys/pmc.h> 38#include <sys/pmckern.h> 39#include <sys/smp.h> 40#include <sys/systm.h> --- 107 unchanged lines hidden (view full) --- 148 * '-' : point in time where a thread is taken off a CPU. 149 * 150 * Handling HTT CONFIG 151 * 152 * Different processes attached to the same PMC may get scheduled on 153 * the two logical processors in the package. We keep track of config 154 * and de-config operations using the CFGFLAGS fields of the per-physical 155 * cpu state. |
156 * 157 * Handling TSCs 158 * 159 * TSCs are architectural state and each CPU in a HTT pair has its own 160 * TSC register. | |
161 */ 162 163#define P4_PMCS() \ 164 P4_PMC(BPU_COUNTER0) \ 165 P4_PMC(BPU_COUNTER1) \ 166 P4_PMC(BPU_COUNTER2) \ 167 P4_PMC(BPU_COUNTER3) \ 168 P4_PMC(MS_COUNTER0) \ --- 190 unchanged lines hidden (view full) --- 359struct p4pmc_descr { 360 struct pmc_descr pm_descr; /* common information */ 361 enum pmc_p4pmc pm_pmcnum; /* PMC number */ 362 uint32_t pm_pmc_msr; /* PERFCTR MSR address */ 363 uint32_t pm_cccr_msr; /* CCCR MSR address */ 364}; 365 366static struct p4pmc_descr p4_pmcdesc[P4_NPMCS] = { | 156 */ 157 158#define P4_PMCS() \ 159 P4_PMC(BPU_COUNTER0) \ 160 P4_PMC(BPU_COUNTER1) \ 161 P4_PMC(BPU_COUNTER2) \ 162 P4_PMC(BPU_COUNTER3) \ 163 P4_PMC(MS_COUNTER0) \ --- 190 unchanged lines hidden (view full) --- 354struct p4pmc_descr { 355 struct pmc_descr pm_descr; /* common information */ 356 enum pmc_p4pmc pm_pmcnum; /* PMC number */ 357 uint32_t pm_pmc_msr; /* PERFCTR MSR address */ 358 uint32_t pm_cccr_msr; /* CCCR MSR address */ 359}; 360 361static struct p4pmc_descr p4_pmcdesc[P4_NPMCS] = { |
367 368 /* 369 * TSC descriptor 370 */ 371 372 { 373 .pm_descr = 374 { 375 .pd_name = "TSC", 376 .pd_class = PMC_CLASS_TSC, 377 .pd_caps = PMC_CAP_READ | PMC_CAP_WRITE, 378 .pd_width = 64 379 }, 380 .pm_pmcnum = ~0, 381 .pm_cccr_msr = ~0, 382 .pm_pmc_msr = 0x10, 383 }, 384 385 /* 386 * P4 PMCS 387 */ 388 | |
389#define P4_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \ 390 PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \ 391 PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE | \ 392 PMC_CAP_TAGGING | PMC_CAP_CASCADE) 393 394#define P4_PMCDESCR(N, PMC, CCCR) \ 395 { \ 396 .pm_descr = \ --- 33 unchanged lines hidden (view full) --- 430/* HTT support */ 431#define P4_NHTT 2 /* logical processors/chip */ 432 433static int p4_system_has_htt; 434 435/* 436 * Per-CPU data structure for P4 class CPUs 437 * | 362#define P4_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \ 363 PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \ 364 PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE | \ 365 PMC_CAP_TAGGING | PMC_CAP_CASCADE) 366 367#define P4_PMCDESCR(N, PMC, CCCR) \ 368 { \ 369 .pm_descr = \ --- 33 unchanged lines hidden (view full) --- 403/* HTT support */ 404#define P4_NHTT 2 /* logical processors/chip */ 405 406static int p4_system_has_htt; 407 408/* 409 * Per-CPU data structure for P4 class CPUs 410 * |
438 * [common stuff] 439 * [19 struct pmc_hw pointers] | |
440 * [19 struct pmc_hw structures] 441 * [45 ESCRs status bytes] 442 * [per-cpu spin mutex] 443 * [19 flag fields for holding config flags and a runcount] 444 * [19*2 hw value fields] (Thread mode PMC support) 445 * or 446 * [19*2 EIP values] (Sampling mode PMCs) 447 * [19*2 pmc value fields] (Thread mode PMC support)) 448 */ 449 450struct p4_cpu { | 411 * [19 struct pmc_hw structures] 412 * [45 ESCRs status bytes] 413 * [per-cpu spin mutex] 414 * [19 flag fields for holding config flags and a runcount] 415 * [19*2 hw value fields] (Thread mode PMC support) 416 * or 417 * [19*2 EIP values] (Sampling mode PMCs) 418 * [19*2 pmc value fields] (Thread mode PMC support)) 419 */ 420 421struct p4_cpu { |
451 struct pmc_cpu pc_common; 452 struct pmc_hw *pc_hwpmcs[P4_NPMCS]; | |
453 struct pmc_hw pc_p4pmcs[P4_NPMCS]; 454 char pc_escrs[P4_NESCR]; 455 struct mtx pc_mtx; /* spin lock */ 456 uint32_t pc_intrflag; /* NMI handler flags */ 457 unsigned int pc_intrlock; /* NMI handler spin lock */ 458 unsigned char pc_flags[P4_NPMCS]; /* 4 bits each: {cfg,run}count */ 459 union { 460 pmc_value_t pc_hw[P4_NPMCS * P4_NHTT]; 461 uintptr_t pc_ip[P4_NPMCS * P4_NHTT]; 462 } pc_si; 463 pmc_value_t pc_pmc_values[P4_NPMCS * P4_NHTT]; 464}; 465 | 422 struct pmc_hw pc_p4pmcs[P4_NPMCS]; 423 char pc_escrs[P4_NESCR]; 424 struct mtx pc_mtx; /* spin lock */ 425 uint32_t pc_intrflag; /* NMI handler flags */ 426 unsigned int pc_intrlock; /* NMI handler spin lock */ 427 unsigned char pc_flags[P4_NPMCS]; /* 4 bits each: {cfg,run}count */ 428 union { 429 pmc_value_t pc_hw[P4_NPMCS * P4_NHTT]; 430 uintptr_t pc_ip[P4_NPMCS * P4_NHTT]; 431 } pc_si; 432 pmc_value_t pc_pmc_values[P4_NPMCS * P4_NHTT]; 433}; 434 |
466/* 467 * A 'logical' CPU shares PMC resources with partner 'physical' CPU, 468 * except the TSC, which is architectural and hence seperate. The 469 * 'logical' CPU descriptor thus has pointers to the physical CPUs 470 * descriptor state except for the TSC (rowindex 0) which is not 471 * shared. 472 */ | 435static struct p4_cpu **p4_pcpu; |
473 | 436 |
474struct p4_logicalcpu { 475 struct pmc_cpu pc_common; 476 struct pmc_hw *pc_hwpmcs[P4_NPMCS]; 477 struct pmc_hw pc_tsc; 478}; 479 | |
480#define P4_PCPU_PMC_VALUE(PC,RI,CPU) (PC)->pc_pmc_values[(RI)*((CPU) & 1)] 481#define P4_PCPU_HW_VALUE(PC,RI,CPU) (PC)->pc_si.pc_hw[(RI)*((CPU) & 1)] 482#define P4_PCPU_SAVED_IP(PC,RI,CPU) (PC)->pc_si.pc_ip[(RI)*((CPU) & 1)] 483 484#define P4_PCPU_GET_FLAGS(PC,RI,MASK) ((PC)->pc_flags[(RI)] & (MASK)) 485#define P4_PCPU_SET_FLAGS(PC,RI,MASK,VAL) do { \ 486 char _tmp; \ 487 _tmp = (PC)->pc_flags[(RI)]; \ --- 86 unchanged lines hidden (view full) --- 574p4_find_event(enum pmc_event ev) 575{ 576 int n; 577 578 for (n = 0; n < P4_NEVENTS; n++) 579 if (p4_events[n].pm_event == ev) 580 break; 581 if (n == P4_NEVENTS) | 437#define P4_PCPU_PMC_VALUE(PC,RI,CPU) (PC)->pc_pmc_values[(RI)*((CPU) & 1)] 438#define P4_PCPU_HW_VALUE(PC,RI,CPU) (PC)->pc_si.pc_hw[(RI)*((CPU) & 1)] 439#define P4_PCPU_SAVED_IP(PC,RI,CPU) (PC)->pc_si.pc_ip[(RI)*((CPU) & 1)] 440 441#define P4_PCPU_GET_FLAGS(PC,RI,MASK) ((PC)->pc_flags[(RI)] & (MASK)) 442#define P4_PCPU_SET_FLAGS(PC,RI,MASK,VAL) do { \ 443 char _tmp; \ 444 _tmp = (PC)->pc_flags[(RI)]; \ --- 86 unchanged lines hidden (view full) --- 531p4_find_event(enum pmc_event ev) 532{ 533 int n; 534 535 for (n = 0; n < P4_NEVENTS; n++) 536 if (p4_events[n].pm_event == ev) 537 break; 538 if (n == P4_NEVENTS) |
582 return NULL; 583 return &p4_events[n]; | 539 return (NULL); 540 return (&p4_events[n]); |
584} 585 586/* 587 * Initialize per-cpu state 588 */ 589 590static int | 541} 542 543/* 544 * Initialize per-cpu state 545 */ 546 547static int |
591p4_init(int cpu) | 548p4_pcpu_init(struct pmc_mdep *md, int cpu) |
592{ | 549{ |
593 int n, phycpu; | |
594 char *pescr; | 550 char *pescr; |
595 struct p4_cpu *pcs; 596 struct p4_logicalcpu *plcs; | 551 int n, first_ri, phycpu; |
597 struct pmc_hw *phw; | 552 struct pmc_hw *phw; |
553 struct p4_cpu *p4c; 554 struct pmc_cpu *pc, *plc; |
|
598 599 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 600 ("[p4,%d] insane cpu number %d", __LINE__, cpu)); 601 602 PMCDBG(MDP,INI,0, "p4-init cpu=%d is-primary=%d", cpu, 603 pmc_cpu_is_primary(cpu) != 0); 604 | 555 556 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 557 ("[p4,%d] insane cpu number %d", __LINE__, cpu)); 558 559 PMCDBG(MDP,INI,0, "p4-init cpu=%d is-primary=%d", cpu, 560 pmc_cpu_is_primary(cpu) != 0); 561 |
562 first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P4].pcd_ri; 563 |
|
605 /* 606 * The two CPUs in an HT pair share their per-cpu state. 607 * 608 * For HT capable CPUs, we assume that the two logical 609 * processors in the HT pair get two consecutive CPU ids 610 * starting with an even id #. 611 * 612 * The primary CPU (the even numbered CPU of the pair) would 613 * have been initialized prior to the initialization for the 614 * secondary. 615 */ 616 617 if (!pmc_cpu_is_primary(cpu) && (cpu & 1)) { 618 619 p4_system_has_htt = 1; 620 621 phycpu = P4_TO_HTT_PRIMARY(cpu); | 564 /* 565 * The two CPUs in an HT pair share their per-cpu state. 566 * 567 * For HT capable CPUs, we assume that the two logical 568 * processors in the HT pair get two consecutive CPU ids 569 * starting with an even id #. 570 * 571 * The primary CPU (the even numbered CPU of the pair) would 572 * have been initialized prior to the initialization for the 573 * secondary. 574 */ 575 576 if (!pmc_cpu_is_primary(cpu) && (cpu & 1)) { 577 578 p4_system_has_htt = 1; 579 580 phycpu = P4_TO_HTT_PRIMARY(cpu); |
622 pcs = (struct p4_cpu *) pmc_pcpu[phycpu]; 623 PMCDBG(MDP,INI,1, "p4-init cpu=%d phycpu=%d pcs=%p", 624 cpu, phycpu, pcs); 625 KASSERT(pcs, 626 ("[p4,%d] Null Per-Cpu state cpu=%d phycpu=%d", __LINE__, 627 cpu, phycpu)); 628 if (pcs == NULL) /* decline to init */ 629 return ENXIO; | 581 pc = pmc_pcpu[phycpu]; 582 plc = pmc_pcpu[cpu]; |
630 | 583 |
631 plcs = malloc(sizeof(struct p4_logicalcpu), 632 M_PMC, M_WAITOK|M_ZERO); | 584 KASSERT(plc != pc, ("[p4,%d] per-cpu config error", __LINE__)); |
633 | 585 |
634 /* The TSC is architectural state and is not shared */ 635 plcs->pc_hwpmcs[0] = &plcs->pc_tsc; 636 plcs->pc_tsc.phw_state = PMC_PHW_FLAG_IS_ENABLED | 637 PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(0) | 638 PMC_PHW_FLAG_IS_SHAREABLE; | 586 PMCDBG(MDP,INI,1, "p4-init cpu=%d phycpu=%d pc=%p", cpu, 587 phycpu, pc); 588 KASSERT(pc, ("[p4,%d] Null Per-Cpu state cpu=%d phycpu=%d", 589 __LINE__, cpu, phycpu)); |
639 | 590 |
640 /* Other PMCs are shared with the physical CPU */ 641 for (n = 1; n < P4_NPMCS; n++) 642 plcs->pc_hwpmcs[n] = pcs->pc_hwpmcs[n]; | 591 /* PMCs are shared with the physical CPU. */ 592 for (n = 0; n < P4_NPMCS; n++) 593 plc->pc_hwpmcs[n + first_ri] = 594 pc->pc_hwpmcs[n + first_ri]; |
643 | 595 |
644 pmc_pcpu[cpu] = (struct pmc_cpu *) plcs; 645 return 0; | 596 return (0); |
646 } 647 | 597 } 598 |
648 pcs = malloc(sizeof(struct p4_cpu), M_PMC, M_WAITOK|M_ZERO); | 599 p4c = malloc(sizeof(struct p4_cpu), M_PMC, M_WAITOK|M_ZERO); |
649 | 600 |
650 if (pcs == NULL) 651 return ENOMEM; 652 phw = pcs->pc_p4pmcs; | 601 if (p4c == NULL) 602 return (ENOMEM); |
653 | 603 |
604 pc = pmc_pcpu[cpu]; 605 606 KASSERT(pc != NULL, ("[p4,%d] cpu %d null per-cpu", __LINE__, cpu)); 607 608 p4_pcpu[cpu] = p4c; 609 phw = p4c->pc_p4pmcs; 610 |
|
654 for (n = 0; n < P4_NPMCS; n++, phw++) { 655 phw->phw_state = PMC_PHW_FLAG_IS_ENABLED | 656 PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n); 657 phw->phw_pmc = NULL; | 611 for (n = 0; n < P4_NPMCS; n++, phw++) { 612 phw->phw_state = PMC_PHW_FLAG_IS_ENABLED | 613 PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n); 614 phw->phw_pmc = NULL; |
658 pcs->pc_hwpmcs[n] = phw; | 615 pc->pc_hwpmcs[n + first_ri] = phw; |
659 } 660 | 616 } 617 |
661 /* Mark the TSC as shareable */ 662 pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE; 663 664 pescr = pcs->pc_escrs; | 618 pescr = p4c->pc_escrs; |
665 for (n = 0; n < P4_NESCR; n++) 666 *pescr++ = P4_INVALID_PMC_INDEX; | 619 for (n = 0; n < P4_NESCR; n++) 620 *pescr++ = P4_INVALID_PMC_INDEX; |
667 pmc_pcpu[cpu] = (struct pmc_cpu *) pcs; | |
668 | 621 |
669 mtx_init(&pcs->pc_mtx, "p4-pcpu", "pmc-leaf", MTX_SPIN); | 622 mtx_init(&p4c->pc_mtx, "p4-pcpu", "pmc-leaf", MTX_SPIN); |
670 | 623 |
671 return 0; | 624 return (0); |
672} 673 674/* 675 * Destroy per-cpu state. 676 */ 677 678static int | 625} 626 627/* 628 * Destroy per-cpu state. 629 */ 630 631static int |
679p4_cleanup(int cpu) | 632p4_pcpu_fini(struct pmc_mdep *md, int cpu) |
680{ | 633{ |
681 int i; 682 struct p4_cpu *pcs; | 634 int first_ri, i; 635 struct p4_cpu *p4c; 636 struct pmc_cpu *pc; |
683 684 PMCDBG(MDP,INI,0, "p4-cleanup cpu=%d", cpu); 685 | 637 638 PMCDBG(MDP,INI,0, "p4-cleanup cpu=%d", cpu); 639 |
686 if ((pcs = (struct p4_cpu *) pmc_pcpu[cpu]) == NULL) 687 return 0; | 640 pc = pmc_pcpu[cpu]; 641 first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P4].pcd_ri; |
688 | 642 |
643 for (i = 0; i < P4_NPMCS; i++) 644 pc->pc_hwpmcs[i + first_ri] = NULL; 645 646 if (!pmc_cpu_is_primary(cpu) && (cpu & 1)) 647 return (0); 648 649 p4c = p4_pcpu[cpu]; 650 651 KASSERT(p4c != NULL, ("[p4,%d] NULL pcpu", __LINE__)); 652 |
|
689 /* Turn off all PMCs on this CPU */ 690 for (i = 0; i < P4_NPMCS - 1; i++) 691 wrmsr(P4_CCCR_MSR_FIRST + i, 692 rdmsr(P4_CCCR_MSR_FIRST + i) & ~P4_CCCR_ENABLE); 693 | 653 /* Turn off all PMCs on this CPU */ 654 for (i = 0; i < P4_NPMCS - 1; i++) 655 wrmsr(P4_CCCR_MSR_FIRST + i, 656 rdmsr(P4_CCCR_MSR_FIRST + i) & ~P4_CCCR_ENABLE); 657 |
694 /* 695 * If the CPU is physical we need to teardown the 696 * full MD state. 697 */ 698 if (!P4_CPU_IS_HTT_SECONDARY(cpu)) 699 mtx_destroy(&pcs->pc_mtx); | 658 mtx_destroy(&p4c->pc_mtx); |
700 | 659 |
701 free(pcs, M_PMC); | 660 free(p4c, M_PMC); |
702 | 661 |
703 pmc_pcpu[cpu] = NULL; | 662 p4_pcpu[cpu] = NULL; |
704 | 663 |
705 return 0; | 664 return (0); |
706} 707 708/* | 665} 666 667/* |
709 * Context switch in. 710 */ 711 712static int 713p4_switch_in(struct pmc_cpu *pc, struct pmc_process *pp) 714{ 715 (void) pc; 716 717 PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp, 718 (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0); 719 720 /* enable the RDPMC instruction */ 721 if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) 722 load_cr4(rcr4() | CR4_PCE); 723 724 PMCDBG(MDP,SWI,2, "cr4=0x%x", (uint32_t) rcr4()); 725 726 return 0; 727} 728 729/* 730 * Context switch out. 731 */ 732 733static int 734p4_switch_out(struct pmc_cpu *pc, struct pmc_process *pp) 735{ 736 (void) pc; 737 (void) pp; /* can be null */ 738 739 PMCDBG(MDP,SWO,1, "pc=%p pp=%p", pc, pp); 740 741 /* always disallow the RDPMC instruction */ 742 load_cr4(rcr4() & ~CR4_PCE); 743 744 PMCDBG(MDP,SWO,2, "cr4=0x%x", (uint32_t) rcr4()); 745 746 return 0; 747} 748 749/* | |
750 * Read a PMC 751 */ 752 753static int 754p4_read_pmc(int cpu, int ri, pmc_value_t *v) 755{ | 668 * Read a PMC 669 */ 670 671static int 672p4_read_pmc(int cpu, int ri, pmc_value_t *v) 673{ |
756 enum pmc_mode mode; 757 struct p4pmc_descr *pd; | |
758 struct pmc *pm; | 674 struct pmc *pm; |
759 struct p4_cpu *pc; 760 struct pmc_hw *phw; | |
761 pmc_value_t tmp; | 675 pmc_value_t tmp; |
676 struct p4_cpu *pc; 677 enum pmc_mode mode; 678 struct p4pmc_descr *pd; |
|
762 763 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 764 ("[p4,%d] illegal CPU value %d", __LINE__, cpu)); 765 KASSERT(ri >= 0 && ri < P4_NPMCS, 766 ("[p4,%d] illegal row-index %d", __LINE__, ri)); 767 | 679 680 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 681 ("[p4,%d] illegal CPU value %d", __LINE__, cpu)); 682 KASSERT(ri >= 0 && ri < P4_NPMCS, 683 ("[p4,%d] illegal row-index %d", __LINE__, ri)); 684 |
685 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 686 pm = pc->pc_p4pmcs[ri].phw_pmc; 687 pd = &p4_pmcdesc[ri]; |
|
768 | 688 |
769 if (ri == 0) { /* TSC */ 770#ifdef DEBUG 771 pc = (struct p4_cpu *) pmc_pcpu[cpu]; 772 phw = pc->pc_hwpmcs[ri]; 773 pm = phw->phw_pmc; 774 775 KASSERT(pm, ("[p4,%d] cpu=%d ri=%d not configured", __LINE__, 776 cpu, ri)); 777 KASSERT(PMC_TO_CLASS(pm) == PMC_CLASS_TSC, 778 ("[p4,%d] cpu=%d ri=%d not a TSC (%d)", __LINE__, cpu, ri, 779 PMC_TO_CLASS(pm))); 780 KASSERT(PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)), 781 ("[p4,%d] TSC counter in non-counting mode", __LINE__)); 782#endif 783 *v = rdtsc(); 784 PMCDBG(MDP,REA,2, "p4-read -> %jx", *v); 785 return 0; 786 } 787 788 pc = (struct p4_cpu *) pmc_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 789 phw = pc->pc_hwpmcs[ri]; 790 pd = &p4_pmcdesc[ri]; 791 pm = phw->phw_pmc; 792 | |
793 KASSERT(pm != NULL, | 689 KASSERT(pm != NULL, |
794 ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__, 795 cpu, ri)); | 690 ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__, cpu, ri)); |
796 797 KASSERT(pd->pm_descr.pd_class == PMC_TO_CLASS(pm), 798 ("[p4,%d] class mismatch pd %d != id class %d", __LINE__, | 691 692 KASSERT(pd->pm_descr.pd_class == PMC_TO_CLASS(pm), 693 ("[p4,%d] class mismatch pd %d != id class %d", __LINE__, |
799 pd->pm_descr.pd_class, PMC_TO_CLASS(pm))); | 694 pd->pm_descr.pd_class, PMC_TO_CLASS(pm))); |
800 801 mode = PMC_TO_MODE(pm); 802 803 PMCDBG(MDP,REA,1, "p4-read cpu=%d ri=%d mode=%d", cpu, ri, mode); 804 805 KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4, 806 ("[p4,%d] unknown PMC class %d", __LINE__, pd->pm_descr.pd_class)); 807 --- 9 unchanged lines hidden (view full) --- 817 } 818 819 if (PMC_IS_SAMPLING_MODE(mode)) /* undo transformation */ 820 *v = P4_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp); 821 else 822 *v = tmp; 823 824 PMCDBG(MDP,REA,2, "p4-read -> %jx", *v); | 695 696 mode = PMC_TO_MODE(pm); 697 698 PMCDBG(MDP,REA,1, "p4-read cpu=%d ri=%d mode=%d", cpu, ri, mode); 699 700 KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4, 701 ("[p4,%d] unknown PMC class %d", __LINE__, pd->pm_descr.pd_class)); 702 --- 9 unchanged lines hidden (view full) --- 712 } 713 714 if (PMC_IS_SAMPLING_MODE(mode)) /* undo transformation */ 715 *v = P4_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp); 716 else 717 *v = tmp; 718 719 PMCDBG(MDP,REA,2, "p4-read -> %jx", *v); |
825 return 0; | 720 721 return (0); |
826} 827 828/* 829 * Write a PMC 830 */ 831 832static int 833p4_write_pmc(int cpu, int ri, pmc_value_t v) --- 4 unchanged lines hidden (view full) --- 838 const struct pmc_hw *phw; 839 const struct p4pmc_descr *pd; 840 841 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 842 ("[amd,%d] illegal CPU value %d", __LINE__, cpu)); 843 KASSERT(ri >= 0 && ri < P4_NPMCS, 844 ("[amd,%d] illegal row-index %d", __LINE__, ri)); 845 | 722} 723 724/* 725 * Write a PMC 726 */ 727 728static int 729p4_write_pmc(int cpu, int ri, pmc_value_t v) --- 4 unchanged lines hidden (view full) --- 734 const struct pmc_hw *phw; 735 const struct p4pmc_descr *pd; 736 737 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 738 ("[amd,%d] illegal CPU value %d", __LINE__, cpu)); 739 KASSERT(ri >= 0 && ri < P4_NPMCS, 740 ("[amd,%d] illegal row-index %d", __LINE__, ri)); 741 |
846 847 /* 848 * The P4's TSC register is writeable, but we don't allow a 849 * write as changing the TSC's value could interfere with 850 * timekeeping and other system functions. 851 */ 852 if (ri == 0) { 853#ifdef DEBUG 854 pc = (struct p4_cpu *) pmc_pcpu[cpu]; 855 phw = pc->pc_hwpmcs[ri]; 856 pm = phw->phw_pmc; 857 KASSERT(pm, ("[p4,%d] cpu=%d ri=%d not configured", __LINE__, 858 cpu, ri)); 859 KASSERT(PMC_TO_CLASS(pm) == PMC_CLASS_TSC, 860 ("[p4,%d] cpu=%d ri=%d not a TSC (%d)", __LINE__, 861 cpu, ri, PMC_TO_CLASS(pm))); 862#endif 863 return 0; 864 } 865 866 /* Shared PMCs */ 867 pc = (struct p4_cpu *) pmc_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 868 phw = pc->pc_hwpmcs[ri]; | 742 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 743 phw = &pc->pc_p4pmcs[ri]; |
869 pm = phw->phw_pmc; 870 pd = &p4_pmcdesc[ri]; 871 872 KASSERT(pm != NULL, 873 ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__, 874 cpu, ri)); 875 876 mode = PMC_TO_MODE(pm); --- 9 unchanged lines hidden (view full) --- 886 if (PMC_IS_SAMPLING_MODE(mode)) 887 v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE(v); 888 889 if (PMC_IS_SYSTEM_MODE(mode)) 890 wrmsr(pd->pm_pmc_msr, v); 891 else 892 P4_PCPU_PMC_VALUE(pc,ri,cpu) = v; 893 | 744 pm = phw->phw_pmc; 745 pd = &p4_pmcdesc[ri]; 746 747 KASSERT(pm != NULL, 748 ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__, 749 cpu, ri)); 750 751 mode = PMC_TO_MODE(pm); --- 9 unchanged lines hidden (view full) --- 761 if (PMC_IS_SAMPLING_MODE(mode)) 762 v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE(v); 763 764 if (PMC_IS_SYSTEM_MODE(mode)) 765 wrmsr(pd->pm_pmc_msr, v); 766 else 767 P4_PCPU_PMC_VALUE(pc,ri,cpu) = v; 768 |
894 return 0; | 769 return (0); |
895} 896 897/* 898 * Configure a PMC 'pm' on the given CPU and row-index. 899 * 900 * 'pm' may be NULL to indicate de-configuration. 901 * 902 * On HTT systems, a PMC may get configured twice, once for each --- 6 unchanged lines hidden (view full) --- 909p4_config_pmc(int cpu, int ri, struct pmc *pm) 910{ 911 struct pmc_hw *phw; 912 struct p4_cpu *pc; 913 int cfgflags, cpuflag; 914 915 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 916 ("[p4,%d] illegal CPU %d", __LINE__, cpu)); | 770} 771 772/* 773 * Configure a PMC 'pm' on the given CPU and row-index. 774 * 775 * 'pm' may be NULL to indicate de-configuration. 776 * 777 * On HTT systems, a PMC may get configured twice, once for each --- 6 unchanged lines hidden (view full) --- 784p4_config_pmc(int cpu, int ri, struct pmc *pm) 785{ 786 struct pmc_hw *phw; 787 struct p4_cpu *pc; 788 int cfgflags, cpuflag; 789 790 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 791 ("[p4,%d] illegal CPU %d", __LINE__, cpu)); |
792 |
|
917 KASSERT(ri >= 0 && ri < P4_NPMCS, 918 ("[p4,%d] illegal row-index %d", __LINE__, ri)); 919 920 PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm); 921 | 793 KASSERT(ri >= 0 && ri < P4_NPMCS, 794 ("[p4,%d] illegal row-index %d", __LINE__, ri)); 795 796 PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm); 797 |
922 if (ri == 0) { /* TSC */ 923 pc = (struct p4_cpu *) pmc_pcpu[cpu]; 924 phw = pc->pc_hwpmcs[ri]; | 798 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 799 phw = &pc->pc_p4pmcs[ri]; |
925 | 800 |
926 KASSERT(pm == NULL || phw->phw_pmc == NULL, 927 ("[p4,%d] hwpmc doubly config'ed", __LINE__)); 928 phw->phw_pmc = pm; 929 return 0; 930 } 931 932 /* Shared PMCs */ 933 934 pc = (struct p4_cpu *) pmc_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 935 phw = pc->pc_hwpmcs[ri]; 936 | |
937 KASSERT(pm == NULL || phw->phw_pmc == NULL || 938 (p4_system_has_htt && phw->phw_pmc == pm), 939 ("[p4,%d] hwpmc not unconfigured before re-config", __LINE__)); 940 941 mtx_lock_spin(&pc->pc_mtx); 942 cfgflags = P4_PCPU_GET_CFGFLAGS(pc,ri); 943 944 KASSERT(cfgflags >= 0 || cfgflags <= 3, --- 25 unchanged lines hidden (view full) --- 970 KASSERT(cfgflags >= 0 || cfgflags <= 3, 971 ("[p4,%d] illegal runcount cfg=%d on cpu=%d ri=%d", __LINE__, 972 cfgflags, cpu, ri)); 973 974 P4_PCPU_SET_CFGFLAGS(pc,ri,cfgflags); 975 976 mtx_unlock_spin(&pc->pc_mtx); 977 | 801 KASSERT(pm == NULL || phw->phw_pmc == NULL || 802 (p4_system_has_htt && phw->phw_pmc == pm), 803 ("[p4,%d] hwpmc not unconfigured before re-config", __LINE__)); 804 805 mtx_lock_spin(&pc->pc_mtx); 806 cfgflags = P4_PCPU_GET_CFGFLAGS(pc,ri); 807 808 KASSERT(cfgflags >= 0 || cfgflags <= 3, --- 25 unchanged lines hidden (view full) --- 834 KASSERT(cfgflags >= 0 || cfgflags <= 3, 835 ("[p4,%d] illegal runcount cfg=%d on cpu=%d ri=%d", __LINE__, 836 cfgflags, cpu, ri)); 837 838 P4_PCPU_SET_CFGFLAGS(pc,ri,cfgflags); 839 840 mtx_unlock_spin(&pc->pc_mtx); 841 |
978 return 0; | 842 return (0); |
979} 980 981/* 982 * Retrieve a configured PMC pointer from hardware state. 983 */ 984 985static int 986p4_get_config(int cpu, int ri, struct pmc **ppm) 987{ | 843} 844 845/* 846 * Retrieve a configured PMC pointer from hardware state. 847 */ 848 849static int 850p4_get_config(int cpu, int ri, struct pmc **ppm) 851{ |
988 struct p4_cpu *pc; 989 struct pmc_hw *phw; | |
990 int cfgflags; | 852 int cfgflags; |
853 struct p4_cpu *pc; |
|
991 | 854 |
992 pc = (struct p4_cpu *) pmc_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 993 phw = pc->pc_hwpmcs[ri]; | 855 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 856 ("[p4,%d] illegal CPU %d", __LINE__, cpu)); 857 KASSERT(ri >= 0 && ri < P4_NPMCS, 858 ("[p4,%d] illegal row-index %d", __LINE__, ri)); |
994 | 859 |
860 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 861 |
|
995 mtx_lock_spin(&pc->pc_mtx); 996 cfgflags = P4_PCPU_GET_CFGFLAGS(pc,ri); 997 mtx_unlock_spin(&pc->pc_mtx); 998 999 if (cfgflags & P4_CPU_TO_FLAG(cpu)) | 862 mtx_lock_spin(&pc->pc_mtx); 863 cfgflags = P4_PCPU_GET_CFGFLAGS(pc,ri); 864 mtx_unlock_spin(&pc->pc_mtx); 865 866 if (cfgflags & P4_CPU_TO_FLAG(cpu)) |
1000 *ppm = phw->phw_pmc; /* PMC config'ed on this CPU */ | 867 *ppm = pc->pc_p4pmcs[ri].phw_pmc; /* PMC config'ed on this CPU */ |
1001 else 1002 *ppm = NULL; 1003 1004 return 0; 1005} 1006 1007/* 1008 * Allocate a PMC. --- 48 unchanged lines hidden (view full) --- 1057 pd = &p4_pmcdesc[ri]; 1058 1059 PMCDBG(MDP,ALL,1, "p4-allocate ri=%d class=%d pmccaps=0x%x " 1060 "reqcaps=0x%x", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps, 1061 pm->pm_caps); 1062 1063 /* check class */ 1064 if (pd->pm_descr.pd_class != a->pm_class) | 868 else 869 *ppm = NULL; 870 871 return 0; 872} 873 874/* 875 * Allocate a PMC. --- 48 unchanged lines hidden (view full) --- 924 pd = &p4_pmcdesc[ri]; 925 926 PMCDBG(MDP,ALL,1, "p4-allocate ri=%d class=%d pmccaps=0x%x " 927 "reqcaps=0x%x", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps, 928 pm->pm_caps); 929 930 /* check class */ 931 if (pd->pm_descr.pd_class != a->pm_class) |
1065 return EINVAL; | 932 return (EINVAL); |
1066 1067 /* check requested capabilities */ 1068 caps = a->pm_caps; 1069 if ((pd->pm_descr.pd_caps & caps) != caps) | 933 934 /* check requested capabilities */ 935 caps = a->pm_caps; 936 if ((pd->pm_descr.pd_caps & caps) != caps) |
1070 return EPERM; | 937 return (EPERM); |
1071 | 938 |
1072 if (pd->pm_descr.pd_class == PMC_CLASS_TSC) { 1073 /* TSC's are always allocated in system-wide counting mode */ 1074 if (a->pm_ev != PMC_EV_TSC_TSC || 1075 a->pm_mode != PMC_MODE_SC) 1076 return EINVAL; 1077 return 0; 1078 } 1079 | |
1080 /* 1081 * If the system has HTT enabled, and the desired allocation 1082 * mode is process-private, and the PMC row disposition is not | 939 /* 940 * If the system has HTT enabled, and the desired allocation 941 * mode is process-private, and the PMC row disposition is not |
1083 * free (0), decline the allocation. | 942 * FREE (0), decline the allocation. |
1084 */ 1085 1086 if (p4_system_has_htt && 1087 PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) && 1088 pmc_getrowdisp(ri) != 0) | 943 */ 944 945 if (p4_system_has_htt && 946 PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) && 947 pmc_getrowdisp(ri) != 0) |
1089 return EBUSY; | 948 return (EBUSY); |
1090 1091 KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4, 1092 ("[p4,%d] unknown PMC class %d", __LINE__, 1093 pd->pm_descr.pd_class)); 1094 1095 if (pm->pm_event < PMC_EV_P4_FIRST || 1096 pm->pm_event > PMC_EV_P4_LAST) | 949 950 KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4, 951 ("[p4,%d] unknown PMC class %d", __LINE__, 952 pd->pm_descr.pd_class)); 953 954 if (pm->pm_event < PMC_EV_P4_FIRST || 955 pm->pm_event > PMC_EV_P4_LAST) |
1097 return EINVAL; | 956 return (EINVAL); |
1098 1099 if ((pevent = p4_find_event(pm->pm_event)) == NULL) | 957 958 if ((pevent = p4_find_event(pm->pm_event)) == NULL) |
1100 return ESRCH; | 959 return (ESRCH); |
1101 1102 PMCDBG(MDP,ALL,2, "pevent={ev=%d,escrsel=0x%x,cccrsel=0x%x,isti=%d}", 1103 pevent->pm_event, pevent->pm_escr_eventselect, 1104 pevent->pm_cccr_select, pevent->pm_is_ti_event); 1105 1106 /* 1107 * Some PMC events are 'thread independent'and therefore 1108 * cannot be used for process-private modes if HTT is being 1109 * used. 1110 */ 1111 1112 if (P4_EVENT_IS_TI(pevent) && 1113 PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) && 1114 p4_system_has_htt) | 960 961 PMCDBG(MDP,ALL,2, "pevent={ev=%d,escrsel=0x%x,cccrsel=0x%x,isti=%d}", 962 pevent->pm_event, pevent->pm_escr_eventselect, 963 pevent->pm_cccr_select, pevent->pm_is_ti_event); 964 965 /* 966 * Some PMC events are 'thread independent'and therefore 967 * cannot be used for process-private modes if HTT is being 968 * used. 969 */ 970 971 if (P4_EVENT_IS_TI(pevent) && 972 PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) && 973 p4_system_has_htt) |
1115 return EINVAL; | 974 return (EINVAL); |
1116 | 975 |
1117 pc = (struct p4_cpu *) pmc_pcpu[P4_TO_HTT_PRIMARY(cpu)]; | 976 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)]; |
1118 1119 found = 0; 1120 1121 /* look for a suitable ESCR for this event */ 1122 for (n = 0; n < P4_MAX_ESCR_PER_EVENT && !found; n++) { 1123 if ((escr = pevent->pm_escrs[n]) == P4_ESCR_NONE) 1124 break; /* out of ESCRs */ 1125 /* --- 39 unchanged lines hidden (view full) --- 1165 for (m = 0; m < P4_MAX_PMC_PER_ESCR; m++) 1166 if (p4_escrs[escr].pm_pmcs[m] == pd->pm_pmcnum) { 1167 found = 1; 1168 break; 1169 } 1170 } 1171 1172 if (found == 0) | 977 978 found = 0; 979 980 /* look for a suitable ESCR for this event */ 981 for (n = 0; n < P4_MAX_ESCR_PER_EVENT && !found; n++) { 982 if ((escr = pevent->pm_escrs[n]) == P4_ESCR_NONE) 983 break; /* out of ESCRs */ 984 /* --- 39 unchanged lines hidden (view full) --- 1024 for (m = 0; m < P4_MAX_PMC_PER_ESCR; m++) 1025 if (p4_escrs[escr].pm_pmcs[m] == pd->pm_pmcnum) { 1026 found = 1; 1027 break; 1028 } 1029 } 1030 1031 if (found == 0) |
1173 return ESRCH; | 1032 return (ESRCH); |
1174 1175 KASSERT((int) escr >= 0 && escr < P4_NESCR, 1176 ("[p4,%d] illegal ESCR value %d", __LINE__, escr)); 1177 1178 /* mark ESCR row mode */ 1179 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) { 1180 pc->pc_escrs[escr] = ri; /* mark ESCR as in use on this cpu */ 1181 P4_ESCR_MARK_ROW_STANDALONE(escr); --- 56 unchanged lines hidden (view full) --- 1238 1239 pm->pm_md.pm_p4.pm_p4_cccrvalue = cccrvalue; 1240 pm->pm_md.pm_p4.pm_p4_escrvalue = escrvalue; 1241 1242 PMCDBG(MDP,ALL,2, "p4-allocate cccrsel=0x%x cccrval=0x%x " 1243 "escr=%d escrmsr=0x%x escrval=0x%x", pevent->pm_cccr_select, 1244 cccrvalue, escr, pm->pm_md.pm_p4.pm_p4_escrmsr, escrvalue); 1245 | 1033 1034 KASSERT((int) escr >= 0 && escr < P4_NESCR, 1035 ("[p4,%d] illegal ESCR value %d", __LINE__, escr)); 1036 1037 /* mark ESCR row mode */ 1038 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) { 1039 pc->pc_escrs[escr] = ri; /* mark ESCR as in use on this cpu */ 1040 P4_ESCR_MARK_ROW_STANDALONE(escr); --- 56 unchanged lines hidden (view full) --- 1097 1098 pm->pm_md.pm_p4.pm_p4_cccrvalue = cccrvalue; 1099 pm->pm_md.pm_p4.pm_p4_escrvalue = escrvalue; 1100 1101 PMCDBG(MDP,ALL,2, "p4-allocate cccrsel=0x%x cccrval=0x%x " 1102 "escr=%d escrmsr=0x%x escrval=0x%x", pevent->pm_cccr_select, 1103 cccrvalue, escr, pm->pm_md.pm_p4.pm_p4_escrmsr, escrvalue); 1104 |
1246 return 0; | 1105 return (0); |
1247} 1248 1249/* 1250 * release a PMC. 1251 */ 1252 1253static int 1254p4_release_pmc(int cpu, int ri, struct pmc *pm) 1255{ 1256 enum pmc_p4escr escr; | 1106} 1107 1108/* 1109 * release a PMC. 1110 */ 1111 1112static int 1113p4_release_pmc(int cpu, int ri, struct pmc *pm) 1114{ 1115 enum pmc_p4escr escr; |
1257 struct pmc_hw *phw; | |
1258 struct p4_cpu *pc; 1259 | 1116 struct p4_cpu *pc; 1117 |
1260 if (p4_pmcdesc[ri].pm_descr.pd_class == PMC_CLASS_TSC) 1261 return 0; | 1118 KASSERT(ri >= 0 && ri < P4_NPMCS, 1119 ("[p4,%d] illegal row-index %d", __LINE__, ri)); |
1262 1263 escr = pm->pm_md.pm_p4.pm_p4_escr; 1264 1265 PMCDBG(MDP,REL,1, "p4-release cpu=%d ri=%d escr=%d", cpu, ri, escr); 1266 1267 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) { | 1120 1121 escr = pm->pm_md.pm_p4.pm_p4_escr; 1122 1123 PMCDBG(MDP,REL,1, "p4-release cpu=%d ri=%d escr=%d", cpu, ri, escr); 1124 1125 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) { |
1268 pc = (struct p4_cpu *) pmc_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 1269 phw = pc->pc_hwpmcs[ri]; | 1126 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)]; |
1270 | 1127 |
1271 KASSERT(phw->phw_pmc == NULL, | 1128 KASSERT(pc->pc_p4pmcs[ri].phw_pmc == NULL, |
1272 ("[p4,%d] releasing configured PMC ri=%d", __LINE__, ri)); 1273 1274 P4_ESCR_UNMARK_ROW_STANDALONE(escr); 1275 KASSERT(pc->pc_escrs[escr] == ri, 1276 ("[p4,%d] escr[%d] not allocated to ri %d", __LINE__, 1277 escr, ri)); 1278 pc->pc_escrs[escr] = P4_INVALID_PMC_INDEX; /* mark as free */ 1279 } else 1280 P4_ESCR_UNMARK_ROW_THREAD(escr); 1281 | 1129 ("[p4,%d] releasing configured PMC ri=%d", __LINE__, ri)); 1130 1131 P4_ESCR_UNMARK_ROW_STANDALONE(escr); 1132 KASSERT(pc->pc_escrs[escr] == ri, 1133 ("[p4,%d] escr[%d] not allocated to ri %d", __LINE__, 1134 escr, ri)); 1135 pc->pc_escrs[escr] = P4_INVALID_PMC_INDEX; /* mark as free */ 1136 } else 1137 P4_ESCR_UNMARK_ROW_THREAD(escr); 1138 |
1282 return 0; | 1139 return (0); |
1283} 1284 1285/* 1286 * Start a PMC 1287 */ 1288 1289static int 1290p4_start_pmc(int cpu, int ri) 1291{ 1292 int rc; | 1140} 1141 1142/* 1143 * Start a PMC 1144 */ 1145 1146static int 1147p4_start_pmc(int cpu, int ri) 1148{ 1149 int rc; |
1293 uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits; | |
1294 struct pmc *pm; 1295 struct p4_cpu *pc; | 1150 struct pmc *pm; 1151 struct p4_cpu *pc; |
1296 struct pmc_hw *phw; | |
1297 struct p4pmc_descr *pd; | 1152 struct p4pmc_descr *pd; |
1153 uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits; |
|
1298 1299 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 1300 ("[p4,%d] illegal CPU value %d", __LINE__, cpu)); 1301 KASSERT(ri >= 0 && ri < P4_NPMCS, 1302 ("[p4,%d] illegal row-index %d", __LINE__, ri)); 1303 | 1154 1155 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 1156 ("[p4,%d] illegal CPU value %d", __LINE__, cpu)); 1157 KASSERT(ri >= 0 && ri < P4_NPMCS, 1158 ("[p4,%d] illegal row-index %d", __LINE__, ri)); 1159 |
1304 pc = (struct p4_cpu *) pmc_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 1305 phw = pc->pc_hwpmcs[ri]; 1306 pm = phw->phw_pmc; 1307 pd = &p4_pmcdesc[ri]; | 1160 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 1161 pm = pc->pc_p4pmcs[ri].phw_pmc; 1162 pd = &p4_pmcdesc[ri]; |
1308 1309 KASSERT(pm != NULL, | 1163 1164 KASSERT(pm != NULL, |
1310 ("[p4,%d] starting cpu%d,pmc%d with null pmc", __LINE__, 1311 cpu, ri)); | 1165 ("[p4,%d] starting cpu%d,pmc%d with null pmc", __LINE__, cpu, ri)); |
1312 1313 PMCDBG(MDP,STA,1, "p4-start cpu=%d ri=%d", cpu, ri); 1314 | 1166 1167 PMCDBG(MDP,STA,1, "p4-start cpu=%d ri=%d", cpu, ri); 1168 |
1315 if (pd->pm_descr.pd_class == PMC_CLASS_TSC) /* TSC are always on */ 1316 return 0; 1317 | |
1318 KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4, 1319 ("[p4,%d] wrong PMC class %d", __LINE__, 1320 pd->pm_descr.pd_class)); 1321 1322 /* retrieve the desired CCCR/ESCR values from the PMC */ 1323 cccrvalue = pm->pm_md.pm_p4.pm_p4_cccrvalue; 1324 escrvalue = pm->pm_md.pm_p4.pm_p4_escrvalue; 1325 escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr; --- 99 unchanged lines hidden (view full) --- 1425 1426 mtx_unlock_spin(&pc->pc_mtx); 1427 1428 PMCDBG(MDP,STA,2,"p4-start cpu=%d rc=%d ri=%d escr=%d " 1429 "escrmsr=0x%x escrvalue=0x%x cccr_config=0x%x v=%jx", cpu, rc, 1430 ri, pm->pm_md.pm_p4.pm_p4_escr, escrmsr, escrvalue, 1431 cccrvalue, P4_PCPU_HW_VALUE(pc,ri,cpu)); 1432 | 1169 KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4, 1170 ("[p4,%d] wrong PMC class %d", __LINE__, 1171 pd->pm_descr.pd_class)); 1172 1173 /* retrieve the desired CCCR/ESCR values from the PMC */ 1174 cccrvalue = pm->pm_md.pm_p4.pm_p4_cccrvalue; 1175 escrvalue = pm->pm_md.pm_p4.pm_p4_escrvalue; 1176 escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr; --- 99 unchanged lines hidden (view full) --- 1276 1277 mtx_unlock_spin(&pc->pc_mtx); 1278 1279 PMCDBG(MDP,STA,2,"p4-start cpu=%d rc=%d ri=%d escr=%d " 1280 "escrmsr=0x%x escrvalue=0x%x cccr_config=0x%x v=%jx", cpu, rc, 1281 ri, pm->pm_md.pm_p4.pm_p4_escr, escrmsr, escrvalue, 1282 cccrvalue, P4_PCPU_HW_VALUE(pc,ri,cpu)); 1283 |
1433 return 0; | 1284 return (0); |
1434} 1435 1436/* 1437 * Stop a PMC. 1438 */ 1439 1440static int 1441p4_stop_pmc(int cpu, int ri) 1442{ 1443 int rc; 1444 uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits; 1445 struct pmc *pm; 1446 struct p4_cpu *pc; | 1285} 1286 1287/* 1288 * Stop a PMC. 1289 */ 1290 1291static int 1292p4_stop_pmc(int cpu, int ri) 1293{ 1294 int rc; 1295 uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits; 1296 struct pmc *pm; 1297 struct p4_cpu *pc; |
1447 struct pmc_hw *phw; | |
1448 struct p4pmc_descr *pd; 1449 pmc_value_t tmp; 1450 1451 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 1452 ("[p4,%d] illegal CPU value %d", __LINE__, cpu)); 1453 KASSERT(ri >= 0 && ri < P4_NPMCS, 1454 ("[p4,%d] illegal row index %d", __LINE__, ri)); 1455 | 1298 struct p4pmc_descr *pd; 1299 pmc_value_t tmp; 1300 1301 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 1302 ("[p4,%d] illegal CPU value %d", __LINE__, cpu)); 1303 KASSERT(ri >= 0 && ri < P4_NPMCS, 1304 ("[p4,%d] illegal row index %d", __LINE__, ri)); 1305 |
1456 pd = &p4_pmcdesc[ri]; | 1306 pd = &p4_pmcdesc[ri]; 1307 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 1308 pm = pc->pc_p4pmcs[ri].phw_pmc; |
1457 | 1309 |
1458 if (pd->pm_descr.pd_class == PMC_CLASS_TSC) 1459 return 0; 1460 1461 pc = (struct p4_cpu *) pmc_pcpu[P4_TO_HTT_PRIMARY(cpu)]; 1462 phw = pc->pc_hwpmcs[ri]; 1463 1464 KASSERT(phw != NULL, 1465 ("[p4,%d] null phw for cpu%d, ri%d", __LINE__, cpu, ri)); 1466 1467 pm = phw->phw_pmc; 1468 | |
1469 KASSERT(pm != NULL, 1470 ("[p4,%d] null pmc for cpu%d, ri%d", __LINE__, cpu, ri)); 1471 1472 PMCDBG(MDP,STO,1, "p4-stop cpu=%d ri=%d", cpu, ri); 1473 1474 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) { 1475 wrmsr(pd->pm_cccr_msr, 1476 pm->pm_md.pm_p4.pm_p4_cccrvalue & ~P4_CCCR_ENABLE); | 1310 KASSERT(pm != NULL, 1311 ("[p4,%d] null pmc for cpu%d, ri%d", __LINE__, cpu, ri)); 1312 1313 PMCDBG(MDP,STO,1, "p4-stop cpu=%d ri=%d", cpu, ri); 1314 1315 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) { 1316 wrmsr(pd->pm_cccr_msr, 1317 pm->pm_md.pm_p4.pm_p4_cccrvalue & ~P4_CCCR_ENABLE); |
1477 return 0; | 1318 return (0); |
1478 } 1479 1480 /* 1481 * Thread mode PMCs. 1482 * 1483 * On HTT machines, this PMC may be in use by two threads 1484 * running on two logical CPUS. Thus we look at the 1485 * 'runcount' field and only turn off the appropriate TO/T1 --- 78 unchanged lines hidden (view full) --- 1564 * will get serialized using a per-cpu spinlock dedicated for use in 1565 * the NMI handler. 1566 */ 1567 1568static int 1569p4_intr(int cpu, struct trapframe *tf) 1570{ 1571 uint32_t cccrval, ovf_mask, ovf_partner; | 1319 } 1320 1321 /* 1322 * Thread mode PMCs. 1323 * 1324 * On HTT machines, this PMC may be in use by two threads 1325 * running on two logical CPUS. Thus we look at the 1326 * 'runcount' field and only turn off the appropriate TO/T1 --- 78 unchanged lines hidden (view full) --- 1405 * will get serialized using a per-cpu spinlock dedicated for use in 1406 * the NMI handler. 1407 */ 1408 1409static int 1410p4_intr(int cpu, struct trapframe *tf) 1411{ 1412 uint32_t cccrval, ovf_mask, ovf_partner; |
1572 int i, did_interrupt, error, ri; 1573 struct pmc_hw *phw; | 1413 int did_interrupt, error, ri; |
1574 struct p4_cpu *pc; 1575 struct pmc *pm; 1576 pmc_value_t v; 1577 1578 PMCDBG(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf, 1579 TRAPF_USERMODE(tf)); 1580 | 1414 struct p4_cpu *pc; 1415 struct pmc *pm; 1416 pmc_value_t v; 1417 1418 PMCDBG(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf, 1419 TRAPF_USERMODE(tf)); 1420 |
1581 pc = (struct p4_cpu *) pmc_pcpu[P4_TO_HTT_PRIMARY(cpu)]; | 1421 pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)]; |
1582 1583 ovf_mask = P4_CPU_IS_HTT_SECONDARY(cpu) ? 1584 P4_CCCR_OVF_PMI_T1 : P4_CCCR_OVF_PMI_T0; 1585 ovf_mask |= P4_CCCR_OVF; 1586 if (p4_system_has_htt) 1587 ovf_partner = P4_CPU_IS_HTT_SECONDARY(cpu) ? 1588 P4_CCCR_OVF_PMI_T0 : P4_CCCR_OVF_PMI_T1; 1589 else 1590 ovf_partner = 0; 1591 did_interrupt = 0; 1592 1593 if (p4_system_has_htt) 1594 P4_PCPU_ACQ_INTR_SPINLOCK(pc); 1595 1596 /* 1597 * Loop through all CCCRs, looking for ones that have 1598 * interrupted this CPU. 1599 */ | 1422 1423 ovf_mask = P4_CPU_IS_HTT_SECONDARY(cpu) ? 1424 P4_CCCR_OVF_PMI_T1 : P4_CCCR_OVF_PMI_T0; 1425 ovf_mask |= P4_CCCR_OVF; 1426 if (p4_system_has_htt) 1427 ovf_partner = P4_CPU_IS_HTT_SECONDARY(cpu) ? 1428 P4_CCCR_OVF_PMI_T0 : P4_CCCR_OVF_PMI_T1; 1429 else 1430 ovf_partner = 0; 1431 did_interrupt = 0; 1432 1433 if (p4_system_has_htt) 1434 P4_PCPU_ACQ_INTR_SPINLOCK(pc); 1435 1436 /* 1437 * Loop through all CCCRs, looking for ones that have 1438 * interrupted this CPU. 1439 */ |
1600 for (i = 0; i < P4_NPMCS-1; i++) { | 1440 for (ri = 0; ri < P4_NPMCS; ri++) { |
1601 | 1441 |
1602 ri = i + 1; /* row index */ 1603 | |
1604 	/* 1605 	 * Check if our partner logical CPU has already marked 1606 	 * this PMC as having interrupted it. If so, reset 1607 	 * the flag and process the interrupt, but leave the 1608 	 * hardware alone. 1609 	 */ 1610 	if (p4_system_has_htt && P4_PCPU_GET_INTRFLAG(pc,ri)) { 1611 	P4_PCPU_SET_INTRFLAG(pc,ri,0); 1612 	did_interrupt = 1; 1613 1614 	/* 1615 	 * Ignore de-configured or stopped PMCs. 1616 	 * Ignore PMCs not in sampling mode. 1617 	 */ | 1442 	/* 1443 	 * Check if our partner logical CPU has already marked 1444 	 * this PMC as having interrupted it. If so, reset 1445 	 * the flag and process the interrupt, but leave the 1446 	 * hardware alone. 1447 	 */ 1448 	if (p4_system_has_htt && P4_PCPU_GET_INTRFLAG(pc,ri)) { 1449 	P4_PCPU_SET_INTRFLAG(pc,ri,0); 1450 	did_interrupt = 1; 1451 1452 	/* 1453 	 * Ignore de-configured or stopped PMCs. 1454 	 * Ignore PMCs not in sampling mode. 1455 	 */
1618 phw = pc->pc_hwpmcs[ri]; 1619 pm = phw->phw_pmc; | 1456 pm = pc->pc_p4pmcs[ri].phw_pmc; |
1620 if (pm == NULL || 1621 pm->pm_state != PMC_STATE_RUNNING || 1622 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { 1623 continue; 1624 } 1625 (void) pmc_process_interrupt(cpu, pm, tf, 1626 TRAPF_USERMODE(tf)); 1627 continue; 1628 } 1629 1630 /* 1631 * Fresh interrupt. Look for the CCCR_OVF bit 1632 * and the OVF_Tx bit for this logical 1633 * processor being set. 1634 */ | 1457 if (pm == NULL || 1458 pm->pm_state != PMC_STATE_RUNNING || 1459 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { 1460 continue; 1461 } 1462 (void) pmc_process_interrupt(cpu, pm, tf, 1463 TRAPF_USERMODE(tf)); 1464 continue; 1465 } 1466 1467 /* 1468 * Fresh interrupt. Look for the CCCR_OVF bit 1469 * and the OVF_Tx bit for this logical 1470 * processor being set. 1471 */ |
1635 cccrval = rdmsr(P4_CCCR_MSR_FIRST + i); | 1472 cccrval = rdmsr(P4_CCCR_MSR_FIRST + ri); |
1636 1637 if ((cccrval & ovf_mask) != ovf_mask) 1638 continue; 1639 1640 /* 1641 * If the other logical CPU would also have been 1642 * interrupted due to the PMC being shared, record 1643 * this fact in the per-cpu saved interrupt flag 1644 * bitmask. 1645 */ 1646 if (p4_system_has_htt && (cccrval & ovf_partner)) 1647 P4_PCPU_SET_INTRFLAG(pc, ri, 1); 1648 | 1473 1474 if ((cccrval & ovf_mask) != ovf_mask) 1475 continue; 1476 1477 /* 1478 * If the other logical CPU would also have been 1479 * interrupted due to the PMC being shared, record 1480 * this fact in the per-cpu saved interrupt flag 1481 * bitmask. 1482 */ 1483 if (p4_system_has_htt && (cccrval & ovf_partner)) 1484 P4_PCPU_SET_INTRFLAG(pc, ri, 1); 1485 |
1649 v = rdmsr(P4_PERFCTR_MSR_FIRST + i); | 1486 v = rdmsr(P4_PERFCTR_MSR_FIRST + ri); |
1650 1651 PMCDBG(MDP,INT, 2, "ri=%d v=%jx", ri, v); 1652 1653 /* Stop the counter, and reset the overflow bit */ 1654 cccrval &= ~(P4_CCCR_OVF | P4_CCCR_ENABLE); | 1487 1488 PMCDBG(MDP,INT, 2, "ri=%d v=%jx", ri, v); 1489 1490 /* Stop the counter, and reset the overflow bit */ 1491 cccrval &= ~(P4_CCCR_OVF | P4_CCCR_ENABLE); |
1655 wrmsr(P4_CCCR_MSR_FIRST + i, cccrval); | 1492 wrmsr(P4_CCCR_MSR_FIRST + ri, cccrval); |
1656 1657 did_interrupt = 1; 1658 1659 /* 1660 * Ignore de-configured or stopped PMCs. Ignore PMCs 1661 * not in sampling mode. 1662 */ | 1493 1494 did_interrupt = 1; 1495 1496 /* 1497 * Ignore de-configured or stopped PMCs. Ignore PMCs 1498 * not in sampling mode. 1499 */ |
1663 phw = pc->pc_hwpmcs[ri]; 1664 pm = phw->phw_pmc; | 1500 pm = pc->pc_p4pmcs[ri].phw_pmc; |
1665 1666 if (pm == NULL || 1667 pm->pm_state != PMC_STATE_RUNNING || 1668 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { 1669 continue; 1670 } 1671 1672 /* --- 5 unchanged lines hidden (view full) --- 1678 1679 /* 1680 * Only the first processor executing the NMI handler 1681 * in a HTT pair will restart a PMC, and that too 1682 * only if there were no errors. 1683 */ 1684 v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE( 1685 pm->pm_sc.pm_reloadcount); | 1501 1502 if (pm == NULL || 1503 pm->pm_state != PMC_STATE_RUNNING || 1504 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { 1505 continue; 1506 } 1507 1508 /* --- 5 unchanged lines hidden (view full) --- 1514 1515 /* 1516 * Only the first processor executing the NMI handler 1517 * in a HTT pair will restart a PMC, and that too 1518 * only if there were no errors. 1519 */ 1520 v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE( 1521 pm->pm_sc.pm_reloadcount); |
1686 wrmsr(P4_PERFCTR_MSR_FIRST + i, v); | 1522 wrmsr(P4_PERFCTR_MSR_FIRST + ri, v); |
1687 if (error == 0) | 1523 if (error == 0) |
1688 wrmsr(P4_CCCR_MSR_FIRST + i, | 1524 wrmsr(P4_CCCR_MSR_FIRST + ri, |
1689 cccrval | P4_CCCR_ENABLE); 1690 } 1691 1692 /* allow the other CPU to proceed */ 1693 if (p4_system_has_htt) 1694 P4_PCPU_REL_INTR_SPINLOCK(pc); 1695 1696 /* --- 16 unchanged lines hidden (view full) --- 1713 */ 1714 1715static int 1716p4_describe(int cpu, int ri, struct pmc_info *pi, 1717 struct pmc **ppmc) 1718{ 1719 int error; 1720 size_t copied; | 1525 cccrval | P4_CCCR_ENABLE); 1526 } 1527 1528 /* allow the other CPU to proceed */ 1529 if (p4_system_has_htt) 1530 P4_PCPU_REL_INTR_SPINLOCK(pc); 1531 1532 /* --- 16 unchanged lines hidden (view full) --- 1549 */ 1550 1551static int 1552p4_describe(int cpu, int ri, struct pmc_info *pi, 1553 struct pmc **ppmc) 1554{ 1555 int error; 1556 size_t copied; |
1721 struct pmc_hw *phw; | |
1722 const struct p4pmc_descr *pd; 1723 1724 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 1725 ("[p4,%d] illegal CPU %d", __LINE__, cpu)); 1726 KASSERT(ri >= 0 && ri < P4_NPMCS, 1727 ("[p4,%d] row-index %d out of range", __LINE__, ri)); 1728 1729 PMCDBG(MDP,OPS,1,"p4-describe cpu=%d ri=%d", cpu, ri); 1730 1731 if (P4_CPU_IS_HTT_SECONDARY(cpu)) | 1557 const struct p4pmc_descr *pd; 1558 1559 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 1560 ("[p4,%d] illegal CPU %d", __LINE__, cpu)); 1561 KASSERT(ri >= 0 && ri < P4_NPMCS, 1562 ("[p4,%d] row-index %d out of range", __LINE__, ri)); 1563 1564 PMCDBG(MDP,OPS,1,"p4-describe cpu=%d ri=%d", cpu, ri); 1565 1566 if (P4_CPU_IS_HTT_SECONDARY(cpu)) |
1732 return EINVAL; | 1567 return (EINVAL); |
1733 | 1568 |
1734 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; | |
1735 pd = &p4_pmcdesc[ri]; 1736 1737 if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name, | 1569 pd = &p4_pmcdesc[ri]; 1570 1571 if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name, |
1738 PMC_NAME_MAX, &copied)) != 0) 1739 return error; | 1572 PMC_NAME_MAX, &copied)) != 0) 1573 return (error); |
1740 1741 pi->pm_class = pd->pm_descr.pd_class; 1742 | 1574 1575 pi->pm_class = pd->pm_descr.pd_class; 1576 |
1743 if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) { | 1577 if (p4_pcpu[cpu]->pc_p4pmcs[ri].phw_state & PMC_PHW_FLAG_IS_ENABLED) { |
1744 pi->pm_enabled = TRUE; | 1578 pi->pm_enabled = TRUE; |
1745 *ppmc = phw->phw_pmc; | 1579 *ppmc = p4_pcpu[cpu]->pc_p4pmcs[ri].phw_pmc; |
1746 } else { 1747 pi->pm_enabled = FALSE; 1748 *ppmc = NULL; 1749 } 1750 | 1580 } else { 1581 pi->pm_enabled = FALSE; 1582 *ppmc = NULL; 1583 } 1584 |
1751 return 0; | 1585 return (0); |
1752} 1753 1754/* 1755 * Get MSR# for use with RDPMC. 1756 */ 1757 1758static int 1759p4_get_msr(int ri, uint32_t *msr) --- 5 unchanged lines hidden (view full) --- 1765 1766 PMCDBG(MDP,OPS, 1, "ri=%d getmsr=0x%x", ri, *msr); 1767 1768 return 0; 1769} 1770 1771 1772int | 1586} 1587 1588/* 1589 * Get MSR# for use with RDPMC. 1590 */ 1591 1592static int 1593p4_get_msr(int ri, uint32_t *msr) --- 5 unchanged lines hidden (view full) --- 1599 1600 PMCDBG(MDP,OPS, 1, "ri=%d getmsr=0x%x", ri, *msr); 1601 1602 return 0; 1603} 1604 1605 1606int |
1773pmc_initialize_p4(struct pmc_mdep *pmc_mdep) | 1607pmc_p4_initialize(struct pmc_mdep *md, int ncpus) |
1774{ | 1608{ |
1609 struct pmc_classdep *pcd; |
|
1775 struct p4_event_descr *pe; 1776 | 1610 struct p4_event_descr *pe; 1611 |
1612 KASSERT(md != NULL, ("[p4,%d] md is NULL", __LINE__)); |
|
1777 KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0, 1778 ("[p4,%d] Initializing non-intel processor", __LINE__)); 1779 1780 PMCDBG(MDP,INI,1, "%s", "p4-initialize"); 1781 | 1613 KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0, 1614 ("[p4,%d] Initializing non-intel processor", __LINE__)); 1615 1616 PMCDBG(MDP,INI,1, "%s", "p4-initialize"); 1617 |
1782 switch (pmc_mdep->pmd_cputype) { | 1618 /* Allocate space for pointers to per-cpu descriptors. */ 1619 p4_pcpu = malloc(sizeof(struct p4_cpu **) * ncpus, M_PMC, 1620 M_ZERO|M_WAITOK); 1621 1622 /* Fill in the class dependent descriptor. */ 1623 pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P4]; 1624 1625 switch (md->pmd_cputype) { |
1783 case PMC_CPU_INTEL_PIV: 1784 | 1626 case PMC_CPU_INTEL_PIV: 1627 |
1785 pmc_mdep->pmd_npmc = P4_NPMCS; 1786 pmc_mdep->pmd_classes[1].pm_class = PMC_CLASS_P4; 1787 pmc_mdep->pmd_classes[1].pm_caps = P4_PMC_CAPS; 1788 pmc_mdep->pmd_classes[1].pm_width = 40; 1789 pmc_mdep->pmd_nclasspmcs[1] = 18; | 1628 pcd->pcd_caps = P4_PMC_CAPS; 1629 pcd->pcd_class = PMC_CLASS_P4; 1630 pcd->pcd_num = P4_NPMCS; 1631 pcd->pcd_ri = md->pmd_npmc; 1632 pcd->pcd_width = 40; |
1790 | 1633 |
1791 pmc_mdep->pmd_init = p4_init; 1792 pmc_mdep->pmd_cleanup = p4_cleanup; 1793 pmc_mdep->pmd_switch_in = p4_switch_in; 1794 pmc_mdep->pmd_switch_out = p4_switch_out; 1795 pmc_mdep->pmd_read_pmc = p4_read_pmc; 1796 pmc_mdep->pmd_write_pmc = p4_write_pmc; 1797 pmc_mdep->pmd_config_pmc = p4_config_pmc; 1798 pmc_mdep->pmd_get_config = p4_get_config; 1799 pmc_mdep->pmd_allocate_pmc = p4_allocate_pmc; 1800 pmc_mdep->pmd_release_pmc = p4_release_pmc; 1801 pmc_mdep->pmd_start_pmc = p4_start_pmc; 1802 pmc_mdep->pmd_stop_pmc = p4_stop_pmc; 1803 pmc_mdep->pmd_intr = p4_intr; 1804 pmc_mdep->pmd_describe = p4_describe; 1805 pmc_mdep->pmd_get_msr = p4_get_msr; /* i386 */ | 1634 pcd->pcd_allocate_pmc = p4_allocate_pmc; 1635 pcd->pcd_config_pmc = p4_config_pmc; 1636 pcd->pcd_describe = p4_describe; 1637 pcd->pcd_get_config = p4_get_config; 1638 pcd->pcd_get_msr = p4_get_msr; 1639 pcd->pcd_pcpu_fini = p4_pcpu_fini; 1640 pcd->pcd_pcpu_init = p4_pcpu_init; 1641 pcd->pcd_read_pmc = p4_read_pmc; 1642 pcd->pcd_release_pmc = p4_release_pmc; 1643 pcd->pcd_start_pmc = p4_start_pmc; 1644 pcd->pcd_stop_pmc = p4_stop_pmc; 1645 pcd->pcd_write_pmc = p4_write_pmc; |
1806 | 1646 |
1807 /* model specific munging */ | 1647 md->pmd_pcpu_fini = NULL; 1648 md->pmd_pcpu_init = NULL; 1649 md->pmd_intr = p4_intr; 1650 md->pmd_npmc += P4_NPMCS; 1651 1652 /* model specific configuration */ |
1808 if ((cpu_id & 0xFFF) < 0xF27) { 1809 1810 /* 1811 * On P4 and Xeon with CPUID < (Family 15, 1812 * Model 2, Stepping 7), only one ESCR is 1813 * available for the IOQ_ALLOCATION event. 1814 */ 1815 1816 pe = p4_find_event(PMC_EV_P4_IOQ_ALLOCATION); 1817 pe->pm_escrs[1] = P4_ESCR_NONE; 1818 } 1819 1820 break; 1821 1822 default: 1823 KASSERT(0,("[p4,%d] Unknown CPU type", __LINE__)); 1824 return ENOSYS; 1825 } 1826 | 1653 if ((cpu_id & 0xFFF) < 0xF27) { 1654 1655 /* 1656 * On P4 and Xeon with CPUID < (Family 15, 1657 * Model 2, Stepping 7), only one ESCR is 1658 * available for the IOQ_ALLOCATION event. 1659 */ 1660 1661 pe = p4_find_event(PMC_EV_P4_IOQ_ALLOCATION); 1662 pe->pm_escrs[1] = P4_ESCR_NONE; 1663 } 1664 1665 break; 1666 1667 default: 1668 KASSERT(0,("[p4,%d] Unknown CPU type", __LINE__)); 1669 return ENOSYS; 1670 } 1671 |
1827 return 0; | 1672 return (0); |
1828} | 1673} |
1674 1675void 1676pmc_p4_finalize(struct pmc_mdep *md) 1677{ 1678#if defined(INVARIANTS) 1679 int i, ncpus; 1680#endif 1681 1682 KASSERT(p4_pcpu != NULL, 1683 ("[p4,%d] NULL p4_pcpu", __LINE__)); 1684 1685#if defined(INVARIANTS) 1686 ncpus = pmc_cpu_max(); 1687 for (i = 0; i < ncpus; i++) 1688 KASSERT(p4_pcpu[i] == NULL, ("[p4,%d] non-null pcpu %d", 1689 __LINE__, i)); 1690#endif 1691 1692 free(p4_pcpu, M_PMC); 1693 p4_pcpu = NULL; 1694} |
|