/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: David Erb (djerb@us.ibm.com)
 * Modifications:
 *	Carl Love <carll@us.ibm.com>
 *	Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/cell-pmu.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/oprofile_impl.h>
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/rtas.h>
#include <asm/system.h>
#include <asm/cell-regs.h>

#include "../platforms/cell/interrupt.h"
#include "cell/pr_util.h"

#define PPU_PROFILING		0
#define SPU_PROFILING_CYCLES	1
#define SPU_PROFILING_EVENTS	2

#define SPU_EVENT_NUM_START	4100
#define SPU_EVENT_NUM_STOP	4399
#define SPU_PROFILE_EVENT_ADDR		4363	/* spu, address trace, decimal */
#define SPU_PROFILE_EVENT_ADDR_MASK_A	0x146	/* sub unit set to zero */
#define SPU_PROFILE_EVENT_ADDR_MASK_B	0x186	/* sub unit set to zero */

#define NUM_SPUS_PER_NODE	8
#define SPU_CYCLES_EVENT_NUM	2	/* event number for SPU_CYCLES */

#define PPU_CYCLES_EVENT_NUM	1	/* event number for CYCLES */
#define PPU_CYCLES_GRP_NUM	1	/* special group number for identifying
					 * PPU_CYCLES event
					 */
#define CBE_COUNT_ALL_CYCLES	0x42800000 /* PPU cycle event specifier */

#define NUM_THREADS		2	/* number of physical threads in
					 * physical processor
					 */
#define NUM_DEBUG_BUS_WORDS	4
#define NUM_INPUT_BUS_WORDS	2

#define MAX_SPU_COUNT		0xFFFFFF	/* maximum 24 bit LFSR value */

/* The minimum HW interval timer setting to send a value to the trace
 * buffer is 10 cycles. To configure the counter to send a value every
 * N cycles, set the counter to 2^32 - 1 - N.
 */
#define NUM_INTERVAL_CYC	(0xFFFFFFFF - 10)
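/*
 * Worked example (editorial sketch; the arithmetic only illustrates the
 * encoding described above): asking the interval timer to write to the
 * trace buffer every N = 10 cycles means programming
 *
 *	pm_interval = 2^32 - 1 - 10 = 0xFFFFFFFF - 10 = 0xFFFFFFF5
 *
 * which is exactly the NUM_INTERVAL_CYC value that cell_cpu_setup()
 * writes to the pm_interval register for SPU event profiling.
 */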

/*
 * spu_cycle_reset is the number of cycles between samples.
 * This variable is used for SPU profiling and should ONLY be set
 * at the beginning of cell_reg_setup; otherwise, it's read-only.
 */
static unsigned int spu_cycle_reset;
static unsigned int profiling_mode;
static int spu_evnt_phys_spu_indx;

struct pmc_cntrl_data {
	unsigned long vcntr;
	unsigned long evnts;
	unsigned long masks;
	unsigned long enabled;
};

/*
 * ibm,cbe-perftools rtas parameters
 */
struct pm_signal {
	u16 cpu;		/* Processor to modify */
	u16 sub_unit;		/* hw subunit this applies to (if applicable)*/
	short int signal_group;	/* Signal Group to Enable/Disable */
	u8 bus_word;		/* Enable/Disable on this Trace/Trigger/Event
				 * Bus Word(s) (bitmask)
				 */
	u8 bit;			/* Trigger/Event bit (if applicable) */
};

/*
 * rtas call arguments
 */
enum {
	SUBFUNC_RESET = 1,
	SUBFUNC_ACTIVATE = 2,
	SUBFUNC_DEACTIVATE = 3,

	PASSTHRU_IGNORE = 0,
	PASSTHRU_ENABLE = 1,
	PASSTHRU_DISABLE = 2,
};

struct pm_cntrl {
	u16 enable;
	u16 stop_at_max;
	u16 trace_mode;
	u16 freeze;
	u16 count_mode;
	u16 spu_addr_trace;
	u8 trace_buf_ovflw;
};

static struct {
	u32 group_control;
	u32 debug_bus_control;
	struct pm_cntrl pm_cntrl;
	u32 pm07_cntrl[NR_PHYS_CTRS];
} pm_regs;

#define GET_SUB_UNIT(x)		((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x)		((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x)		((x & 0x00000300) >> 8)
#define GET_POLARITY(x)		((x & 0x00000002) >> 1)
#define GET_COUNT_CYCLES(x)	(x & 0x00000001)
#define GET_INPUT_CONTROL(x)	((x & 0x00000004) >> 2)
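/*
 * Decoding example (editorial sketch): applying these macros to the SPU
 * address-trace mask 0x146 (SPU_PROFILE_EVENT_ADDR_MASK_A above) gives
 *
 *	GET_SUB_UNIT(0x146)      = 0x0	(sub unit set to zero)
 *	GET_BUS_WORD(0x146)      = 0x4	(debug bus word 2)
 *	GET_BUS_TYPE(0x146)      = 0x1
 *	GET_POLARITY(0x146)      = 0x1
 *	GET_COUNT_CYCLES(0x146)  = 0x0
 *	GET_INPUT_CONTROL(0x146) = 0x1
 */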
static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];

/*
 * The CELL profiling code makes rtas calls to set up the debug bus to
 * route the performance signals. Additionally, SPU profiling requires
 * a second rtas call to set up the hardware to capture the SPU PCs.
 * The EIO error value is returned if the token lookups or the rtas
 * calls fail; of the existing error numbers, EIO is the best fit.
 * The probability of an rtas-related error is very low, but by
 * returning -EIO and printing additional information to dmesg the user
 * will know that OProfile did not start, and dmesg will tell them why.
 * OProfile does not support returning errors on Stop. That is not a
 * huge issue, since failure to reset the debug bus or to stop the SPU
 * PC collection is not fatal. Chances are that if Stop failed, Start
 * doesn't work either.
 */

static u32 hdw_thread;

static u32 virt_cntr_inter_mask;
static struct timer_list timer_virt_cntr;
static struct timer_list timer_spu_event_swap;

/*
 * pm_signal needs to be global since it is initialized in
 * cell_reg_setup at the time when the necessary information
 * is available.
 */
static struct pm_signal pm_signal[NR_PHYS_CTRS];
static int pm_rtas_token;	/* token for debug bus setup call */
static int spu_rtas_token;	/* token for SPU cycle profiling */

static u32 reset_value[NR_PHYS_CTRS];
static int num_counters;
static int oprofile_running;
static DEFINE_SPINLOCK(cntr_lock);

static u32 ctr_enabled;

static unsigned char input_bus[NUM_INPUT_BUS_WORDS];

/*
 * Firmware interface functions
 */
static int
rtas_ibm_cbe_perftools(int subfunc, int passthru,
		       void *address, unsigned long length)
{
	u64 paddr = __pa(address);

	return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
			 passthru, paddr >> 32, paddr & 0xffffffff, length);
}
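/*
 * Call sketch (editorial; the address value is hypothetical): the
 * firmware takes the physical address of the pm_signal array split
 * into two 32-bit arguments, e.g. paddr = 0x123456789 is passed as
 * hi = 0x1, lo = 0x23456789. A single-entry reset therefore looks like
 *
 *	rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
 *			       &sig, sizeof(struct pm_signal));
 *
 * which is what pm_rtas_reset_signals() below does.
 */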

static void pm_rtas_reset_signals(u32 node)
{
	int ret;
	struct pm_signal pm_signal_local;

	/*
	 * The debug bus is being set to the passthru disable state.
	 * However, the FW still expects at least one legal signal routing
	 * entry or it will return an error on the arguments. If we don't
	 * supply a valid entry, we must ignore all return values. Ignoring
	 * all return values means we might miss an error we should be
	 * concerned about.
	 */

	/* fw expects physical cpu #. */
	pm_signal_local.cpu = node;
	pm_signal_local.signal_group = 21;
	pm_signal_local.bus_word = 1;
	pm_signal_local.sub_unit = 0;
	pm_signal_local.bit = 0;

	ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
				     &pm_signal_local,
				     sizeof(struct pm_signal));

	if (unlikely(ret))
		/*
		 * Not a fatal error. For OProfile stop, the oprofile
		 * functions do not support returning an error for
		 * failure to stop OProfile.
		 */
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __func__, ret);
}

static int pm_rtas_activate_signals(u32 node, u32 count)
{
	int ret;
	int i, j;
	struct pm_signal pm_signal_local[NR_PHYS_CTRS];

	/*
	 * There is no debug setup required for the cycles event.
	 * Note that only events in the same group can be used.
	 * Otherwise, there will be conflicts in correctly routing
	 * the signals on the debug bus. It is the responsibility
	 * of the OProfile user tool to check that the events are in
	 * the same group.
	 */
	i = 0;
	for (j = 0; j < count; j++) {
		if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {

			/* fw expects physical cpu # */
			pm_signal_local[i].cpu = node;
			pm_signal_local[i].signal_group
				= pm_signal[j].signal_group;
			pm_signal_local[i].bus_word = pm_signal[j].bus_word;
			pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
			pm_signal_local[i].bit = pm_signal[j].bit;
			i++;
		}
	}

	if (i != 0) {
		ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
					     pm_signal_local,
					     i * sizeof(struct pm_signal));

		if (unlikely(ret)) {
			printk(KERN_WARNING "%s: rtas returned: %d\n",
			       __func__, ret);
			return -EIO;
		}
	}

	return 0;
}

/*
 * PM Signal functions
 */
static void set_pm_event(u32 ctr, int event, u32 unit_mask)
{
	struct pm_signal *p;
	u32 signal_bit;
	u32 bus_word, bus_type, count_cycles, polarity, input_control;
	int j, i;

	if (event == PPU_CYCLES_EVENT_NUM) {
		/* Special Event: Count all cpu cycles */
		pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
		p = &(pm_signal[ctr]);
		p->signal_group = PPU_CYCLES_GRP_NUM;
		p->bus_word = 1;
		p->sub_unit = 0;
		p->bit = 0;
		goto out;
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
	}

	bus_word = GET_BUS_WORD(unit_mask);
	bus_type = GET_BUS_TYPE(unit_mask);
	count_cycles = GET_COUNT_CYCLES(unit_mask);
	polarity = GET_POLARITY(unit_mask);
	input_control = GET_INPUT_CONTROL(unit_mask);
	signal_bit = (event % 100);

	p = &(pm_signal[ctr]);

	p->signal_group = event / 100;
	p->bus_word = bus_word;
	p->sub_unit = GET_SUB_UNIT(unit_mask);

	pm_regs.pm07_cntrl[ctr] = 0;
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

	/*
	 * Some of the islands' signal selection is based on 64 bit words.
	 * The debug bus words are 32 bits, and the input words to the
	 * performance counters are defined as 32 bits. We need to convert
	 * the 64 bit island specification to the appropriate 32-bit input
	 * bit and bus word for the performance counter event selection.
	 * See the CELL Performance monitoring signals manual and the
	 * Perf cntr hardware descriptions for the details.
	 */
	if (input_control == 0) {
		if (signal_bit > 31) {
			signal_bit -= 32;
			if (bus_word == 0x3)
				bus_word = 0x2;
			else if (bus_word == 0xc)
				bus_word = 0x8;
		}

		if ((bus_type == 0) && p->signal_group >= 60)
			bus_type = 2;
		if ((bus_type == 1) && p->signal_group >= 50)
			bus_type = 0;

		pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
		p->bit = signal_bit;
	}

	for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
		if (bus_word & (1 << i)) {
			pm_regs.debug_bus_control |=
				(bus_type << (30 - (2 * i)));

			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
				if (input_bus[j] == 0xff) {
					input_bus[j] = i;
					pm_regs.group_control |=
						(i << (30 - (2 * j)));

					break;
				}
			}
		}
	}
out:
	;
}
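/*
 * Example (editorial sketch; the event number is hypothetical): event
 * numbers encode the signal group in the high digits and the signal bit
 * in the low two digits, so an event such as 2103 (in the thread-0 PPU
 * range that cell_reg_setup_ppu() remaps) decodes as
 *
 *	signal_group = 2103 / 100 = 21
 *	signal_bit   = 2103 % 100 = 3
 *
 * with the bus word, bus type, sub unit, etc. taken from the event's
 * unit_mask via the GET_* macros as shown above.
 */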

static void write_pm_cntrl(int cpu)
{
	/*
	 * Oprofile will use 32 bit counters, set bits 7:10 to 0
	 * pm_regs.pm_cntrl is a global
	 */

	u32 val = 0;
	if (pm_regs.pm_cntrl.enable == 1)
		val |= CBE_PM_ENABLE_PERF_MON;

	if (pm_regs.pm_cntrl.stop_at_max == 1)
		val |= CBE_PM_STOP_AT_MAX;

	if (pm_regs.pm_cntrl.trace_mode != 0)
		val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);

	if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
		val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
	if (pm_regs.pm_cntrl.freeze == 1)
		val |= CBE_PM_FREEZE_ALL_CTRS;

	val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);

	/*
	 * Routine set_count_mode must be called previously to set
	 * the count mode based on the user selection of user and kernel.
	 */
	val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
	cbe_write_pm(cpu, pm_control, val);
}

static inline void
set_count_mode(u32 kernel, u32 user)
{
	/*
	 * The user must specify user and kernel if they want them. If
	 * neither is specified, OProfile will count in hypervisor mode.
	 * pm_regs.pm_cntrl is a global
	 */
	if (kernel) {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_SUPERVISOR_MODE;
	} else {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_HYPERVISOR_MODE;
	}
}

static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
{

	pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
	cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
}

/*
 * Oprofile is expected to collect data on all CPUs simultaneously.
 * However, there is one set of performance counters per node. There are
 * two hardware threads or virtual CPUs on each node. Hence, OProfile must
 * multiplex in time the performance counter collection on the two virtual
 * CPUs. The multiplexing of the performance counters is done by this
 * virtual counter routine.
 *
 * The pmc_values used below is defined as 'per-cpu' but its use is
 * more akin to 'per-node'. We need to store two sets of counter
 * values per node -- one for the previous run and one for the next.
 * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even
 * pair of per-cpu arrays is used for storing the previous and next
 * pmc values for a given node.
 * NOTE: We use the per-cpu variable to improve cache performance.
 *
 * This routine will alternate loading the virtual counters for
 * virtual CPUs
 */
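/*
 * Layout sketch (editorial; assumes the two hardware threads of a node
 * are numbered cpu and cpu + 1, which is what the cpu + hdw_thread
 * indexing below relies on): while thread 0 is counting,
 * per_cpu(pmc_values, cpu + 1) holds the saved counts for thread 1,
 * and each swap does
 *
 *	per_cpu(pmc_values, cpu + prev_hdw_thread)[i] = cbe_read_ctr(...);
 *	cbe_write_ctr(..., per_cpu(pmc_values, cpu + next_hdw_thread)[i]);
 *
 * as in the loop below.
 */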
static void cell_virtual_cntr(unsigned long data)
{
	int i, prev_hdw_thread, next_hdw_thread;
	u32 cpu;
	unsigned long flags;

	/*
	 * Make sure that the interrupt_handler and the virt counter are
	 * not both playing with the counters on the same node.
	 */

	spin_lock_irqsave(&cntr_lock, flags);

	prev_hdw_thread = hdw_thread;

	/* switch the cpu handling the interrupts */
	hdw_thread = 1 ^ hdw_thread;
	next_hdw_thread = hdw_thread;

	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * There are some per thread events. Must do the
	 * set event, for the thread that is being started
	 */
	for (i = 0; i < num_counters; i++)
		set_pm_event(i,
			     pmc_cntrl[next_hdw_thread][i].evnts,
			     pmc_cntrl[next_hdw_thread][i].masks);

	/*
	 * The following is done only once per each node, but
	 * we need cpu #, not node #, to pass to the cbe_xxx functions.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * stop counters, save counter values, restore counts
		 * for previous thread
		 */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);
		for (i = 0; i < num_counters; i++) {
			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
				= cbe_read_ctr(cpu, i);

			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
			    == 0xFFFFFFFF)
				/* If the cntr value is 0xffffffff, we must
				 * reset that to 0xfffffff0 when the current
				 * thread is restarted. This will generate a
				 * new interrupt and make sure that we never
				 * restore the counters to the max value. If
				 * the counters were restored to the max value,
				 * they do not increment and no interrupts are
				 * generated. Hence no more samples will be
				 * collected on that cpu.
				 */
				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
			else
				cbe_write_ctr(cpu, i,
					      per_cpu(pmc_values,
						      cpu +
						      next_hdw_thread)[i]);
		}

		/*
		 * Switch to the other thread. Change the interrupt
		 * and control regs to be scheduled on the CPU
		 * corresponding to the thread to execute.
		 */
		for (i = 0; i < num_counters; i++) {
			if (pmc_cntrl[next_hdw_thread][i].enabled) {
				/*
				 * There are some per thread events.
				 * Must do the set event, enable_cntr
				 * for each cpu.
				 */
				enable_ctr(cpu, i,
					   pm_regs.pm07_cntrl);
			} else {
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
					 virt_cntr_inter_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&cntr_lock, flags);

	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}

static void start_virt_cntrs(void)
{
	init_timer(&timer_virt_cntr);
	timer_virt_cntr.function = cell_virtual_cntr;
	timer_virt_cntr.data = 0UL;
	timer_virt_cntr.expires = jiffies + HZ / 10;
	add_timer(&timer_virt_cntr);
}

static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
			struct op_system_config *sys, int num_ctrs)
{
	spu_cycle_reset = ctr[0].count;

	/*
	 * Each node will need to make the rtas call to start
	 * and stop SPU profiling. Get the token once and store it.
	 */
	spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");

	if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-spu-perftools unknown\n",
		       __func__);
		return -EIO;
	}
	return 0;
}

/* Unfortunately, the hardware will only support event profiling
 * on one SPU per node at a time. Therefore, we must time slice
 * the profiling across all SPUs in the node. Note, we do this
 * in parallel for each node. The following routine is called
 * periodically based on a kernel timer to switch which SPU is
 * being monitored, in a round-robin fashion.
 */
static void spu_evnt_swap(unsigned long data)
{
	int node;
	int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
	unsigned long flags;
	int cpu;
	int ret;
	u32 interrupt_mask;


	/* enable interrupts on cntr 0 */
	interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);

	hdw_thread = 0;

	/* Make sure the spu event interrupt handler and the spu event
	 * swap don't access the counters simultaneously.
	 */
	spin_lock_irqsave(&cntr_lock, flags);

	cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;

	if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
		spu_evnt_phys_spu_indx = 0;

	pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
	pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
	pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;

	/* switch the SPU being profiled on each node */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		node = cbe_cpu_to_node(cpu);
		cur_phys_spu = (node * NUM_SPUS_PER_NODE)
			+ cur_spu_evnt_phys_spu_indx;
		nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
			+ spu_evnt_phys_spu_indx;

		/*
		 * stop counters, save counter values, restore counts
		 * for previous physical SPU
		 */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);

		spu_pm_cnt[cur_phys_spu]
			= cbe_read_ctr(cpu, 0);

		/* restore previous count for the next spu to sample */
		/* NOTE, hardware issue, counter will not start if the
		 * counter value is at max (0xFFFFFFFF).
		 */
		if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
			cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
		else
			cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);

		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* set up the debug bus to measure the one event and
		 * the two events that route the next SPU's PC onto
		 * the debug bus
		 */
		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
		if (ret)
			printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
			       "SPU event swap\n", __func__);

		/* clear the trace buffer; we don't want to take a PC
		 * for the previous SPU */
		cbe_write_pm(cpu, trace_address, 0);

		enable_ctr(cpu, 0, pm_regs.pm07_cntrl);

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 interrupt_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&cntr_lock, flags);

	/* swap the profiled SPU approximately every 1/25 second (HZ / 25) */
	mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
}

static void start_spu_event_swap(void)
{
	init_timer(&timer_spu_event_swap);
	timer_spu_event_swap.function = spu_evnt_swap;
	timer_spu_event_swap.data = 0UL;
	timer_spu_event_swap.expires = jiffies + HZ / 25;
	add_timer(&timer_spu_event_swap);
}

static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
			struct op_system_config *sys, int num_ctrs)
{
	int i;

	/* routine is called once for all nodes */

	spu_evnt_phys_spu_indx = 0;
	/*
	 * For all events except PPU CYCLES, each node will need to make
	 * the rtas cbe-perftools call to set up and reset the debug bus.
	 * Make the token lookup call once and store it in the global
	 * variable pm_rtas_token.
	 */
	pm_rtas_token = rtas_token("ibm,cbe-perftools");

	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-perftools unknown\n",
		       __func__);
		return -EIO;
	}

	/* set up the pm_control register settings;
	 * the settings will be written per node by the
	 * cell_cpu_setup() function.
	 */
	pm_regs.pm_cntrl.trace_buf_ovflw = 1;

	/* Use the occurrence trace mode to have the SPU PC saved
	 * to the trace buffer. Occurrence data in the trace buffer
	 * is not used. Bit 2 must be set to store SPU addresses.
	 */
	pm_regs.pm_cntrl.trace_mode = 2;

	pm_regs.pm_cntrl.spu_addr_trace = 0x1;	/* using debug bus
						   events 2 & 3 */

	/* set up the debug bus event array with the SPU PC routing events.
	 * Note, pm_signal[0] will be filled in by the set_pm_event()
	 * call below.
	 */
	pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
	pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
	pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
	pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;

	pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
	pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
	pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
	pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;

	/* Set the user selected spu event to profile on,
	 * note, only one SPU profiling event is supported
	 */
	num_counters = 1;  /* Only support one SPU event at a time */
	set_pm_event(0, ctr[0].event, ctr[0].unit_mask);

	reset_value[0] = 0xFFFFFFFF - ctr[0].count;

	/* global, used by cell_cpu_setup */
	ctr_enabled |= 1;

	/* Initialize the count for each SPU to the reset value */
	for (i = 0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
		spu_pm_cnt[i] = reset_value[0];

	return 0;
}

static int cell_reg_setup_ppu(struct op_counter_config *ctr,
			struct op_system_config *sys, int num_ctrs)
{
	/* routine is called once for all nodes */
	int i, j, cpu;

	num_counters = num_ctrs;

	if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
		printk(KERN_ERR
		       "%s: OProfile, number of specified events "	\
		       "exceeds number of physical counters\n",
		       __func__);
		return -EIO;
	}

	set_count_mode(sys->enable_kernel, sys->enable_user);

	/* Setup the thread 0 events */
	for (i = 0; i < num_ctrs; ++i) {

		pmc_cntrl[0][i].evnts = ctr[i].event;
		pmc_cntrl[0][i].masks = ctr[i].unit_mask;
		pmc_cntrl[0][i].enabled = ctr[i].enabled;
		pmc_cntrl[0][i].vcntr = i;

		for_each_possible_cpu(j)
			per_cpu(pmc_values, j)[i] = 0;
	}

	/*
	 * Setup the thread 1 events, map the thread 0 event to the
	 * equivalent thread 1 event.
	 */
	for (i = 0; i < num_ctrs; ++i) {
		if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
			pmc_cntrl[1][i].evnts = ctr[i].event + 19;
		else if (ctr[i].event == 2203)
			pmc_cntrl[1][i].evnts = ctr[i].event;
		else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
			pmc_cntrl[1][i].evnts = ctr[i].event + 16;
		else
			pmc_cntrl[1][i].evnts = ctr[i].event;

		pmc_cntrl[1][i].masks = ctr[i].unit_mask;
		pmc_cntrl[1][i].enabled = ctr[i].enabled;
		pmc_cntrl[1][i].vcntr = i;
	}

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * Our counters count up, and "count" refers to
	 * how much before the next interrupt, and we interrupt
	 * on overflow. So we calculate the starting value
	 * which will give us "count" until overflow.
	 * Then we set the events on the enabled counters.
	 */
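	/*
	 * Worked example (editorial; the count value is hypothetical):
	 * for a user-specified count of 100000 events between samples,
	 * the 32-bit counter is started at
	 *
	 *	reset_value = 0xFFFFFFFF - 100000 = 0xFFFE795F
	 *
	 * so it overflows, and interrupts, after 100000 more events.
	 */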
	for (i = 0; i < num_counters; ++i) {
		/* start with virtual counter set 0 */
		if (pmc_cntrl[0][i].enabled) {
			/* Using 32bit counters, reset max - count */
			reset_value[i] = 0xFFFFFFFF - ctr[i].count;
			set_pm_event(i,
				     pmc_cntrl[0][i].evnts,
				     pmc_cntrl[0][i].masks);

			/* global, used by cell_cpu_setup */
			ctr_enabled |= (1 << i);
		}
	}

	/* initialize the previous counts for the virtual cntrs */
	for_each_online_cpu(cpu)
		for (i = 0; i < num_counters; ++i) {
			per_cpu(pmc_values, cpu)[i] = reset_value[i];
		}

	return 0;
}


/* This function is called once for all cpus combined */
static int cell_reg_setup(struct op_counter_config *ctr,
			struct op_system_config *sys, int num_ctrs)
{
	int ret = 0;
	spu_cycle_reset = 0;

	/* initialize the spu_addr_trace value; it will be reset if
	 * doing SPU event profiling.
	 */
	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;
	pm_regs.pm_cntrl.stop_at_max = 1;
	pm_regs.pm_cntrl.trace_mode = 0;
	pm_regs.pm_cntrl.freeze = 1;
	pm_regs.pm_cntrl.trace_buf_ovflw = 0;
	pm_regs.pm_cntrl.spu_addr_trace = 0;

	/*
	 * For all events except PPU CYCLES, each node will need to make
	 * the rtas cbe-perftools call to set up and reset the debug bus.
	 * Make the token lookup call once and store it in the global
	 * variable pm_rtas_token.
	 */
	pm_rtas_token = rtas_token("ibm,cbe-perftools");

	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-perftools unknown\n",
		       __func__);
		return -EIO;
	}

	if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
		profiling_mode = SPU_PROFILING_CYCLES;
		ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
	} else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
		   (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
		profiling_mode = SPU_PROFILING_EVENTS;
		spu_cycle_reset = ctr[0].count;

		/* for SPU event profiling, we need to set up the
		 * pm_signal array with the events to route the
		 * SPU PC before making the FW call. Note, only
		 * one SPU event for profiling can be specified
		 * at a time.
		 */
		ret = cell_reg_setup_spu_events(ctr, sys, num_ctrs);
	} else {
		profiling_mode = PPU_PROFILING;
		ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
	}

	return ret;
}



/* This function is called once for each cpu */
static int cell_cpu_setup(struct op_counter_config *cntr)
{
	u32 cpu = smp_processor_id();
	u32 num_enabled = 0;
	int i;
	int ret;

	/* Cycle based SPU profiling does not use the performance
	 * counters. The trace array is configured to collect
	 * the data.
	 */
	if (profiling_mode == SPU_PROFILING_CYCLES)
		return 0;

	/* There is one performance monitor per processor chip (i.e. node),
	 * so we only need to perform this function once per node.
	 */
	if (cbe_get_hw_thread_id(cpu))
		return 0;

	/* Stop all counters */
	cbe_disable_pm(cpu);
	cbe_disable_pm_interrupts(cpu);

	cbe_write_pm(cpu, pm_start_stop, 0);
	cbe_write_pm(cpu, group_control, pm_regs.group_control);
	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
	write_pm_cntrl(cpu);

	for (i = 0; i < num_counters; ++i) {
		if (ctr_enabled & (1 << i)) {
			pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
			num_enabled++;
		}
	}

	/*
	 * The pm_rtas_activate_signals will return -EIO if the FW
	 * call failed.
	 */
	if (profiling_mode == SPU_PROFILING_EVENTS) {
		/* For SPU event profiling, we also need to set up
		 * the pm interval timer
		 */
		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
					       num_enabled + 2);
		/* store PC from debug bus to Trace buffer as often
		 * as possible (every 10 cycles)
		 */
		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
		return ret;
	} else
		return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
						num_enabled);
}

#define ENTRIES	 303
#define MAXLFSR	 0xFFFFFF

/* precomputed table of 24 bit LFSR values */
static int initial_lfsr[] = {
 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
 7300044, 5378855, 9301235, 12755149, 4977682,
 8083074, 10327581, 6395087,
 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
};

/*
 * The hardware uses an LFSR counting sequence to determine when to capture
 * the SPU PCs. An LFSR sequence is like a pseudo-random number sequence
 * where each number occurs once in the sequence, but the sequence is not in
 * numerical order. The SPU PC capture is done when the LFSR sequence reaches
 * the last value in the sequence. Hence the user-specified value N
 * corresponds to the LFSR number that is N from the end of the sequence.
 *
 * To avoid the time to compute the LFSR, a lookup table is used. The 24 bit
 * LFSR sequence is broken into four ranges. The spacing of the precomputed
 * values is adjusted in each range so the error between the user-specified
 * number (N) of events between samples and the actual number of events based
 * on the precomputed value will be less than about 6.2%. Note, if the user
 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
 * This is to prevent the loss of samples because the trace buffer is full.
 *
 *	User specified N		  Step between		Index in
 *					  precomputed values	precomputed
 *								table
 *	0 to 2^16-1			  ----			0
 *	2^16 to 2^16+2^19-1		  2^12			1 to 128
 *	2^16+2^19 to 2^16+2^19+2^22-1	  2^15			129 to 256
 *	2^16+2^19+2^22 to 2^24-1	  2^18			257 to 302
 *
 *
 * For example, the LFSR values in the second range are computed for 2^16,
 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
 * 1, 2,..., 127, 128.
 *
 * The 24 bit LFSR value for the nth number in the sequence can be
 * calculated using the following code:
 *
 * #define size 24
 * int calculate_lfsr(int n)
 * {
 *	int i;
 *	unsigned int newlfsr0;
 *	unsigned int lfsr = 0xFFFFFF;
 *	unsigned int howmany = n;
 *
 *	for (i = 2; i < howmany + 2; i++) {
 *		newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
 *			    ((lfsr >> (size - 1 - 1)) & 1) ^
 *			    (((lfsr >> (size - 1 - 6)) & 1) ^
 *			     ((lfsr >> (size - 1 - 23)) & 1)));
 *
 *		lfsr >>= 1;
 *		lfsr = lfsr | (newlfsr0 << (size - 1));
 *	}
 *	return lfsr;
 * }
 */

#define V2_16  (0x1 << 16)
#define V2_19  (0x1 << 19)
#define V2_22  (0x1 << 22)

static int calculate_lfsr(int n)
{
	/*
	 * The ranges and steps are in powers of 2 so the calculations
	 * can be done using shifts rather than divides.
	 */
	int index;

	if ((n >> 16) == 0)
		index = 0;
	else if (((n - V2_16) >> 19) == 0)
		index = ((n - V2_16) >> 12) + 1;
	else if (((n - V2_16 - V2_19) >> 22) == 0)
		index = ((n - V2_16 - V2_19) >> 15) + 1 + 128;
	else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
		index = ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256;
	else
		index = ENTRIES - 1;

	/* make sure index is valid */
	if ((index >= ENTRIES) || (index < 0))
		index = ENTRIES - 1;

	return initial_lfsr[index];
}
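
/*
 * Index example (editorial sketch; N is hypothetical): a user-specified
 * N = 2^16 + 2^12 falls in the second range above, so
 *
 *	index = ((N - 2^16) >> 12) + 1 = (2^12 >> 12) + 1 = 2
 *
 * selecting the table entry precomputed for exactly 2^16 + 2^12 events.
 * Larger N in this range are truncated down to a 2^12 step; that step is
 * at most 2^12 / 2^16 = 1/16 of the smallest N in the range, which is
 * where the "about 6.2%" error bound above comes from.
 */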

static int pm_rtas_activate_spu_profiling(u32 node)
{
	int ret, i;
	struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];

	/*
	 * Set up the rtas call to configure the debug bus to
	 * route the SPU PCs. Setup the pm_signal for each SPU
	 */
	for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
		pm_signal_local[i].cpu = node;
		pm_signal_local[i].signal_group = 41;
		/* spu i on word (i/2) */
		pm_signal_local[i].bus_word = 1 << i / 2;
		/* spu i */
		pm_signal_local[i].sub_unit = i;
		pm_signal_local[i].bit = 63;
	}

	ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
				     PASSTHRU_ENABLE, pm_signal_local,
				     (ARRAY_SIZE(pm_signal_local)
				      * sizeof(struct pm_signal)));

	if (unlikely(ret)) {
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __func__, ret);
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_CPU_FREQ
static int
oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	int ret = 0;
	struct cpufreq_freqs *frq = data;
	if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
	    (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
		set_spu_profiling_frequency(frq->new, spu_cycle_reset);
	return ret;
}

static struct notifier_block cpu_freq_notifier_block = {
	.notifier_call = oprof_cpufreq_notify
};
#endif

/*
 * Note the generic OProfile stop calls do not support returning
 * an error on stop. Hence, we will not return an error if the FW
 * calls fail on stop. Failure to reset the debug bus is not an issue.
 * Failure to disable the SPU profiling is not an issue. The FW calls
 * to enable the performance counters and debug bus will work even if
 * the hardware was not cleanly reset.
 */
static void cell_global_stop_spu_cycles(void)
{
	int subfunc, rtn_value;
	unsigned int lfsr_value;
	int cpu;

	oprofile_running = 0;
	smp_wmb();

#ifdef CONFIG_CPU_FREQ
	cpufreq_unregister_notifier(&cpu_freq_notifier_block,
				    CPUFREQ_TRANSITION_NOTIFIER);
#endif

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		subfunc = 3;	/*
				 * 2 - activate SPU tracing,
				 * 3 - deactivate
				 */
		lfsr_value = 0x8f100000;

		rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
				      subfunc, cbe_cpu_to_node(cpu),
				      lfsr_value);

		if (unlikely(rtn_value != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools "	\
			       "failed, return = %d\n",
			       __func__, rtn_value);
		}

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
	}

	stop_spu_profiling_cycles();
}

static void cell_global_stop_spu_events(void)
{
	int cpu;
	oprofile_running = 0;

	stop_spu_profiling_events();
	smp_wmb();

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		cbe_sync_irq(cbe_cpu_to_node(cpu));
		/* Stop the counters */
		cbe_disable_pm(cpu);
		cbe_write_pm07_control(cpu, 0, 0);

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* Deactivate interrupts */
		cbe_disable_pm_interrupts(cpu);
	}
	del_timer_sync(&timer_spu_event_swap);
}

static void cell_global_stop_ppu(void)
{
	int cpu;

	/*
	 * This routine will be called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	del_timer_sync(&timer_virt_cntr);
	oprofile_running = 0;
	smp_wmb();

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		cbe_sync_irq(cbe_cpu_to_node(cpu));
		/* Stop the counters */
		cbe_disable_pm(cpu);

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* Deactivate interrupts */
		cbe_disable_pm_interrupts(cpu);
	}
}

static void cell_global_stop(void)
{
	if (profiling_mode == PPU_PROFILING)
		cell_global_stop_ppu();
	else if (profiling_mode == SPU_PROFILING_EVENTS)
		cell_global_stop_spu_events();
	else
		cell_global_stop_spu_cycles();
}

static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
{
	int subfunc;
	unsigned int lfsr_value;
	int cpu;
	int ret;
	int rtas_error;
	unsigned int cpu_khzfreq = 0;

	/* The SPU profiling uses time-based profiling based on
	 * cpu frequency, so if configured with the CPU_FREQ
	 * option, we should detect frequency changes and react
	 * accordingly.
	 */
#ifdef CONFIG_CPU_FREQ
	ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret < 0)
		/* this is not a fatal error */
		printk(KERN_ERR "CPU freq change registration failed: %d\n",
		       ret);

	else
		cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
#endif

	set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * Setup SPU cycle-based profiling.
		 * Set perf_mon_control bit 0 to a zero before
		 * enabling spu collection hardware.
		 */
		cbe_write_pm(cpu, pm_control, 0);

		if (spu_cycle_reset > MAX_SPU_COUNT)
			/* use largest possible value */
			lfsr_value = calculate_lfsr(MAX_SPU_COUNT - 1);
		else
			lfsr_value = calculate_lfsr(spu_cycle_reset);

		/* must use a non zero value. Zero disables data collection. */
		if (lfsr_value == 0)
			lfsr_value = calculate_lfsr(1);

		lfsr_value = lfsr_value << 8; /* shift lfsr to correct
					       * register location
					       */

		/* debug bus setup */
		ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));

		if (unlikely(ret)) {
			rtas_error = ret;
			goto out;
		}


		subfunc = 2;	/* 2 - activate SPU tracing, 3 - deactivate */

		/* start profiling */
		ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
				cbe_cpu_to_node(cpu), lfsr_value);

		if (unlikely(ret != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools failed, "	\
			       "return = %d\n", __func__, ret);
			rtas_error = -EIO;
			goto out;
		}
	}

	rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
	if (rtas_error)
		goto out_stop;

	oprofile_running = 1;
	return 0;

out_stop:
	cell_global_stop_spu_cycles();	/* clean up the PMU/debug bus */
out:
	return rtas_error;
}

static int cell_global_start_spu_events(struct op_counter_config *ctr)
{
	int cpu;
	u32 interrupt_mask = 0;
	int rtn = 0;

	hdw_thread = 0;

	/* spu event profiling uses the performance counters to generate
	 * an interrupt. The hardware is set up to store the SPU program
	 * counter into the trace array. The occurrence mode is used to
	 * enable storing data to the trace buffer. The bits are set
	 * to send/store the SPU address in the trace buffer. The debug
	 * bus must be set up to route the SPU program counter onto the
	 * debug bus. The occurrence data in the trace buffer is not used.
	 */

	/* This routine gets called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * Setup SPU event-based profiling.
		 * Set perf_mon_control bit 0 to a zero before
		 * enabling spu collection hardware.
		 *
		 * Only support one SPU event on one SPU per node.
		 */
		if (ctr_enabled & 1) {
			cbe_write_ctr(cpu, 0, reset_value[0]);
			enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
			interrupt_mask |=
				CBE_PM_CTR_OVERFLOW_INTR(0);
		} else {
			/* Disable counter */
			cbe_write_pm07_control(cpu, 0, 0);
		}

		cbe_get_and_clear_pm_interrupts(cpu);
		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
		cbe_enable_pm(cpu);

		/* clear the trace buffer */
		cbe_write_pm(cpu, trace_address, 0);
	}

	/* Start the timer to time slice collecting the event profile
	 * on each of the SPUs. Note, the profile can only be collected
	 * on one SPU per node at a time.
	 */
	start_spu_event_swap();
	start_spu_profiling_events();
	oprofile_running = 1;
	smp_wmb();

	return rtn;
}

static int cell_global_start_ppu(struct op_counter_config *ctr)
{
	u32 cpu, i;
	u32 interrupt_mask = 0;

	/* This routine gets called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		interrupt_mask = 0;

		for (i = 0; i < num_counters; ++i) {
			if (ctr_enabled & (1 << i)) {
				cbe_write_ctr(cpu, i, reset_value[i]);
				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
				interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
			} else {
				/* Disable counter */
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		cbe_get_and_clear_pm_interrupts(cpu);
		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
		cbe_enable_pm(cpu);
	}

	virt_cntr_inter_mask = interrupt_mask;
	oprofile_running = 1;
	smp_wmb();

	/*
	 * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
	 * executed which manipulates the PMU. We start the "virtual counter"
	 * here so that we do not need to synchronize access to the PMU in
	 * the above for-loop.
	 */
	start_virt_cntrs();

	return 0;
}

static int cell_global_start(struct op_counter_config *ctr)
{
	if (profiling_mode == SPU_PROFILING_CYCLES)
		return cell_global_start_spu_cycles(ctr);
	else if (profiling_mode == SPU_PROFILING_EVENTS)
		return cell_global_start_spu_events(ctr);
	else
		return cell_global_start_ppu(ctr);
}


/* The SPU interrupt handler
 *
 * SPU event profiling works as follows:
 * The pm_signal[0] holds the one SPU event to be measured. It is routed on
 * the debug bus using word 0 or 1. The values of pm_signal[1] and
 * pm_signal[2] contain the necessary events to route the SPU program
 * counter for the selected SPU onto the debug bus using words 2 and 3.
 * The pm_interval register is set up to write the SPU PC value into the
 * trace buffer at the maximum rate possible. The trace buffer is configured
 * to store the PCs, wrapping when it is full. The performance counter is
 * initialized to the max hardware count minus the number of events, N,
 * between samples. Once the N events have occurred, a HW counter overflow
 * occurs, causing the generation of a HW counter interrupt, which also
 * stops the writing of the SPU PC values to the trace buffer. Hence the
 * last PC written to the trace buffer is the SPU PC that we want.
 * Unfortunately, we have to read from the beginning of the trace buffer
 * to get to the last value written. We just hope the PPU has nothing
 * better to do than service this interrupt. The PC for the specific SPU
 * being profiled is extracted from the trace buffer, processed, and
 * stored. The trace buffer is cleared, interrupts are cleared, and the
 * counter is reset to max - N. A kernel timer is used to periodically
 * call the routine spu_evnt_swap() to switch to the next physical SPU
 * in the node to profile in round-robin order. This way data is collected
 * for all SPUs on the node. It does mean that we need to use a relatively
 * small value of N to ensure that enough samples are collected on each
 * SPU, since each SPU is profiled only 1/8 of the time. It may also be
 * necessary to use a longer sample collection period.
 */
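/*
 * Extraction sketch (editorial; the trace-buffer contents are
 * hypothetical): only the upper 16 bits of the 18-bit SPU PC are stored
 * in the trace entry, so if the masked entry is
 *
 *	trace_entry = trace_buffer[0] & 0x00000000FFFF0000 = 0x12340000
 *
 * then the handler below computes sample = trace_entry >> 14 = 0x48D0,
 * i.e. the 16 stored bits shifted back up by the 2 dropped low-order
 * bits (SPU PCs are 4-byte aligned).
 */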
static void cell_handle_interrupt_spu(struct pt_regs *regs,
				      struct op_counter_config *ctr)
{
	u32 cpu, cpu_tmp;
	u64 trace_entry;
	u32 interrupt_mask;
	u64 trace_buffer[2];
	u64 last_trace_buffer;
	u32 sample;
	u32 trace_addr;
	unsigned long sample_array_lock_flags;
	int spu_num;
	unsigned long flags;

	/* Make sure the spu event interrupt handler and the spu event
	 * swap don't access the counters simultaneously.
	 */
	cpu = smp_processor_id();
	spin_lock_irqsave(&cntr_lock, flags);

	cpu_tmp = cpu;
	cbe_disable_pm(cpu);

	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

	sample = 0xABCDEF;
	trace_entry = 0xfedcba;
	last_trace_buffer = 0xdeadbeaf;

	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
		/* disable writes to trace buff */
		cbe_write_pm(cpu, pm_interval, 0);

		/* only have one perf cntr being used, cntr 0 */
		if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
		    && ctr[0].enabled)
			/* The SPU PC values will be read
			 * from the trace buffer, reset counter
			 */

			cbe_write_ctr(cpu, 0, reset_value[0]);

		trace_addr = cbe_read_pm(cpu, trace_address);

		while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
			/* There is data in the trace buffer to process.
			 * Read the buffer until you get to the last
			 * entry. This is the value we want.
			 */

			cbe_read_trace_buffer(cpu, trace_buffer);
			trace_addr = cbe_read_pm(cpu, trace_address);
		}

		/* The SPU Address 16 bit count format for the 128 bit
		 * HW trace buffer is used for the SPU PC storage
		 *    HDR bits          0:15
		 *    SPU Addr 0 bits   16:31
		 *    SPU Addr 1 bits   32:47
		 *    unused bits       48:127
		 *
		 * HDR: bit4 = 1 SPU Address 0 valid
		 * HDR: bit5 = 1 SPU Address 1 valid
		 *  - unfortunately, the valid bits don't seem to work
		 *
		 * Note trace_buffer[0] holds bits 0:63 of the HW
		 * trace buffer, trace_buffer[1] holds bits 64:127
		 */

		trace_entry = trace_buffer[0]
			& 0x00000000FFFF0000;

		/* only the top 16 of the 18 bit SPU PC address
		 * are stored in the trace buffer, hence shift right
		 * by 16 - 2 bits */
		sample = trace_entry >> 14;
		last_trace_buffer = trace_buffer[0];

		spu_num = spu_evnt_phys_spu_indx
			+ (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);

		/* make sure only one process at a time is calling
		 * spu_sync_buffer()
		 */
		spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
				  sample_array_lock_flags);
		spu_sync_buffer(spu_num, &sample, 1);
		spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
				       sample_array_lock_flags);

		smp_wmb();	/* ensure spu event buffer updates are written;
				 * don't want events intermingled... */

		/* The counters were frozen by the interrupt.
		 * Reenable the interrupt and restart the counters.
		 */
		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 virt_cntr_inter_mask);

		/* clear the trace buffer, re-enable writes to trace buff */
		cbe_write_pm(cpu, trace_address, 0);
		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);

		/* The writes to the various performance counters only go
		 * to a latch. The new values (interrupt setting bits, reset
		 * counter value, etc.) are not copied to the actual registers
		 * until the performance monitor is enabled. In order to get
		 * this to work as desired, the performance monitor needs to
		 * be disabled while writing to the latches. This is a
		 * HW design issue.
		 */
		write_pm_cntrl(cpu);
		cbe_enable_pm(cpu);
	}
	spin_unlock_irqrestore(&cntr_lock, flags);
}

static void cell_handle_interrupt_ppu(struct pt_regs *regs,
				      struct op_counter_config *ctr)
{
	u32 cpu;
	u64 pc;
	int is_kernel;
	unsigned long flags = 0;
	u32 interrupt_mask;
	int i;

	cpu = smp_processor_id();

	/*
	 * Need to make sure the interrupt handler and the virt counter
	 * routine are not running at the same time. See the
	 * cell_virtual_cntr() routine for additional comments.
	 */
	spin_lock_irqsave(&cntr_lock, flags);

	/*
	 * Need to disable and reenable the performance counters
	 * to get the desired behavior from the hardware. This
	 * is hardware specific.
	 */

	cbe_disable_pm(cpu);

	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

	/*
	 * If the interrupt mask has been cleared, then the virt cntr
	 * has cleared the interrupt. When the thread that generated
	 * the interrupt is restored, the data count will be restored to
	 * 0xffffff0 to cause the interrupt to be regenerated.
	 */

	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
		pc = regs->nip;
		is_kernel = is_kernel_addr(pc);

		for (i = 0; i < num_counters; ++i) {
			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
			    && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				cbe_write_ctr(cpu, i, reset_value[i]);
			}
		}

		/*
		 * The counters were frozen by the interrupt.
		 * Reenable the interrupt and restart the counters.
		 * If there was a race between the interrupt handler and
		 * the virtual counter routine, the virtual counter
		 * routine may have cleared the interrupts. Hence we must
		 * use the virt_cntr_inter_mask to re-enable the interrupts.
		 */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 virt_cntr_inter_mask);

		/*
		 * The writes to the various performance counters only go
		 * to a latch. The new values (interrupt setting bits, reset
		 * counter value, etc.) are not copied to the actual registers
		 * until the performance monitor is enabled. In order to get
		 * this to work as desired, the performance monitor needs to
		 * be disabled while writing to the latches. This is a
		 * HW design issue.
		 */
		cbe_enable_pm(cpu);
	}
	spin_unlock_irqrestore(&cntr_lock, flags);
}

static void cell_handle_interrupt(struct pt_regs *regs,
				  struct op_counter_config *ctr)
{
	if (profiling_mode == PPU_PROFILING)
		cell_handle_interrupt_ppu(regs, ctr);
	else
		cell_handle_interrupt_spu(regs, ctr);
}

/*
 * This function is called from the generic OProfile
 * driver. When profiling PPUs, we need to do the
 * generic sync start; otherwise, do spu_sync_start.
 */
static int cell_sync_start(void)
{
	if ((profiling_mode == SPU_PROFILING_CYCLES) ||
	    (profiling_mode == SPU_PROFILING_EVENTS))
		return spu_sync_start();
	else
		return DO_GENERIC_SYNC;
}

static int cell_sync_stop(void)
{
	if ((profiling_mode == SPU_PROFILING_CYCLES) ||
	    (profiling_mode == SPU_PROFILING_EVENTS))
		return spu_sync_stop();
	else
		return 1;
}

struct op_powerpc_model op_model_cell = {
	.reg_setup = cell_reg_setup,
	.cpu_setup = cell_cpu_setup,
	.global_start = cell_global_start,
	.global_stop = cell_global_stop,
	.sync_start = cell_sync_start,
	.sync_stop = cell_sync_stop,
	.handle_interrupt = cell_handle_interrupt,
};