/*
 * Code to operate on PCI/E core, in NIC mode
 * Implements pci_api.h
 * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $Id: nicpci.c 401759 2013-05-13 16:08:08Z $
 */

#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <bcmdevs.h>
#include <sbchipc.h>
#include <pci_core.h>
#include <pcie_core.h>
#include <nicpci.h>
#include <pcicfg.h>

/* Per-device state for the PCI/PCIe bus core, allocated in pcicore_init(). */
typedef struct {
	union {
		sbpcieregs_t *pcieregs;
		sbpciregs_t *pciregs;
	} regs;				/* Memory mapped register to the core */

	si_t	*sih;			/* System interconnect handle */
	osl_t	*osh;			/* OSL handle */
	uint8	pciecap_lcreg_offset;	/* PCIE capability LCreg offset in the config space */
	uint8	pciecap_devctrl_offset;	/* PCIE DevControl reg offset in the config space */
	bool	pcie_pr42767;		/* TRUE once the PR42767 CLKREQ WAR has been applied */
	uint8	pcie_polarity;		/* forced SerDes RX polarity (SERDES_RX_CTRL_* bits) */
	uint8	pcie_war_aspm_ovr;	/* Override ASPM/Clkreq settings */
	uint8	pmecap_offset;		/* PM Capability offset in the config space */
	bool	pmecap;			/* Capable of generating PME */
	bool	pcie_power_save;	/* enables serdes power-save tuning in pcie_power_save_upd() */
	uint16	pmebits;		/* expected SROM-shadow PME bits (see pcie_war_pmebits) */
	uint16	pcie_reqsize;		/* cached MRRS encoding (PCIE_CAP_DEVCTRL_MRRS_*) */
	uint16	pcie_mps;		/* cached MPS encoding (PCIE_CAP_DEVCTRL_MPS_*) */
	uint8	pciecap_devctrl2_offset; /* PCIE DevControl2 reg offset in the config space */
	uint32	pciecap_ltr0_reg_offset; /* PCIE LTR0 reg offset in the config space */
	uint32	pciecap_ltr1_reg_offset; /* PCIE LTR1 reg offset in the config space */
	uint32	pciecap_ltr2_reg_offset; /* PCIE LTR2 reg offset in the config space */
	/* NOTE(review): presumably a shadow of config space for save/restore —
	 * no user visible in this chunk; confirm against the rest of the file.
	 */
	uint8	pcie_configspace[PCI_CONFIG_SPACE_SIZE];
} pcicore_info_t;

/* debug/trace */
#define	PCI_ERROR(args)

/* routines to access mdio slave device registers */
static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk);
static int pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write, uint *val);
static int pciegen1_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write,
	uint *val);
static int pciegen2_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write,
	uint *val, bool slave_bypass);
static int pcie_mdiowrite(pcicore_info_t *pi, uint physmedia, uint readdr, uint val);
static int pcie_mdioread(pcicore_info_t *pi, uint physmedia, uint readdr, uint *ret_val);

static void pcie_extendL1timer(pcicore_info_t *pi, bool extend);
static void pcie_clkreq_upd(pcicore_info_t *pi, uint state);

static void pcie_war_aspm_clkreq(pcicore_info_t *pi);
static void pcie_war_serdes(pcicore_info_t *pi);
static void pcie_war_noplldown(pcicore_info_t *pi);
static void pcie_war_polarity(pcicore_info_t *pi);
static void pcie_war_pci_setup(pcicore_info_t *pi);
static void pcie_power_save_upd(pcicore_info_t *pi, bool up);

static bool pcicore_pmecap(pcicore_info_t *pi);
static void pcicore_fixlatencytimer(pcicore_info_t* pch, uint8 timer_val);

/* Bus-core classification: GEN1 = original PCIe core, GEN2 = PCIe Gen2 core */
#define PCIE_GEN1(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
	((sih)->buscoretype == PCIE_CORE_ID))
#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
	((sih)->buscoretype == PCIE2_CORE_ID))
#define PCIE(sih) (PCIE_GEN1(sih) || PCIE_GEN2(sih))

/* Gen1 cores rev 3..5 need the ASPM/CLKREQ workarounds below */
#define PCIEGEN1_ASPM(sih)	((PCIE_GEN1(sih)) &&	\
	(((sih)->buscorerev 
>= 3) && ((sih)->buscorerev <= 5))) 95 96#define DWORD_ALIGN(x) (x & ~(0x03)) 97#define BYTE_POS(x) (x & 0x3) 98#define WORD_POS(x) (x & 0x1) 99 100#define BYTE_SHIFT(x) (8 * BYTE_POS(x)) 101#define WORD_SHIFT(x) (16 * WORD_POS(x)) 102 103#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF) 104#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF) 105 106#define read_pci_cfg_byte(a) \ 107 (BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff) 108 109#define read_pci_cfg_word(a) \ 110 (WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff) 111 112#define write_pci_cfg_byte(a, val) do { \ 113 uint32 tmpval; \ 114 tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFF << BYTE_POS(a)) | \ 115 val << BYTE_POS(a); \ 116 OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ 117 } while (0) 118 119#define write_pci_cfg_word(a, val) do { \ 120 uint32 tmpval; \ 121 tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFFFF << WORD_POS(a)) | \ 122 val << WORD_POS(a); \ 123 OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ 124 } while (0) 125 126/* delay needed between the mdio control/ mdiodata register data access */ 127#define PR28829_DELAY() OSL_DELAY(10) 128 129/** 130 * Initialize the PCI core. It's caller's responsibility to make sure that this is done 131 * only once 132 */ 133void * 134pcicore_init(si_t *sih, osl_t *osh, void *regs) 135{ 136 pcicore_info_t *pi; 137 uint8 cap_ptr; 138 139 ASSERT(sih->bustype == PCI_BUS); 140 141 /* alloc pcicore_info_t */ 142 if ((pi = MALLOC(osh, sizeof(pcicore_info_t))) == NULL) { 143 PCI_ERROR(("pci_attach: malloc failed! 
malloced %d bytes\n", MALLOCED(osh))); 144 return (NULL); 145 } 146 147 bzero(pi, sizeof(pcicore_info_t)); 148 149 pi->sih = sih; 150 pi->osh = osh; 151 152 if (sih->buscoretype == PCIE2_CORE_ID) { 153 pi->regs.pcieregs = (sbpcieregs_t*)regs; 154 cap_ptr = pcicore_find_pci_capability(pi->osh, PCI_CAP_PCIECAP_ID, NULL, NULL); 155 ASSERT(cap_ptr); 156 pi->pciecap_devctrl_offset = cap_ptr + PCIE_CAP_DEVCTRL_OFFSET; 157 pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET; 158 pi->pciecap_devctrl2_offset = cap_ptr + PCIE_CAP_DEVCTRL2_OFFSET; 159 pi->pciecap_ltr0_reg_offset = cap_ptr + PCIE_CAP_LTR0_REG_OFFSET; 160 pi->pciecap_ltr1_reg_offset = cap_ptr + PCIE_CAP_LTR1_REG_OFFSET; 161 pi->pciecap_ltr2_reg_offset = cap_ptr + PCIE_CAP_LTR2_REG_OFFSET; 162 } else if (sih->buscoretype == PCIE_CORE_ID) { 163 pi->regs.pcieregs = (sbpcieregs_t*)regs; 164 cap_ptr = pcicore_find_pci_capability(pi->osh, PCI_CAP_PCIECAP_ID, NULL, NULL); 165 ASSERT(cap_ptr); 166 pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET; 167 pi->pciecap_devctrl_offset = cap_ptr + PCIE_CAP_DEVCTRL_OFFSET; 168 pi->pciecap_devctrl2_offset = cap_ptr + PCIE_CAP_DEVCTRL2_OFFSET; 169 pi->pciecap_ltr0_reg_offset = cap_ptr + PCIE_CAP_LTR0_REG_OFFSET; 170 pi->pciecap_ltr1_reg_offset = cap_ptr + PCIE_CAP_LTR1_REG_OFFSET; 171 pi->pciecap_ltr2_reg_offset = cap_ptr + PCIE_CAP_LTR2_REG_OFFSET; 172 pi->pcie_power_save = TRUE; /* Enable pcie_power_save by default */ 173 } else 174 pi->regs.pciregs = (sbpciregs_t*)regs; 175 176 return pi; 177} 178 179void 180pcicore_deinit(void *pch) 181{ 182 pcicore_info_t *pi = (pcicore_info_t *)pch; 183 184 185 if (pi == NULL) 186 return; 187 MFREE(pi->osh, pi, sizeof(pcicore_info_t)); 188} 189 190/** return cap_offset if requested capability exists in the PCI config space */ 191/* Note that it's caller's responsibility to make sure it's a pci bus */ 192uint8 193pcicore_find_pci_capability(osl_t *osh, uint8 req_cap_id, uchar *buf, uint32 *buflen) 194{ 195 uint8 cap_id; 
	uint8 cap_ptr = 0;
	uint32 bufsize;
	uint8 byte_val;

	/* check for Header type 0 */
	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
		goto end;

	/* check if the capability pointer field exists */
	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
	if (!(byte_val & PCI_CAPPTR_PRESENT))
		goto end;

	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
	/* check if the capability pointer is 0x00 */
	if (cap_ptr == 0x00)
		goto end;

	/* walk the capability linked list looking for the requested cap ID;
	 * each entry is {cap_id, next_ptr, data...} and next_ptr == 0 ends it
	 */
	cap_id = read_pci_cfg_byte(cap_ptr);

	while (cap_id != req_cap_id) {
		cap_ptr = read_pci_cfg_byte((cap_ptr+1));
		if (cap_ptr == 0x00) break;
		cap_id = read_pci_cfg_byte(cap_ptr);
	}
	if (cap_id != req_cap_id) {
		goto end;
	}
	/* found the caller requested capability */
	if ((buf != NULL) && (buflen != NULL)) {
		uint8 cap_data;

		bufsize = *buflen;
		if (!bufsize) goto end;
		*buflen = 0;
		/* copy the capability data excluding cap ID and next ptr */
		cap_data = cap_ptr + 2;
		if ((bufsize + cap_data) > SZPCR)
			bufsize = SZPCR - cap_data;
		*buflen = bufsize;
		while (bufsize--) {
			*buf = read_pci_cfg_byte(cap_data);
			cap_data++;
			buf++;
		}
	}
end:
	return cap_ptr;
}

/** Register Access API */
/* Indirect read of a PCIe core register; addrtype selects the config-register
 * vs. protocol-register window. Returns 0xFFFFFFFF for unknown bus types.
 * The read-back of the address register after writing it flushes the write
 * before the data register is read — do not reorder.
 */
uint
pcie_readreg(si_t *sih, sbpcieregs_t *pcieregs, uint addrtype, uint offset)
{
	uint retval = 0xFFFFFFFF;
	osl_t *osh = si_osh(sih);

	ASSERT(pcieregs != NULL);
	BCM_REFERENCE(osh);

	if ((BUSTYPE(sih->bustype) == SI_BUS) || PCIE_GEN1(sih)) {
		switch (addrtype) {
			case PCIE_CONFIGREGS:
				W_REG(osh, (&pcieregs->configaddr), offset);
				(void)R_REG(osh, (&pcieregs->configaddr));
				retval = R_REG(osh, &(pcieregs->configdata));
				break;
			case PCIE_PCIEREGS:
				W_REG(osh, &(pcieregs->u.pcie1.pcieindaddr), offset);
				(void)R_REG(osh, (&pcieregs->u.pcie1.pcieindaddr));
				retval = R_REG(osh, &(pcieregs->u.pcie1.pcieinddata));
				break;
			default:
				ASSERT(0);
				break;
		}
	}
	else if (PCIE_GEN2(sih)) {
		W_REG(osh, (&pcieregs->configaddr), offset);
		(void)R_REG(osh, (&pcieregs->configaddr));
		retval = R_REG(osh, &(pcieregs->configdata));
	}

	return retval;
}

/* Indirect write of a PCIe core register; counterpart of pcie_readreg().
 * Always returns 0.
 */
uint
pcie_writereg(si_t *sih, sbpcieregs_t *pcieregs, uint addrtype, uint offset, uint val)
{
	osl_t *osh = si_osh(sih);

	ASSERT(pcieregs != NULL);
	BCM_REFERENCE(osh);

	if ((BUSTYPE(sih->bustype) == SI_BUS) || PCIE_GEN1(sih)) {
		switch (addrtype) {
			case PCIE_CONFIGREGS:
				W_REG(osh, (&pcieregs->configaddr), offset);
				W_REG(osh, (&pcieregs->configdata), val);
				break;
			case PCIE_PCIEREGS:
				W_REG(osh, (&pcieregs->u.pcie1.pcieindaddr), offset);
				W_REG(osh, (&pcieregs->u.pcie1.pcieinddata), val);
				break;
			default:
				ASSERT(0);
				break;
		}
	}
	else if (PCIE_GEN2(sih)) {
		W_REG(osh, (&pcieregs->configaddr), offset);
		W_REG(osh, (&pcieregs->configdata), val);
	}
	return 0;
}

/* Select the MDIO register block on gen1 serdes (two-level register map,
 * core rev >= 10). Returns FALSE if the transaction does not complete
 * within ~200ms of polling.
 */
static bool
pcie_mdiosetblock(pcicore_info_t *pi, uint blk)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint mdiodata, i = 0;
	uint pcie_serdes_spinwait = 200;

	mdiodata = MDIODATA_START | MDIODATA_WRITE | (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
	        (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) | MDIODATA_TA | (blk << 4);
	W_REG(pi->osh, &pcieregs->u.pcie1.mdiodata, mdiodata);

	PR28829_DELAY();
	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		if (R_REG(pi->osh, &(pcieregs->u.pcie1.mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
			break;
		}
		OSL_DELAY(1000);
		i++;
	}

	if (i >= pcie_serdes_spinwait) {
		PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
		return FALSE;
	}

	return TRUE;
}

static bool
pcie2_mdiosetblock(pcicore_info_t *pi, uint blk)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint mdiodata, mdioctrl, i = 0;
	uint pcie_serdes_spinwait = 200;

	mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
	W_REG(pi->osh, &pcieregs->u.pcie2.mdiocontrol, mdioctrl);

	mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
	W_REG(pi->osh, &pcieregs->u.pcie2.mdiowrdata, mdiodata);

	PR28829_DELAY();
	/* retry till the transaction is complete; hardware clears DONE when finished */
	while (i < pcie_serdes_spinwait) {
		if (!(R_REG(pi->osh, &(pcieregs->u.pcie2.mdiowrdata)) & MDIODATA2_DONE)) {
			break;
		}
		OSL_DELAY(1000);
		i++;
	}

	if (i >= pcie_serdes_spinwait) {
		PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
		return FALSE;
	}

	return TRUE;
}

/* Dispatch an MDIO read/write to the gen1 or gen2 implementation.
 * Returns 0 on success, nonzero/-1 otherwise.
 */
static int
pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write, uint *val)
{
	if (PCIE_GEN1(pi->sih))
		return (pciegen1_mdioop(pi, physmedia, regaddr, write, val));
	else if (PCIE_GEN2(pi->sih))
		return (pciegen2_mdioop(pi, physmedia, regaddr, write, val, 0));
	else
		return 0xFFFFFFFF;
}

/* Gen2-core MDIO access to the serdes.
 * NOTE(review): the timeout path falls through to 'return 0' just like the
 * success path, so callers cannot distinguish a timed-out read — presumably
 * intentional best-effort behavior; confirm before changing.
 */
static int
pciegen2_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write, uint *val,
	bool slave_bypass)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
	uint32 *reg32;

	if (!PCIE_GEN2(pi->sih))
		ASSERT(0);

	pcie2_mdiosetblock(pi, physmedia);

	/* enable mdio access to SERDES */
	mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
	mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);

	if (slave_bypass)
		mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;

	if (!write)
		mdio_ctrl |= MDIOCTL2_READ;

	W_REG(pi->osh, (&pcieregs->u.pcie2.mdiocontrol), mdio_ctrl);
	if (write) {
		reg32 = (uint32 *)&(pcieregs->u.pcie2.mdiowrdata);
		W_REG(pi->osh, reg32, *val | MDIODATA2_DONE);
	}
	else
		reg32 = (uint32 *)&(pcieregs->u.pcie2.mdiorddata);

	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		if (!(R_REG(pi->osh, reg32) & MDIODATA2_DONE)) {
			if (!write)
				*val = (R_REG(pi->osh, reg32) & MDIODATA2_MASK);
			return 0;
		}
		OSL_DELAY(1000);
		i++;
	}
	return 0;
}

/* Gen1-core MDIO access to the serdes. Returns 0 on success, 1 on timeout.
 * Rev >= 10 serdes uses a block-select indirection (pcie_mdiosetblock) and a
 * 20x longer spinwait.
 */
static int
pciegen1_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write, uint *val)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint mdiodata;
	uint i = 0;
	uint pcie_serdes_spinwait = 10;

	if (!PCIE_GEN1(pi->sih))
		ASSERT(0);

	/* enable mdio access to SERDES */
	W_REG(pi->osh, (&pcieregs->u.pcie1.mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);

	if (pi->sih->buscorerev >= 10) {
		/* new serdes is slower in rw, using two layers of reg address mapping */
		if (!pcie_mdiosetblock(pi, physmedia))
			return 1;
		mdiodata = (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
		        (regaddr << MDIODATA_REGADDR_SHF);
		pcie_serdes_spinwait *= 20;
	} else {
		mdiodata = (physmedia << MDIODATA_DEVADDR_SHF_OLD) |
		        (regaddr << MDIODATA_REGADDR_SHF_OLD);
	}

	if (!write)
		mdiodata |= (MDIODATA_START | MDIODATA_READ | MDIODATA_TA);
	else
		mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA | *val);

	W_REG(pi->osh, &pcieregs->u.pcie1.mdiodata, mdiodata);

	PR28829_DELAY();

	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		if (R_REG(pi->osh, &(pcieregs->u.pcie1.mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
			if (!write) {
				PR28829_DELAY();
				*val = (R_REG(pi->osh, &(pcieregs->u.pcie1.mdiodata)) &
				        MDIODATA_MASK);
			}
			/* Disable mdio access to SERDES */
			W_REG(pi->osh, (&pcieregs->u.pcie1.mdiocontrol), 0);
			return 0;
		}
		OSL_DELAY(1000);
		i++;
	}

	PCI_ERROR(("pcie_mdioop: timed out op: %d\n", write));
	/* Disable mdio access to SERDES */
	W_REG(pi->osh, (&pcieregs->u.pcie1.mdiocontrol), 0);
	return 1;
}

/** use the mdio interface to read from mdio slaves */
static int
pcie_mdioread(pcicore_info_t *pi, uint physmedia, uint regaddr, uint *regval)
{
	return pcie_mdioop(pi, physmedia, regaddr, FALSE, regval);
}

/** use the mdio interface to write to mdio slaves */
static int
pcie_mdiowrite(pcicore_info_t *pi, uint physmedia, uint regaddr, uint val)
{
	return pcie_mdioop(pi, physmedia, regaddr, TRUE, &val);
}

/* ***** Support functions ***** */

/**
 * By default, PCIe devices are not allowed to create payloads of greater than 128 bytes.
 * Maximum Read Request Size is a PCIe parameter that is advertized to the host, so the host can
 * choose a balance between high throughput and low 'chunkiness' on the bus. Regardless of the
 * setting of this (hardware) field, the core does not initiate read requests larger than 512 bytes.
 *
 * mask == 0 is a pure read; otherwise the MRRS field is replaced with 'val'
 * (clamped to 128B on gen1 cores before rev 18, which cannot go higher).
 * Returns the (possibly updated) DevControl register value, or 0 when the
 * capability offset is unknown.
 */
static uint32
pcie_devcontrol_mrrs(void *pch, uint32 mask, uint32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint32 reg_val;
	uint8 offset;

	offset = pi->pciecap_devctrl_offset;
	if (!offset)
		return 0;

	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
	/* set operation */
	if (mask) {
		if (val > PCIE_CAP_DEVCTRL_MRRS_128B) {
			if (PCIE_GEN1(pi->sih) && (pi->sih->buscorerev < 18)) {
				PCI_ERROR(("%s pcie corerev %d doesn't support >128B MRRS",
					__FUNCTION__, pi->sih->buscorerev));
				val = PCIE_CAP_DEVCTRL_MRRS_128B;
			}
		}

		reg_val &= ~PCIE_CAP_DEVCTRL_MRRS_MASK;
		reg_val |= (val << PCIE_CAP_DEVCTRL_MRRS_SHIFT) & PCIE_CAP_DEVCTRL_MRRS_MASK;

		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
	}
	return reg_val;
}

/* Read or update the Max Payload Size field of the PCIe DevControl register. */
static uint32
pcie_devcontrol_mps(void *pch, uint32 mask, uint32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint32 reg_val;
	uint8 offset;

	offset = pi->pciecap_devctrl_offset;
	if (!offset)
		return 0;

	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
	/* set operation */
	if (mask) {
		reg_val &= ~PCIE_CAP_DEVCTRL_MPS_MASK;
		reg_val |= (val << PCIE_CAP_DEVCTRL_MPS_SHIFT) & PCIE_CAP_DEVCTRL_MPS_MASK;

		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
	}
	return reg_val;
}

/* Get/set the CLKREQ enable bit in Link Control. Returns 1 if CLKREQ is
 * enabled after the (optional) update, else 0.
 */
uint8
pcie_clkreq(void *pch, uint32 mask, uint32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint32 reg_val;
	uint8 offset;

	offset = pi->pciecap_lcreg_offset;
	if (!offset)
		return 0;

	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
	/* set operation */
	if (mask) {
		if (val)
			reg_val |= PCIE_CLKREQ_ENAB;
		else
			reg_val &= ~PCIE_CLKREQ_ENAB;
		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
	}
	if (reg_val & PCIE_CLKREQ_ENAB)
		return 1;
	else
		return 0;
}

/* Get/set the LTR mechanism enable bit in DevControl2. Returns 1 if LTR is
 * enabled after the (optional) update, else 0.
 */
uint8
pcie_ltrenable(void *pch, uint32 mask, uint32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint32 reg_val;
	uint8 offset;

	offset = pi->pciecap_devctrl2_offset;
	if (!offset)
		return 0;

	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));

	/* set operation */
	if (mask) {
		if (val)
			reg_val |= PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK;
		else
			reg_val &= ~PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK;
		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
	}
	if (reg_val & PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK)
		return 1;
	else
		return 0;
}

/* JIRA:SWWLAN-28745
   val and return value:
	0	Disabled
	1	Enable using Message signaling[Var A]
	2	Enable using Message signaling[Var B]
	3	Enable using WAKE# signaling
*/
uint8
pcie_obffenable(void *pch, uint32 mask, uint32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint32 reg_val;
	uint8 offset;

	offset = pi->pciecap_devctrl2_offset;
	if (!offset)
		return 0;

	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));

	/* set operation */
	if (mask) {
		reg_val = (reg_val & ~PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK) |
			((val << PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT) &
			PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK);
		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
	}

	return (reg_val & PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK) >> PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT;
}

/* Get/set one of the three LTR latency registers (gen2 cores only;
 * returns 0 on gen1, unknown register selectors, or missing offsets).
 */
uint32
pcie_ltr_reg(void *pch, uint32 reg, uint32 mask, uint32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint32 reg_val;
	uint32 offset;

	if (PCIE_GEN1(pi->sih))
		return 0;

	if (reg == PCIE_CAP_LTR0_REG)
		offset = pi->pciecap_ltr0_reg_offset;
	else if (reg == PCIE_CAP_LTR1_REG)
		offset = pi->pciecap_ltr1_reg_offset;
	else if (reg == PCIE_CAP_LTR2_REG)
		offset = pi->pciecap_ltr2_reg_offset;
	else {
		PCI_ERROR(("pcie_ltr_reg: unsupported LTR register offset %d\n",
			reg));
		return 0;
	}

	if (!offset)
		return 0;

	if (mask) { /* set operation */
		reg_val = val;
		pcie_writereg(pi->sih, pi->regs.pcieregs, PCIE_CONFIGREGS, offset, reg_val);
	}
	else { /* get operation */
		reg_val = pcie_readreg(pi->sih, pi->regs.pcieregs, PCIE_CONFIGREGS, offset);
	}

	return reg_val;
}

/* Get/set the ltrspacing core register (gen2 only; returns 0 on gen1). */
uint32
pcieltrspacing_reg(void *pch, uint32 mask, uint32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	si_t *sih = pi->sih;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint32 retval;

	if (PCIE_GEN1(sih))
		return 0;

	ASSERT(pcieregs != NULL);

	if (mask) { /* set operation */
		retval = val;
		W_REG(pi->osh, &(pcieregs->ltrspacing), val);
	}
	else { /* get operation */
		retval = R_REG(pi->osh, &(pcieregs->ltrspacing));
	}

	return retval;
}

/* Get/set the ltrhysteresiscnt core register (gen2 only; returns 0 on gen1). */
uint32
pcieltrhysteresiscnt_reg(void *pch, uint32 mask, uint32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	si_t *sih = pi->sih;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint32 retval;

	if (PCIE_GEN1(sih))
		return 0;

	ASSERT(pcieregs != NULL);

	if (mask) { /* set operation */
		retval = val;
		W_REG(pi->osh, &(pcieregs->ltrhysteresiscnt), val);
	}
	else { /* get operation */
		retval = R_REG(pi->osh, &(pcieregs->ltrhysteresiscnt));
	}

	return retval;
}

/* Extend (or restore) the ASPM L1 entry timer on gen1 cores rev >= 7;
 * the trailing read-back flushes the indirect write.
 */
static void
pcie_extendL1timer(pcicore_info_t *pi, bool extend)
{
	uint32 w;
	si_t *sih = pi->sih;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;

	if (!PCIE_GEN1(sih))
		return;

	w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);

	if (extend && sih->buscorerev >= 7)
		w |= PCIE_ASPMTIMER_EXTEND;
	else
		w &= ~PCIE_ASPMTIMER_EXTEND;
	pcie_writereg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
	w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
}

/** centralized clkreq control policy */
static void
pcie_clkreq_upd(pcicore_info_t *pi, uint state)
{
	si_t *sih = pi->sih;
	ASSERT(PCIE(sih));

	if (!PCIE_GEN1(sih))
		return;

	switch (state) {
		case SI_DOATTACH:
			if (PCIEGEN1_ASPM(sih))
				pcie_clkreq((void *)pi, 1, 0);
			break;
		case SI_PCIDOWN:
			if (sih->buscorerev == 6) {	/* turn on serdes PLL down */
				si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol_addr),
					~0, 0);
				si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol_data),
					~0x40, 0);
			} else if (pi->pcie_pr42767) {
				pcie_clkreq((void *)pi, 1, 1);
			}
			break;
		case SI_PCIUP:
			if (sih->buscorerev == 6) {	/* turn off serdes PLL down */
				si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol_addr),
					~0, 0);
				si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol_data),
					~0x40, 0x40);
			} else if (PCIEGEN1_ASPM(sih)) {	/* disable clkreq */
				pcie_clkreq((void *)pi, 1, 0);
			}
			break;
		default:
			ASSERT(0);
			break;
	}
}

/* ***** PCI core WARs ***** */
/* Done only once at attach time */
static void
pcie_war_polarity(pcicore_info_t *pi)
{
	uint32 w;

	if (pi->pcie_polarity != 0)
		return;

	w = pcie_readreg(pi->sih, pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);

	/* Detect the current polarity at attach and force that polarity and
	 * disable changing the polarity
	 */
	if ((w & PCIE_PLP_POLARITYINV_STAT) == 0)
		pi->pcie_polarity = (SERDES_RX_CTRL_FORCE);
	else
		pi->pcie_polarity = (SERDES_RX_CTRL_FORCE | SERDES_RX_CTRL_POLARITY);
}

/**
 * enable ASPM and CLKREQ if srom doesn't have it.
 * Needs to happen when update to shadow SROM is needed
 *	: Coming out of 'standby'/'hibernate'
 *	: If pcie_war_aspm_ovr state changed
 */
static void
pcie_war_aspm_clkreq(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	si_t *sih = pi->sih;
	uint16 val16, *reg16;
	uint32 w;

	if (!PCIEGEN1_ASPM(sih))
		return;

	/* bypass this on QT or VSIM */
	if (!ISSIM_ENAB(sih)) {

		/* mirror the ASPM override into the SROM shadow ... */
		reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
		val16 = R_REG(pi->osh, reg16);

		val16 &= ~SRSH_ASPM_ENB;
		if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
			val16 |= SRSH_ASPM_ENB;
		else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
			val16 |= SRSH_ASPM_L1_ENB;
		else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
			val16 |= SRSH_ASPM_L0s_ENB;

		W_REG(pi->osh, reg16, val16);

		/* ... and into the Link Control ASPM field in config space */
		w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32));
		w &= ~PCIE_ASPM_ENAB;
		w |= pi->pcie_war_aspm_ovr;
		OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32), w);
	}

	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
	val16 = R_REG(pi->osh, reg16);

	if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
		val16 |= SRSH_CLKREQ_ENB;
		pi->pcie_pr42767 = TRUE;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;

	W_REG(pi->osh, reg16, val16);
}

/* Force the expected PME bits into the SROM shadow on core rev 18/19. */
static void
pcie_war_pmebits(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint16 val16, *reg16;

	if (pi->sih->buscorerev != 18 && pi->sih->buscorerev != 19)
		return;

	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV8];
	val16 = R_REG(pi->osh, reg16);
	if (val16 != pi->pmebits) {
		PCI_ERROR(("pcie_war_pmebits: pmebits mismatch 0x%x (was 0x%x)\n",
			val16, pi->pmebits));
		pi->pmebits = 0x1f30;
		W_REG(pi->osh, reg16, pi->pmebits);
		val16 = R_REG(pi->osh, reg16);
		PCI_ERROR(("pcie_war_pmebits: update pmebits to 0x%x\n", val16));
	}
886} 887 888/** Apply the polarity determined at the start */ 889/* Needs to happen when coming out of 'standby'/'hibernate' */ 890static void 891pcie_war_serdes(pcicore_info_t *pi) 892{ 893 uint32 w = 0; 894 895 if (pi->pcie_polarity != 0) 896 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CTRL, pi->pcie_polarity); 897 898 pcie_mdioread(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w); 899 if (w & PLL_CTRL_FREQDET_EN) { 900 w &= ~PLL_CTRL_FREQDET_EN; 901 pcie_mdiowrite(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w); 902 } 903} 904 905/** Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */ 906/* Needs to happen when coming out of 'standby'/'hibernate' */ 907static void 908BCMINITFN(pcie_misc_config_fixup)(pcicore_info_t *pi) 909{ 910 sbpcieregs_t *pcieregs = pi->regs.pcieregs; 911 uint16 val16, *reg16; 912 913 reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG]; 914 val16 = R_REG(pi->osh, reg16); 915 916 if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) { 917 val16 |= SRSH_L23READY_EXIT_NOPERST; 918 W_REG(pi->osh, reg16, val16); 919 } 920} 921 922/* Needs to happen when coming out of 'standby'/'hibernate' */ 923static void 924pcie_war_noplldown(pcicore_info_t *pi) 925{ 926 sbpcieregs_t *pcieregs = pi->regs.pcieregs; 927 uint16 *reg16; 928 929 ASSERT(pi->sih->buscorerev == 7); 930 931 /* turn off serdes PLL down */ 932 si_corereg(pi->sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol), 933 CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN); 934 935 /* clear srom shadow backdoor */ 936 reg16 = &pcieregs->sprom[SRSH_BD_OFFSET]; 937 W_REG(pi->osh, reg16, 0); 938} 939 940/** Needs to happen when coming out of 'standby'/'hibernate' */ 941static void 942pcie_war_pci_setup(pcicore_info_t *pi) 943{ 944 si_t *sih = pi->sih; 945 sbpcieregs_t *pcieregs = pi->regs.pcieregs; 946 uint32 w; 947 948 if ((sih->buscorerev == 0) || (sih->buscorerev == 1)) { 949 w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG); 950 w |= 0x8; 951 pcie_writereg(sih, pcieregs, 
PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG, w); 952 } 953 954 if (sih->buscorerev == 1) { 955 w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG); 956 w |= (0x40); 957 pcie_writereg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w); 958 } 959 960 if (sih->buscorerev == 0) { 961 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128); 962 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100); 963 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466); 964 } else if (PCIEGEN1_ASPM(sih)) { 965 /* Change the L1 threshold for better performance */ 966 w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); 967 w &= ~(PCIE_L1THRESHOLDTIME_MASK); 968 w |= (PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT); 969 pcie_writereg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w); 970 971 pcie_war_serdes(pi); 972 973 pcie_war_aspm_clkreq(pi); 974 } else if (pi->sih->buscorerev == 7) 975 pcie_war_noplldown(pi); 976 977 /* Note that the fix is actually in the SROM, that's why this is open-ended */ 978 if (pi->sih->buscorerev >= 6) 979 pcie_misc_config_fixup(pi); 980} 981 982void 983pcie_war_ovr_aspm_update(void *pch, uint8 aspm) 984{ 985 pcicore_info_t *pi = (pcicore_info_t *)pch; 986 987 if (!PCIE_GEN1(pi->sih)) 988 return; 989 990 if (!PCIEGEN1_ASPM(pi->sih)) 991 return; 992 993 /* Validate */ 994 if (aspm > PCIE_ASPM_ENAB) 995 return; 996 997 pi->pcie_war_aspm_ovr = aspm; 998 999 /* Update the current state */ 1000 pcie_war_aspm_clkreq(pi); 1001} 1002 1003 1004void 1005pcie_power_save_enable(void *pch, bool enable) 1006{ 1007 pcicore_info_t *pi = (pcicore_info_t *)pch; 1008 1009 1010 if (!pi) 1011 return; 1012 1013 pi->pcie_power_save = enable; 1014} 1015 1016static void 1017pcie_power_save_upd(pcicore_info_t *pi, bool up) 1018{ 1019 si_t *sih = pi->sih; 1020 1021 if (!pi->pcie_power_save) 1022 return; 1023 1024 1025 if ((sih->buscorerev >= 15) && (sih->buscorerev <= 20)) { 1026 1027 pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, 
BLK1_PWR_MGMT1, 1, 0x7F64); 1028 1029 if (up) 1030 pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT3, 1, 0x74); 1031 else 1032 pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT3, 1, 0x7C); 1033 1034 } else if ((sih->buscorerev >= 21) && (sih->buscorerev <= 22)) { 1035 1036 pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT1, 1, 0x7E65); 1037 1038 if (up) 1039 pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT3, 1, 0x175); 1040 else 1041 pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT3, 1, 0x17D); 1042 } 1043} 1044 1045void 1046pcie_set_request_size(void *pch, uint16 size) 1047{ 1048 pcicore_info_t *pi = (pcicore_info_t *)pch; 1049 si_t *sih; 1050 1051 if (!pi) 1052 return; 1053 1054 sih = pi->sih; 1055 1056 if (size == 128) 1057 pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_128B; 1058 else if (size == 256) 1059 pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_256B; 1060 else if (size == 512) 1061 pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_512B; 1062 else if (size == 1024) 1063 pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_1024B; 1064 else 1065 return; 1066 1067 if (PCIE_GEN1(sih)) { 1068 if (pi->sih->buscorerev == 18 || pi->sih->buscorerev == 19) 1069 pcie_devcontrol_mrrs(pi, PCIE_CAP_DEVCTRL_MRRS_MASK, 1070 (uint32)pi->pcie_reqsize); 1071 } 1072 else if (PCIE_GEN2(sih)) { 1073 pcie_devcontrol_mrrs(pi, PCIE_CAP_DEVCTRL_MRRS_MASK, (uint32)pi->pcie_reqsize); 1074 } 1075 else 1076 ASSERT(0); 1077} 1078 1079uint16 1080pcie_get_request_size(void *pch) 1081{ 1082 pcicore_info_t *pi = (pcicore_info_t *)pch; 1083 1084 if (!pi) 1085 return (0); 1086 1087 if (pi->pcie_reqsize == PCIE_CAP_DEVCTRL_MRRS_128B) 1088 return (128); 1089 else if (pi->pcie_reqsize == PCIE_CAP_DEVCTRL_MRRS_256B) 1090 return (256); 1091 else if (pi->pcie_reqsize == PCIE_CAP_DEVCTRL_MRRS_512B) 1092 return (512); 1093 return (0); 1094} 1095 1096void 1097pcie_set_maxpayload_size(void *pch, uint16 size) 1098{ 1099 pcicore_info_t *pi = (pcicore_info_t *)pch; 1100 1101 if (!pi) 1102 return; 1103 1104 
if (size == 128) 1105 pi->pcie_mps = PCIE_CAP_DEVCTRL_MPS_128B; 1106 else if (size == 256) 1107 pi->pcie_mps = PCIE_CAP_DEVCTRL_MPS_256B; 1108 else if (size == 512) 1109 pi->pcie_mps = PCIE_CAP_DEVCTRL_MPS_512B; 1110 else if (size == 1024) 1111 pi->pcie_mps = PCIE_CAP_DEVCTRL_MPS_1024B; 1112 else 1113 return; 1114 1115 pcie_devcontrol_mps(pi, PCIE_CAP_DEVCTRL_MPS_MASK, (uint32)pi->pcie_mps); 1116} 1117 1118uint16 1119pcie_get_maxpayload_size(void *pch) 1120{ 1121 pcicore_info_t *pi = (pcicore_info_t *)pch; 1122 1123 if (!pi) 1124 return (0); 1125 1126 if (pi->pcie_mps == PCIE_CAP_DEVCTRL_MPS_128B) 1127 return (128); 1128 else if (pi->pcie_mps == PCIE_CAP_DEVCTRL_MPS_256B) 1129 return (256); 1130 else if (pi->pcie_mps == PCIE_CAP_DEVCTRL_MPS_512B) 1131 return (512); 1132 else if (pi->pcie_mps == PCIE_CAP_DEVCTRL_MPS_1024B) 1133 return (1024); 1134 return (0); 1135} 1136 1137void 1138pcie_disable_TL_clk_gating(void *pch) 1139{ 1140 /* disable TL clk gating is located in bit 4 of PCIEControl (Offset 0x000) */ 1141 pcicore_info_t *pi = (pcicore_info_t *)pch; 1142 si_t *sih = pi->sih; 1143 1144 if (!PCIE_GEN1(sih) && !PCIE_GEN2(sih)) 1145 return; 1146 1147 si_corereg(sih, sih->buscoreidx, 0, 0x10, 0x10); 1148} 1149 1150void 1151pcie_set_L1_entry_time(void *pch, uint32 val) 1152{ 1153 /* L1 entry time is located in bits [22:16] of register 0x1004 (pdl_control_1) */ 1154 pcicore_info_t *pi = (pcicore_info_t *)pch; 1155 si_t *sih = pi->sih; 1156 sbpcieregs_t *pcieregs = pi->regs.pcieregs; 1157 uint32 data; 1158 1159 if (!PCIE_GEN1(sih) && !PCIE_GEN2(sih)) 1160 return; 1161 1162 if (val > 0x7F) 1163 return; 1164 1165 data = pcie_readreg(sih, pcieregs, PCIE_CONFIGREGS, PCIECFGREG_PDL_CTRL1); 1166 pcie_writereg(pch, pcieregs, PCIE_CONFIGREGS, 1167 PCIECFGREG_PDL_CTRL1, (data & ~0x7F0000) | (val << 16)); 1168} 1169 1170/** mode : 0 -- reset, 1 -- tx, 2 -- rx */ 1171void 1172pcie_set_error_injection(void *pch, uint32 mode) 1173{ 1174 /* through reg_phy_ctl_7 - 0x181c */ 1175 
pcicore_info_t *pi = (pcicore_info_t *)pch; 1176 si_t *sih = pi->sih; 1177 sbpcieregs_t *pcieregs = pi->regs.pcieregs; 1178 1179 if (!PCIE_GEN1(sih) && !PCIE_GEN2(sih)) 1180 return; 1181 1182 if (mode == 0) 1183 pcie_writereg(pch, pcieregs, PCIE_CONFIGREGS, PCIECFGREG_REG_PHY_CTL7, 0); 1184 else if (mode == 1) 1185 pcie_writereg(pch, pcieregs, PCIE_CONFIGREGS, PCIECFGREG_REG_PHY_CTL7, 0x14031); 1186 else 1187 pcie_writereg(pch, pcieregs, PCIE_CONFIGREGS, PCIECFGREG_REG_PHY_CTL7, 0x2c031); 1188} 1189 1190void 1191pcie_set_L1substate(void *pch, uint32 substate) 1192{ 1193 pcicore_info_t *pi = (pcicore_info_t *)pch; 1194 si_t *sih = pi->sih; 1195 sbpcieregs_t *pcieregs = pi->regs.pcieregs; 1196 uint32 data; 1197 1198 ASSERT(PCIE_GEN2(sih)); 1199 ASSERT(substate <= 3); 1200 1201 if (substate != 0) { 1202 /* turn on ASPM L1 */ 1203 data = pcie_readreg(sih, pcieregs, PCIE_CONFIGREGS, pi->pciecap_lcreg_offset); 1204 pcie_writereg(sih, pcieregs, PCIE_CONFIGREGS, pi->pciecap_lcreg_offset, data | 2); 1205 1206 /* enable LTR */ 1207 pcie_ltrenable(pch, 1, 1); 1208 } 1209 1210 /* PML1_sub_control1 can only be accessed by OSL_PCI_xxxx_CONFIG */ 1211 data = OSL_PCI_READ_CONFIG(pi->osh, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32)) & 0xfffffff0; 1212 1213 /* JIRA:SWWLAN-28455 */ 1214 if (substate & 1) 1215 data |= PCI_PM_L1_2_ENA_MASK | ASPM_L1_2_ENA_MASK; 1216 1217 if (substate & 2) 1218 data |= PCI_PM_L1_1_ENA_MASK | ASPM_L1_1_ENA_MASK; 1219 1220 OSL_PCI_WRITE_CONFIG(pi->osh, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32), data); 1221} 1222 1223uint32 1224pcie_get_L1substate(void *pch) 1225{ 1226 pcicore_info_t *pi = (pcicore_info_t *)pch; 1227 si_t *sih = pi->sih; 1228 uint32 data, substate = 0; 1229 1230 ASSERT(PCIE_GEN2(sih)); 1231 UNUSED_PARAMETER(sih); 1232 1233 data = OSL_PCI_READ_CONFIG(pi->osh, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32)); 1234 1235 /* JIRA:SWWLAN-28455 */ 1236 if (data & (PCI_PM_L1_2_ENA_MASK | ASPM_L1_2_ENA_MASK)) 1237 substate |= 1; 1238 1239 if (data & 
	    (PCI_PM_L1_1_ENA_MASK | ASPM_L1_1_ENA_MASK))
		substate |= 2;

	return substate;
}

/* ***** Functions called during driver state changes ***** */
/**
 * Attach-time PCIe setup: choose the default maximum read request size,
 * decide whether the gen1 ASPM workaround is overridden (certain Apple
 * sromrev-4 boards, or boards flagged with BFL2_PCIEWAR_OVR, force it off),
 * and apply the one-time gen1 workarounds.  The WAR calls below must run
 * in this order.
 *
 * @param pvars  NVRAM/SROM variable buffer queried via getintvar()
 * @param state  clkreq state forwarded to pcie_clkreq_upd()
 */
void
BCMATTACHFN(pcicore_attach)(void *pch, char *pvars, int state)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	si_t *sih = pi->sih;

	if (!PCIE_GEN1(sih)) {
		/* gen2 cores: just pick a larger default MRRS for these chips */
		if ((BCM4360_CHIP_ID == CHIPID(sih->chip)) ||
		    (BCM43460_CHIP_ID == CHIPID(sih->chip)) ||
		    (BCM4350_CHIP_ID == CHIPID(sih->chip)) ||
		    (BCM4352_CHIP_ID == CHIPID(sih->chip)) ||
		    (BCM4335_CHIP_ID == CHIPID(sih->chip)))
			pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_1024B;
		return;
	}

	if (PCIEGEN1_ASPM(sih)) {
		if (((sih->boardvendor == VENDOR_APPLE) &&
		     ((uint8)getintvar(pvars, "sromrev") == 4) &&
		     ((uint8)getintvar(pvars, "boardrev") <= 0x71)) ||
		    ((uint32)getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR)) {
			pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
		} else {
			pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
		}
	}

	pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_128B;
	if (BCM4331_CHIP_ID == CHIPID(sih->chip))
		pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_512B;

	/* invalidate the cached config space until pcie_configspace_cache() runs */
	bzero(pi->pcie_configspace, PCI_CONFIG_SPACE_SIZE);

	/* These need to happen in this order only */
	pcie_war_polarity(pi);

	pcie_war_serdes(pi);

	pcie_war_aspm_clkreq(pi);

	pcie_clkreq_upd(pi, state);

	pcie_war_pmebits(pi);

	/* Alter default TX drive strength setting */
	if (sih->boardvendor == VENDOR_APPLE) {
		if (sih->boardtype == 0x8d)
			/* change the TX drive strength to max */
			pcicore_pcieserdesreg(pch, MDIO_DEV_TXCTRL0, 0x18, 0xff, 0x7f);
		else if (PCIE_DRIVE_STRENGTH_OVERRIDE(sih))
			/* change the drive strength to 700mv */
			pcicore_pcieserdesreg(pch, MDIO_DEV_TXCTRL0, 0x18, 0xff, 0x70);
	}
}

/**
 * Hardware-up hook: re-applies the power-save SERDES settings, the
 * latency-timer fix for CardBus 4321 boards, the PCI setup WARs and the
 * Apple TX drive strength overrides.  Gen1 cores only.
 */
void
pcicore_hwup(void *pch)
{
	pcicore_info_t *pi =
(pcicore_info_t *)pch; 1305 1306 if (!pi || !PCIE_GEN1(pi->sih)) 1307 return; 1308 1309 pcie_power_save_upd(pi, TRUE); 1310 1311 if (pi->sih->boardtype == CB2_4321_BOARD || pi->sih->boardtype == CB2_4321_AG_BOARD) 1312 pcicore_fixlatencytimer(pch, 0x20); 1313 1314 pcie_war_pci_setup(pi); 1315 1316 /* Alter default TX drive strength setting */ 1317 if (pi->sih->boardvendor == VENDOR_APPLE) { 1318 if (pi->sih->boardtype == 0x8d) 1319 /* change the TX drive strength to max */ 1320 pcicore_pcieserdesreg(pch, MDIO_DEV_TXCTRL0, 0x18, 0xff, 0x7f); 1321 else if (PCIE_DRIVE_STRENGTH_OVERRIDE(pi->sih)) 1322 /* change the drive strength to 700mv */ 1323 pcicore_pcieserdesreg(pch, MDIO_DEV_TXCTRL0, 0x18, 0xff, 0x70); 1324 } 1325} 1326 1327void 1328pcicore_up(void *pch, int state) 1329{ 1330 pcicore_info_t *pi = (pcicore_info_t *)pch; 1331 1332 if (!pi) 1333 return; 1334 1335 if (PCIE_GEN2(pi->sih)) { 1336 pcie_devcontrol_mrrs(pi, PCIE_CAP_DEVCTRL_MRRS_MASK, pi->pcie_reqsize); 1337 return; 1338 } 1339 1340 pcie_power_save_upd(pi, TRUE); 1341 1342 /* Restore L1 timer for better performance */ 1343 pcie_extendL1timer(pi, TRUE); 1344 1345 pcie_clkreq_upd(pi, state); 1346 1347 if (pi->sih->buscorerev == 18 || 1348 (pi->sih->buscorerev == 19 && !PCIE_MRRS_OVERRIDE(sih))) 1349 pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_128B; 1350 1351 pcie_devcontrol_mrrs(pi, PCIE_CAP_DEVCTRL_MRRS_MASK, pi->pcie_reqsize); 1352} 1353 1354/** When the device is going to enter D3 state (or the system is going to enter S3/S4 states */ 1355void 1356pcicore_sleep(void *pch) 1357{ 1358 pcicore_info_t *pi = (pcicore_info_t *)pch; 1359 uint32 w; 1360 1361 if (!pi || !PCIE_GEN1(pi->sih)) 1362 return; 1363 1364 pcie_power_save_upd(pi, FALSE); 1365 1366 1367 if (!PCIEGEN1_ASPM(pi->sih)) 1368 return; 1369 1370 1371 w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32)); 1372 w &= ~PCIE_CAP_LCREG_ASPML1; 1373 OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32), w); 1374 1375 
	pi->pcie_pr42767 = FALSE;
}

/**
 * Core-down hook: refresh clkreq, shorten the L1 timer for power savings
 * and drop the SERDES power-save settings.  Gen1 cores only.
 */
void
pcicore_down(void *pch, int state)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;

	if (!pi || !PCIE_GEN1(pi->sih))
		return;

	pcie_clkreq_upd(pi, state);

	/* Reduce L1 timer for better power savings */
	pcie_extendL1timer(pi, FALSE);

	pcie_power_save_upd(pi, FALSE);
}

/* ***** Wake-on-wireless-LAN (WOWL) support functions ***** */
/** Just uses PCI config accesses to find out, when needed before sb_attach is done */
bool
pcicore_pmecap_fast(osl_t *osh)
{
	uint8 cap_ptr;
	uint32 pmecap;

	cap_ptr = pcicore_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID, NULL, NULL);

	if (!cap_ptr)
		return FALSE;

	pmecap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));

	/* PME supported from at least one power state */
	return ((pmecap & PME_CAP_PM_STATES) != 0);
}

/**
 * return TRUE if PM capability exists in the pci config space
 * Uses and caches the information using core handle
 */
static bool
pcicore_pmecap(pcicore_info_t *pi)
{
	uint8 cap_ptr;
	uint32 pmecap;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint16*reg16;

	if (!pi->pmecap_offset) {
		cap_ptr = pcicore_find_pci_capability(pi->osh, PCI_CAP_POWERMGMTCAP_ID, NULL, NULL);
		if (!cap_ptr)
			return FALSE;

		pi->pmecap_offset = cap_ptr;

		/* snapshot the SROM shadow word used by pcie_war_pmebits()
		 * (NOTE: reuses SRSH_CLKREQ_OFFSET_REV8 -- confirm intent)
		 */
		reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV8];
		pi->pmebits = R_REG(pi->osh, reg16);

		pmecap = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset, sizeof(uint32));

		/* At least one state can generate PME */
		pi->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
	}

	return (pi->pmecap);
}

/** Enable PME generation */
void
pcicore_pmeen(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint32 w;

	/* if not pmecapable return */
	if (!pcicore_pmecap(pi))
		return;

	pcie_war_pmebits(pi);

	/* set PME_En in the power management control/status register */
	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));
	w |= (PME_CSR_PME_EN);
	OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32), w);
}

/** Return TRUE if PME status set */
bool
pcicore_pmestat(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint32 w;

	if (!pcicore_pmecap(pi))
		return FALSE;

	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));

	return (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;
}

/** Clear the PME status bit if it is set (PME_Stat is write-1-to-clear). */
void
pcicore_pmestatclr(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint32 w;

	if (!pcicore_pmecap(pi))
		return;

	pcie_war_pmebits(pi);
	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));

	PCI_ERROR(("pcicore_pmestatclr PMECSR : 0x%x\n", w));

	/* Writing a 1 to PMESTAT will clear it */
	if ((w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT) {
		OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32),
			w);
	}
}

/** Disable PME generation, clear the PME status bit if set */
void
pcicore_pmeclr(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint32 w;

	if (!pcicore_pmecap(pi))
		return;

	pcie_war_pmebits(pi);

	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));

	PCI_ERROR(("pcicore_pci_pmeclr PMECSR : 0x%x\n", w));

	/* clear PME_En; PME_Stat, if it was set in w, is cleared by the same
	 * write because that bit is write-1-to-clear
	 */
	w &= ~(PME_CSR_PME_EN);

	OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32), w);
}

/**
 * Program the PCI latency timer if it is currently zero.
 *
 * @param timer_val  value written to PCI_CFG_LATTIM when unset
 */
static void
pcicore_fixlatencytimer(pcicore_info_t* pch, uint8 timer_val)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	osl_t *osh;
	uint8 lattim;

	osh = pi->osh;

	lattim = read_pci_cfg_byte(PCI_CFG_LATTIM);

	/* only touch the latency timer when the platform left it at zero */
	if (!lattim) {
		PCI_ERROR(("%s: Modifying PCI_CFG_LATTIM from 0x%x to 0x%x\n",
			__FUNCTION__, lattim, timer_val));
		write_pci_cfg_byte(PCI_CFG_LATTIM, timer_val);
	}
}

/**
 * Read, and optionally write, the PCIe capability link control register
 * in config space.
 *
 * @param mask  non-zero requests a write of val before the read-back
 * @return      current register value, or 0 if the capability offset is unknown
 */
uint32
pcie_lcreg(void *pch, uint32 mask, uint32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint8 offset;

	offset = pi->pciecap_lcreg_offset;
	if (!offset)
		return 0;

	/* set operation */
	if (mask)
		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), val);

	return OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
}


/**
 * Generic PCIe core register access: optional write (mask non-zero)
 * followed by a read-back.
 *
 * @param type  address space selector passed to pcie_readreg/pcie_writereg
 */
uint32
pcicore_pciereg(void *pch, uint32 offset, uint32 mask, uint32 val, uint type)
{
	uint32 reg_val = 0;
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;

	if (mask) {
		PCI_ERROR(("PCIEREG: 0x%x writeval 0x%x\n", offset, val));
		pcie_writereg(pi->sih, pcieregs, type, offset, val);
	}

	/* Should not read register 0x154 */
	if (PCIE_GEN1(pi->sih) &&
	    pi->sih->buscorerev <= 5 && offset == PCIE_DLLP_PCIE11 && type == PCIE_PCIEREGS)
		return reg_val;

	reg_val = pcie_readreg(pi->sih, pcieregs, type, offset);
	PCI_ERROR(("PCIEREG: 0x%x readval is 0x%x\n", offset, reg_val));

	return reg_val;
}

/**
 * SERDES MDIO register access: optional write (mask non-zero) followed by
 * a read-back; returns 0xFFFFFFFF when the MDIO read fails.
 */
uint32
pcicore_pcieserdesreg(void *pch, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val)
{
	uint32 reg_val = 0;
	pcicore_info_t *pi = (pcicore_info_t *)pch;

	if (mask) {
		pcie_mdiowrite(pi, mdioslave, offset, val);
	}

	if (pcie_mdioread(pi, mdioslave, offset, &reg_val))
		reg_val = 0xFFFFFFFF;

	return reg_val;
}

/** Return the subsystem ID (upper half of the SVID/SSID config word). */
uint16
pcie_get_ssid(void* pch)
{
	uint32 ssid =
		OSL_PCI_READ_CONFIG(((pcicore_info_t *)pch)->osh, PCI_CFG_SVID, sizeof(uint32));
	return (uint16)(ssid >> 16);
}

uint32
/** Return the raw BAR0 config register. */
pcie_get_bar0(void* pch)
{
	return OSL_PCI_READ_CONFIG(((pcicore_info_t *)pch)->osh, PCI_CFG_BAR0, sizeof(uint32));
}

/**
 * Snapshot the first PCI_CONFIG_SPACE_SIZE bytes of config space into
 * pi->pcie_configspace, one dword at a time.  Always returns 0.
 */
int
pcie_configspace_cache(void* pch)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint offset = 0;
	uint32 *tmp = (uint32 *)pi->pcie_configspace;

	while (offset < PCI_CONFIG_SPACE_SIZE) {
		*tmp++ = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
		offset += 4;
	}
	return 0;
}

/**
 * Write the cached config-space snapshot back to the device.
 *
 * @return 0 on success, -1 if no snapshot was taken (first dword -- the
 *         vendor/device ID -- is never 0 on a real device, so 0 marks
 *         an empty cache; see the bzero() in pcicore_attach)
 */
int
pcie_configspace_restore(void* pch)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	uint offset = 0;
	uint32 *tmp = (uint32 *)pi->pcie_configspace;

	/* if config space was not buffered, then abort restore */
	if (*tmp == 0)
		return -1;

	while (offset < PCI_CONFIG_SPACE_SIZE) {
		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), *tmp);
		tmp++;
		offset += 4;
	}
	return 0;
}

/**
 * Copy size bytes of the cached config-space snapshot into buf.
 * Caller must ensure size <= PCI_CONFIG_SPACE_SIZE.
 */
int
pcie_configspace_get(void* pch, uint8 *buf, uint size)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	memcpy(buf, pi->pcie_configspace, size);
	return 0;
}

/** Return the negotiated link speed field from the link control/status reg. */
uint32
pcie_get_link_speed(void* pch)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint32 data;

	data = pcie_readreg(pi->sih, pcieregs, PCIE_CONFIGREGS, pi->pciecap_lcreg_offset);
	return (data & PCIE_LINKSPEED_MASK) >> (PCIE_LINKSPEED_SHIFT);
}

/**
 * Read-modify-write the PCIe control register to configure surviving
 * PERST#; compiled out (returns 0) unless SURVIVE_PERST_ENAB is defined.
 */
uint32
pcie_survive_perst(void* pch, uint32 mask, uint32 val)
{
#ifdef SURVIVE_PERST_ENAB
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint32 w;

	/* mask and set */
	if (mask || val) {
		w = (R_REG(pi->osh, (&pcieregs->control)) & ~mask) | val;
		W_REG(pi->osh, (&pcieregs->control), w);
	}
	/* readback */
	return R_REG(pi->osh, (&pcieregs->control));
#else
	return 0;
#endif /* SURVIVE_PERST_ENAB */
}

#if defined(BCMDBG_DUMP)
/* Physical-layer (PLP) register descriptors for bcmdumpfields() --
 * format string, register offset, width in bytes.
 */
const struct fielddesc pcie_plp_regdesc[] = {
	{ "Mode 0x%04x ", PCIE_PLP_MODEREG, 4},
	{ "Status 0x%04x ", PCIE_PLP_STATUSREG, 4},
	{ "LTSSMControl 0x%04x ", PCIE_PLP_LTSSMCTRLREG, 4},
	{ "LinkNumber 0x%04x ", PCIE_PLP_LTLINKNUMREG, 4},
	{ "LaneNumber 0x%04x ", PCIE_PLP_LTLANENUMREG, 4},
	{ "N_FTS 0x%04x ", PCIE_PLP_LTNFTSREG, 4},
	{ "Attention 0x%04x ", PCIE_PLP_ATTNREG, 4},
	{ "AttentionMask 0x%04x ", PCIE_PLP_ATTNMASKREG, 4},
	{ "RxErrCnt 0x%04x ", PCIE_PLP_RXERRCTR, 4},
	{ "RxFramingErrCnt 0x%04x ", PCIE_PLP_RXFRMERRCTR, 4},
	{ "TestCtrl 0x%04x ", PCIE_PLP_TESTCTRLREG, 4},
	{ "SERDESCtrlOvrd 0x%04x ", PCIE_PLP_SERDESCTRLOVRDREG, 4},
	{ "TimingparamOvrd 0x%04x ", PCIE_PLP_TIMINGOVRDREG, 4},
	{ "RXTXSMdbgReg 0x%04x ", PCIE_PLP_RXTXSMDIAGREG, 4},
	{ "LTSSMdbgReg 0x%04x\n", PCIE_PLP_LTSSMDIAGREG, 4},
	{ NULL, 0, 0}
};

/* Data-link-layer (DLLP) register descriptors for bcmdumpfields(). */
const struct fielddesc pcie_dllp_regdesc[] = {
	{"LinkControl 0x%04x ", PCIE_DLLP_LCREG, 4},
	{"LinkStatus 0x%04x ", PCIE_DLLP_LSREG, 4},
	{"LinkAttention 0x%04x ", PCIE_DLLP_LAREG, 4},
	{"LinkAttentionMask 0x%04x ", PCIE_DLLP_LAMASKREG, 4},
	{"NextTxSeqNum 0x%04x ", PCIE_DLLP_NEXTTXSEQNUMREG, 4},
	{"AckedTxSeqNum 0x%04x ", PCIE_DLLP_ACKEDTXSEQNUMREG, 4},
	{"PurgedTxSeqNum 0x%04x ", PCIE_DLLP_PURGEDTXSEQNUMREG, 4},
	{"RxSeqNum 0x%04x ", PCIE_DLLP_RXSEQNUMREG, 4},
	{"LinkReplay 0x%04x ", PCIE_DLLP_LRREG, 4},
	{"LinkAckTimeout 0x%04x ", PCIE_DLLP_LACKTOREG, 4},
	{"PowerManagementThreshold 0x%04x ", PCIE_DLLP_PMTHRESHREG, 4},
	{"RetryBufferwrptr 0x%04x ", PCIE_DLLP_RTRYWPREG, 4},
	{"RetryBufferrdptr 0x%04x ", PCIE_DLLP_RTRYRPREG, 4},
	{"RetryBufferpuptr 0x%04x ", PCIE_DLLP_RTRYPPREG, 4},
	{"RetryBufferRd/Wr 0x%04x ", PCIE_DLLP_RTRRWREG, 4},
	{"ErrorCountthreshold 0x%04x ", PCIE_DLLP_ECTHRESHREG, 4},
	{"TLPErrorcounter 0x%04x ", PCIE_DLLP_TLPERRCTRREG, 4},
	{"Errorcounter 0x%04x ", PCIE_DLLP_ERRCTRREG, 4},
	{"NAKRecdcounter 0x%04x ", PCIE_DLLP_NAKRXCTRREG, 4},
	{"Test 0x%04x\n", PCIE_DLLP_TESTREG, 4},
	{ NULL, 0, 0}
};

/* Transaction-layer (TLP) register descriptors for bcmdumpfields(). */
const struct fielddesc pcie_tlp_regdesc[] = {
	{"Config 0x%04x ", PCIE_TLP_CONFIGREG, 4},
	{"Workarounds 0x%04x ", PCIE_TLP_WORKAROUNDSREG, 4},
	{"WR-DMA-UA 0x%04x ", PCIE_TLP_WRDMAUPPER, 4},
	{"WR-DMA-LA 0x%04x ", PCIE_TLP_WRDMALOWER, 4},
	{"WR-DMA Len/BE 0x%04x ", PCIE_TLP_WRDMAREQ_LBEREG, 4},
	{"RD-DMA-UA 0x%04x ", PCIE_TLP_RDDMAUPPER, 4},
	{"RD-DMA-LA 0x%04x ", PCIE_TLP_RDDMALOWER, 4},
	{"RD-DMA Len 0x%04x ", PCIE_TLP_RDDMALENREG, 4},
	{"MSI-DMA-UA 0x%04x ", PCIE_TLP_MSIDMAUPPER, 4},
	{"MSI-DMA-LA 0x%04x ", PCIE_TLP_MSIDMALOWER, 4},
	{"MSI-DMALen 0x%04x ", PCIE_TLP_MSIDMALENREG, 4},
	{"SlaveReqLen 0x%04x ", PCIE_TLP_SLVREQLENREG, 4},
	{"FlowControlInput 0x%04x ", PCIE_TLP_FCINPUTSREQ, 4},
	{"TxStateMachine 0x%04x ", PCIE_TLP_TXSMGRSREQ, 4},
	{"AddressAckXferCnt 0x%04x ", PCIE_TLP_ADRACKCNTARBLEN, 4},
	{"DMACompletion HDR0 0x%04x ", PCIE_TLP_DMACPLHDR0, 4},
	{"DMACompletion HDR1 0x%04x ", PCIE_TLP_DMACPLHDR1, 4},
	{"DMACompletion HDR2 0x%04x ", PCIE_TLP_DMACPLHDR2, 4},
	{"DMACompletionMISC0 0x%04x ", PCIE_TLP_DMACPLMISC0, 4},
	{"DMACompletionMISC1 0x%04x ", PCIE_TLP_DMACPLMISC1, 4},
	{"DMACompletionMISC2 0x%04x ", PCIE_TLP_DMACPLMISC2, 4},
	{"SplitControllerReqLen 0x%04x ", PCIE_TLP_SPTCTRLLEN, 4},
	{"SplitControllerMISC0 0x%04x ", PCIE_TLP_SPTCTRLMSIC0, 4},
	{"SplitControllerMISC1 0x%04x ", PCIE_TLP_SPTCTRLMSIC1, 4},
	{"bus/dev/func 0x%04x ", PCIE_TLP_BUSDEVFUNC, 4},
	{"ResetCounter 0x%04x ", PCIE_TLP_RESETCTR, 4},
	{"RetryBufferValue 0x%04x ", PCIE_TLP_RTRYBUF, 4},
	{"TargetDebug1 0x%04x ", PCIE_TLP_TGTDEBUG1, 4},
	{"TargetDebug2 0x%04x ", PCIE_TLP_TGTDEBUG2, 4},
	{"TargetDebug3 0x%04x\n", PCIE_TLP_TGTDEBUG3, 4},
	{ NULL, 0, 0}
};

#endif

#if defined(WLTEST) || defined(BCMDBG_DUMP)
/* Dump PCIE Info */
int
pcicore_dump_pcieinfo(void *pch, struct bcmstrbuf *b)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;

	if (!PCIE_GEN1(pi->sih) && !PCIE_GEN2(pi->sih))
		return BCME_ERROR;

	bcm_bprintf(b, "PCIE link speed: %d\n", pcie_get_link_speed(pch));
	return 0;
}
#endif

#if defined(BCMDBG_DUMP)

/* size that can take bitfielddump */
#define BITFIELD_DUMP_SIZE 2048

/** Dump PCIE PLP/DLLP/TLP diagnostic registers */
int
pcicore_dump_pcieregs(void *pch, struct bcmstrbuf *b)
{
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	si_t *sih = pi->sih;
	uint reg_val = 0;
	char *bitfield_dump_buf;

	if (!PCIE_GEN1(pi->sih))
		return BCME_ERROR;

	if (!(bitfield_dump_buf = MALLOC(pi->osh, BITFIELD_DUMP_SIZE))) {
		printf("bitfield dump allocation failed\n");
		return BCME_NOMEM;
	}

	/* dump each descriptor table through the shared scratch buffer,
	 * clearing it between sections
	 */
	bcm_bprintf(b, "PLPRegs \t");
	bcmdumpfields(si_pcie_readreg, (void *)(uintptr)pi->sih, PCIE_PCIEREGS,
		(struct fielddesc *)(uintptr)pcie_plp_regdesc,
		bitfield_dump_buf, BITFIELD_DUMP_SIZE);
	bcm_bprintf(b, "%s", bitfield_dump_buf);
	bzero(bitfield_dump_buf, BITFIELD_DUMP_SIZE);
	bcm_bprintf(b, "\n");
	bcm_bprintf(b, "DLLPRegs \t");
	bcmdumpfields(si_pcie_readreg, (void *)(uintptr)pi->sih, PCIE_PCIEREGS,
		(struct fielddesc *)(uintptr)pcie_dllp_regdesc,
		bitfield_dump_buf, BITFIELD_DUMP_SIZE);
	bcm_bprintf(b, "%s", bitfield_dump_buf);
	bzero(bitfield_dump_buf, BITFIELD_DUMP_SIZE);
	bcm_bprintf(b, "\n");
	bcm_bprintf(b, "TLPRegs \t");
	bcmdumpfields(si_pcie_readreg, (void *)(uintptr)pi->sih, PCIE_PCIEREGS,
		(struct fielddesc *)(uintptr)pcie_tlp_regdesc,
		bitfield_dump_buf, BITFIELD_DUMP_SIZE);
	bcm_bprintf(b, "%s", bitfield_dump_buf);
	bzero(bitfield_dump_buf,
BITFIELD_DUMP_SIZE); 1818 bcm_bprintf(b, "\n"); 1819 1820 /* enable mdio access to SERDES */ 1821 W_REG(pi->osh, (&pcieregs->u.pcie1.mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL); 1822 1823 bcm_bprintf(b, "SERDES regs \n"); 1824 if (sih->buscorerev >= 10) { 1825 pcie_mdioread(pi, MDIO_DEV_IEEE0, 0x2, ®_val); 1826 bcm_bprintf(b, "block IEEE0, offset 2: 0x%x\n", reg_val); 1827 pcie_mdioread(pi, MDIO_DEV_IEEE0, 0x3, ®_val); 1828 bcm_bprintf(b, "block IEEE0, offset 2: 0x%x\n", reg_val); 1829 pcie_mdioread(pi, MDIO_DEV_IEEE1, 0x08, ®_val); 1830 bcm_bprintf(b, "block IEEE1, lanestatus: 0x%x\n", reg_val); 1831 pcie_mdioread(pi, MDIO_DEV_IEEE1, 0x0a, ®_val); 1832 bcm_bprintf(b, "block IEEE1, lanestatus2: 0x%x\n", reg_val); 1833 pcie_mdioread(pi, MDIO_DEV_BLK4, 0x16, ®_val); 1834 bcm_bprintf(b, "MDIO_DEV_BLK4, lanetest0: 0x%x\n", reg_val); 1835 pcie_mdioread(pi, MDIO_DEV_TXPLL, 0x11, ®_val); 1836 bcm_bprintf(b, "MDIO_DEV_TXPLL, pllcontrol: 0x%x\n", reg_val); 1837 pcie_mdioread(pi, MDIO_DEV_TXPLL, 0x12, ®_val); 1838 bcm_bprintf(b, "MDIO_DEV_TXPLL, plltimer1: 0x%x\n", reg_val); 1839 pcie_mdioread(pi, MDIO_DEV_TXPLL, 0x13, ®_val); 1840 bcm_bprintf(b, "MDIO_DEV_TXPLL, plltimer2: 0x%x\n", reg_val); 1841 pcie_mdioread(pi, MDIO_DEV_TXPLL, 0x14, ®_val); 1842 bcm_bprintf(b, "MDIO_DEV_TXPLL, plltimer3: 0x%x\n", reg_val); 1843 pcie_mdioread(pi, MDIO_DEV_TXPLL, 0x17, ®_val); 1844 bcm_bprintf(b, "MDIO_DEV_TXPLL, freqdetcounter: 0x%x\n", reg_val); 1845 } else { 1846 pcie_mdioread(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, ®_val); 1847 bcm_bprintf(b, "rxtimer1 0x%x ", reg_val); 1848 pcie_mdioread(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, ®_val); 1849 bcm_bprintf(b, "rxCDR 0x%x ", reg_val); 1850 pcie_mdioread(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, ®_val); 1851 bcm_bprintf(b, "rxCDRBW 0x%x\n", reg_val); 1852 } 1853 1854 /* disable mdio access to SERDES */ 1855 W_REG(pi->osh, (&pcieregs->u.pcie1.mdiocontrol), 0); 1856 1857 MFREE(pi->osh, bitfield_dump_buf, BITFIELD_DUMP_SIZE); 1858 1859 return 
0; 1860} 1861 1862#endif 1863