1/******************************************************************************/ 2/* */ 3/* Broadcom BCM4400 Linux Network Driver, Copyright (c) 2000 Broadcom */ 4/* Corporation. */ 5/* All rights reserved. */ 6/* */ 7/* This program is free software; you can redistribute it and/or modify */ 8/* it under the terms of the GNU General Public License as published by */ 9/* the Free Software Foundation, located in the file LICENSE. */ 10/* */ 11/* History: */ 12/******************************************************************************/ 13 14#include "b44mm.h" 15 16 17 18/******************************************************************************/ 19/* Local functions. */ 20/******************************************************************************/ 21 22LM_STATUS b44_LM_Abort(PLM_DEVICE_BLOCK pDevice); 23LM_STATUS b44_LM_QueueRxPackets(PLM_DEVICE_BLOCK pDevice); 24LM_STATUS b44_LM_SetFlowControl(PLM_DEVICE_BLOCK pDevice, 25 LM_UINT32 LocalPhyAd, LM_UINT32 RemotePhyAd); 26static LM_UINT32 b44_GetPhyAdFlowCntrlSettings(PLM_DEVICE_BLOCK pDevice); 27 28STATIC LM_STATUS b44_LM_ResetChip(PLM_DEVICE_BLOCK pDevice); 29STATIC LM_STATUS b44_LM_DisableChip(PLM_DEVICE_BLOCK pDevice); 30void b44_LM_ClearStats(LM_DEVICE_BLOCK *pDevice); 31void b44_LM_WriteCam(LM_DEVICE_BLOCK *pDevice, LM_UINT8 *ea, 32 LM_UINT32 camindex); 33 34LM_UINT32 b44_LM_getsbaddr(LM_DEVICE_BLOCK *pDevice, LM_UINT32 id, 35 LM_UINT32 coreunit); 36void b44_LM_sb_core_disable(LM_DEVICE_BLOCK *pDevice); 37LM_UINT32 b44_LM_sb_pci_setup(LM_DEVICE_BLOCK *pDevice, LM_UINT32 cores); 38LM_UINT32 b44_LM_sb_coreunit(LM_DEVICE_BLOCK *pDevice); 39void b44_LM_sb_core_reset(LM_DEVICE_BLOCK *pDevice); 40LM_UINT32 b44_LM_sb_coreid(LM_DEVICE_BLOCK *pDevice); 41LM_UINT32 b44_LM_sb_corerev(LM_DEVICE_BLOCK *pDevice); 42LM_UINT32 b44_LM_sb_iscoreup(LM_DEVICE_BLOCK *pDevice); 43#ifdef BCM_WOL 44static void b44_LM_ftwrite(LM_DEVICE_BLOCK *pDevice, LM_UINT32 *b, 45 LM_UINT32 nbytes, LM_UINT32 ftaddr); 46#endif 47 48#define 
BCM4710_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */ 49#define BCM4710_ENUM 0x18000000 /* Beginning of core enumeration space */ 50 51struct sbmap bcm4402[] = { 52 {SBID_PCI_DMA, 0, BCM4710_PCI_DMA}, 53 {SBID_ENUM, 0, BCM4710_ENUM}, 54 {SBID_REG_EMAC, 0, 0x18000000}, 55 {SBID_REG_CODEC, 0, 0x18001000}, 56 {SBID_REG_PCI, 0, 0x18002000} 57}; 58 59#ifdef B44_DEBUG 60int b44_reset_count = 0; 61#endif 62 63/******************************************************************************/ 64/* External functions. */ 65/******************************************************************************/ 66 67LM_UINT32 68b44_LM_ByteSwap(LM_UINT32 Value32) 69{ 70 return ((Value32 & 0xff) << 24)| ((Value32 & 0xff00) << 8)| 71 ((Value32 & 0xff0000) >> 8) | ((Value32 >> 24) & 0xff); 72} 73 74/******************************************************************************/ 75/* Description: */ 76/* */ 77/* Return: */ 78/******************************************************************************/ 79LM_STATUS 80b44_LM_QueueRxPackets(PLM_DEVICE_BLOCK pDevice) 81{ 82 PLM_PACKET pPacket; 83 LM_PHYSICAL_ADDRESS pa; 84 LM_UINT32 rxout = pDevice->rxout; 85 LM_UINT32 ctrl; 86 87 pPacket = (PLM_PACKET) QQ_PopHead(&pDevice->RxPacketFreeQ.Container); 88 while(pPacket) { 89 90 /* Initialize the receive buffer pointer */ 91 b44_MM_MapRxDma(pDevice, pPacket, &pa); 92 93 *((LM_UINT32 *) pPacket->u.Rx.pRxBufferVirt) = 0; 94 95 /* prep the descriptor control value */ 96 ctrl = pPacket->u.Rx.RxBufferSize; 97 if (rxout == (pDevice->MaxRxPacketDescCnt - 1)) 98 ctrl |= CTRL_EOT; 99 100 /* init the rx descriptor */ 101 pDevice->pRxDesc[rxout].ctrl = ctrl; 102 pDevice->pRxDesc[rxout].addr = pa + pDevice->dataoffset; 103 104 pDevice->RxPacketArr[rxout] = pPacket; 105 rxout = (rxout + 1) & (pDevice->MaxRxPacketDescCnt - 1); 106 107 pPacket = (PLM_PACKET) 108 QQ_PopHead(&pDevice->RxPacketFreeQ.Container); 109 } /* while */ 110 111 pDevice->rxout = rxout; 112 MM_WMB(); 113 114 
REG_WR(pDevice, dmaregs.rcvptr, rxout * sizeof(dmadd_t)); 115 return LM_STATUS_SUCCESS; 116} /* LM_QueueRxPackets */ 117 118 119/******************************************************************************/ 120/* Description: */ 121/* */ 122/* Return: */ 123/******************************************************************************/ 124STATIC LM_STATUS 125b44_LM_EepromReadBlock(PLM_DEVICE_BLOCK pDevice, 126 LM_UINT32 offset, LM_UINT32 *pData, LM_UINT32 size) 127{ 128 int off, nw; 129// LM_UINT8 chk8; 130 int i; 131 LM_UINT32 *buf; 132 133 off = offset; 134 nw = ROUNDUP(size, 4); 135 buf = (LM_UINT32 *) pData; 136 137 /* read the sprom */ 138 for (i = 0; i < nw; i += 4) 139 buf[i/4] = REG_RD_OFFSET(pDevice, 4096 + off + i); 140 141 return LM_STATUS_SUCCESS; 142} /* b44_LM_EepromRead */ 143 144 145/******************************************************************************/ 146/* Description: */ 147/* This routine initializes default parameters and reads the PCI */ 148/* configurations. */ 149/* */ 150/* Return: */ 151/* LM_STATUS_SUCCESS */ 152/******************************************************************************/ 153LM_STATUS 154b44_LM_GetAdapterInfo( 155PLM_DEVICE_BLOCK pDevice) 156{ 157 LM_UINT32 eprom_dw[32]; 158 LM_UINT8 *eprom = (LM_UINT8 *) eprom_dw; 159 LM_STATUS Status; 160 LM_UINT32 Value32; 161 162 /* Get Device Id and Vendor Id */ 163 Status = b44_MM_ReadConfig32(pDevice, PCI_VENDOR_ID_REG, &Value32); 164 if(Status != LM_STATUS_SUCCESS) 165 { 166 return Status; 167 } 168 pDevice->PciVendorId = (LM_UINT16) Value32; 169 pDevice->PciDeviceId = (LM_UINT16) (Value32 >> 16); 170 171 Status = b44_MM_ReadConfig32(pDevice, PCI_REV_ID_REG, &Value32); 172 if(Status != LM_STATUS_SUCCESS) 173 { 174 return Status; 175 } 176 pDevice->PciRevId = (LM_UINT8) Value32; 177 178 /* Get subsystem vendor. 
*/ 179 Status = b44_MM_ReadConfig32(pDevice, PCI_SUBSYSTEM_VENDOR_ID_REG, &Value32); 180 if(Status != LM_STATUS_SUCCESS) 181 { 182 return Status; 183 } 184 pDevice->PciSubvendorId = (LM_UINT16) Value32; 185 186 /* Get PCI subsystem id. */ 187 pDevice->PciSubsystemId = (LM_UINT16) (Value32 >> 16); 188 189 Status = b44_MM_MapMemBase(pDevice); 190 if(Status != LM_STATUS_SUCCESS) 191 { 192 return Status; 193 } 194 /* Initialize the memory view pointer. */ 195 pDevice->pMemView = (bcmenetregs_t *) pDevice->pMappedMemBase; 196 197 b44_LM_EepromReadBlock(pDevice, 0, eprom_dw, sizeof(eprom_dw)); 198 199 /* check sprom version */ 200 if ((eprom[126] != 1) && (eprom[126] != 0x10)) 201 return LM_STATUS_FAILURE; 202 203 pDevice->PermanentNodeAddress[0] = eprom[79]; 204 pDevice->PermanentNodeAddress[1] = eprom[78]; 205 pDevice->PermanentNodeAddress[2] = eprom[81]; 206 pDevice->PermanentNodeAddress[3] = eprom[80]; 207 pDevice->PermanentNodeAddress[4] = eprom[83]; 208 pDevice->PermanentNodeAddress[5] = eprom[82]; 209 210 memcpy(pDevice->NodeAddress, pDevice->PermanentNodeAddress, 6); 211 212 pDevice->PhyAddr = eprom[90] & 0x1f; 213 pDevice->MdcPort = (eprom[90] >> 14) & 0x1; 214 215 /* Initialize the default values. */ 216 pDevice->TxPacketDescCnt = DEFAULT_TX_PACKET_DESC_COUNT; 217 pDevice->RxPacketDescCnt = DEFAULT_RX_PACKET_DESC_COUNT; 218 pDevice->MaxRxPacketDescCnt = DMAMAXRINGSZ / sizeof(dmadd_t); 219 pDevice->MaxTxPacketDescCnt = DMAMAXRINGSZ / sizeof(dmadd_t); 220 pDevice->rxoffset = 30; 221 pDevice->lazyrxfc = 1; 222 pDevice->lazyrxmult = 0; 223 pDevice->lazytxfc = 0; 224 pDevice->lazytxmult = 0; 225 pDevice->intmask = DEF_INTMASK; 226 pDevice->LinkStatus = LM_STATUS_LINK_DOWN; 227 228#ifdef BCM_WOL 229 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE; 230#endif 231 232 /* Change driver parameters. 
*/ 233 Status = b44_MM_GetConfig(pDevice); 234 if(Status != LM_STATUS_SUCCESS) 235 { 236 return Status; 237 } 238 239#if 0 240 /* Calling SetupPhy will cause target aborts if the chip has not */ 241 /* been reset */ 242 b44_LM_SetupPhy(pDevice); 243#endif 244 ASSERT(b44_LM_sb_coreid(pDevice) == SB_ENET); 245 246 pDevice->corerev = b44_LM_sb_corerev(pDevice); 247 248 pDevice->sbmap = bcm4402; 249 250 pDevice->coreunit = b44_LM_sb_coreunit(pDevice); 251 252 ASSERT((pDevice->coreunit == 0) || (pDevice->coreunit == 1)); 253 254 /* supports link change interrupt */ 255 if (pDevice->corerev >= 7) 256 pDevice->intmask |= I_LS; 257 258 return LM_STATUS_SUCCESS; 259} /* LM_GetAdapterInfo */ 260 261 262/******************************************************************************/ 263/* Description: */ 264/* This routine sets up receive/transmit buffer descriptions queues. */ 265/* */ 266/* Return: */ 267/* LM_STATUS_SUCCESS */ 268/******************************************************************************/ 269LM_STATUS 270b44_LM_InitializeAdapter(PLM_DEVICE_BLOCK pDevice) 271{ 272 LM_PHYSICAL_ADDRESS MemPhy, MemBasePhy; 273 LM_UINT8 *pMemVirt, *pMemBase; 274 PLM_PACKET pPacket; 275 LM_STATUS Status; 276 LM_UINT32 Size; 277 LM_UINT32 align, j; 278 279 /* Intialize the queues. */ 280 QQ_InitQueue(&pDevice->RxPacketReceivedQ.Container, 281 MAX_RX_PACKET_DESC_COUNT); 282 QQ_InitQueue(&pDevice->RxPacketFreeQ.Container, 283 MAX_RX_PACKET_DESC_COUNT); 284 285 QQ_InitQueue(&pDevice->TxPacketFreeQ.Container,MAX_TX_PACKET_DESC_COUNT); 286 QQ_InitQueue(&pDevice->TxPacketXmittedQ.Container,MAX_TX_PACKET_DESC_COUNT); 287 288 /* Allocate memory for packet descriptors. 
*/ 289 Size = (pDevice->RxPacketDescCnt + 290 pDevice->TxPacketDescCnt) * B44_MM_PACKET_DESC_SIZE; 291 Status = b44_MM_AllocateMemory(pDevice, Size, (PLM_VOID *) &pPacket); 292 if(Status != LM_STATUS_SUCCESS) 293 { 294 return Status; 295 } 296 297 for(j = 0; j < pDevice->TxPacketDescCnt; j++) 298 { 299 QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket); 300 301 pPacket = (PLM_PACKET) ((PLM_UINT8) pPacket + B44_MM_PACKET_DESC_SIZE); 302 } /* for(j.. */ 303 304 for(j = 0; j < pDevice->RxPacketDescCnt; j++) 305 { 306 /* Receive buffer size. */ 307 pPacket->u.Rx.RxBufferSize = 1522 + pDevice->rxoffset; 308 309 /* Add the descriptor to RxPacketFreeQ. */ 310 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket); 311 312 pPacket = (PLM_PACKET) ((PLM_UINT8) pPacket + B44_MM_PACKET_DESC_SIZE); 313 } /* for */ 314 315 /* Initialize the rest of the packet descriptors. */ 316 Status = b44_MM_InitializeUmPackets(pDevice); 317 if(Status != LM_STATUS_SUCCESS) 318 { 319 return Status; 320 } /* if */ 321 322 /* Make the Tx ring size power of 2 */ 323 pDevice->MaxTxPacketDescCnt = DMAMAXRINGSZ / sizeof(dmadd_t); 324 while ((pDevice->MaxTxPacketDescCnt >> 1) > pDevice->TxPacketDescCnt) 325 pDevice->MaxTxPacketDescCnt >>= 1; 326 327 Size = (pDevice->MaxRxPacketDescCnt + pDevice->MaxTxPacketDescCnt) * 328 sizeof(dmadd_t) + DMARINGALIGN; 329 330 Status = b44_MM_AllocateSharedMemory(pDevice, Size, (PLM_VOID) &pMemBase, &MemBasePhy); 331 if(Status != LM_STATUS_SUCCESS) 332 { 333 return Status; 334 } 335 336 MemPhy = (MemBasePhy + (DMARINGALIGN - 1)) & ~(DMARINGALIGN - 1); 337 align = MemPhy - MemBasePhy; 338 pMemVirt = pMemBase + align; 339 340 pDevice->pRxDesc = (dmadd_t *) pMemVirt; 341 pDevice->RxDescPhy = MemPhy; 342 343 pMemVirt += pDevice->MaxRxPacketDescCnt * sizeof(dmadd_t); 344 MemPhy += pDevice->MaxRxPacketDescCnt * sizeof(dmadd_t); 345 346 pDevice->pTxDesc = (dmadd_t *) pMemVirt; 347 pDevice->TxDescPhy = MemPhy; 348 349 /* Initialize the hardware. 
*/ 350 Status = b44_LM_ResetAdapter(pDevice, TRUE); 351 if(Status != LM_STATUS_SUCCESS) 352 { 353 return Status; 354 } 355 356 /* We are done with initialization. */ 357 pDevice->InitDone = TRUE; 358 359 return LM_STATUS_SUCCESS; 360} /* LM_InitializeAdapter */ 361 362 363LM_STATUS 364b44_LM_DisableChip(PLM_DEVICE_BLOCK pDevice) 365{ 366 367 /* disable emac */ 368 REG_WR(pDevice, enetcontrol, EC_ED); 369 SPINWAIT((REG_RD(pDevice, enetcontrol) & EC_ED), 200); 370 371 REG_WR(pDevice, dmaregs.xmtcontrol, 0); 372 REG_WR(pDevice, dmaregs.rcvcontrol, 0); 373 b44_MM_Wait(10); 374 375 return LM_STATUS_SUCCESS; 376} 377 378/******************************************************************************/ 379/* Description: */ 380/* This function reinitializes the adapter. */ 381/* */ 382/* Return: */ 383/* LM_STATUS_SUCCESS */ 384/******************************************************************************/ 385LM_STATUS 386b44_LM_ResetAdapter(PLM_DEVICE_BLOCK pDevice, LM_BOOL full) 387{ 388 389 /* Disable interrupt. */ 390 if (pDevice->InitDone) 391 { 392 b44_LM_DisableInterrupt(pDevice); 393 } 394 395 /* Disable transmit and receive DMA engines. Abort all pending requests. */ 396 b44_LM_Abort(pDevice); 397 398 pDevice->ShuttingDown = FALSE; 399 400 /* enable pci interrupts, bursts, and prefetch */ 401 pDevice->pcirev = b44_LM_sb_pci_setup(pDevice, 402 ((pDevice->coreunit == 0)? 
SBIV_ENET0: SBIV_ENET1)); 403 404 pDevice->ddoffset = pDevice->dataoffset = 405 b44_LM_getsbaddr(pDevice, SBID_PCI_DMA, 0); 406 407 b44_LM_ResetChip(pDevice); 408 409#if 1 410 if (pDevice->InitDone != TRUE) { 411 if (pDevice->MdcPort == pDevice->coreunit) { 412 b44_LM_ResetPhy(pDevice); 413 b44_LM_SetupPhy(pDevice); 414 } 415 } 416#endif 417 418 b44_LM_SetMacAddress(pDevice, pDevice->NodeAddress); 419 420 /* enable crc32 generation and set proper LED modes */ 421 REG_WR(pDevice, emaccontrol, EMC_CG | (0x7 << EMC_LC_SHIFT)); 422 423 REG_WR(pDevice, intrecvlazy, (pDevice->lazyrxfc << IRL_FC_SHIFT)); 424 if (pDevice->lazyrxfc > 1) 425 { 426 REG_OR(pDevice, intrecvlazy, (pDevice->lazyrxmult * pDevice->lazyrxfc)); 427 } 428 429 /* enable 802.3x tx flow control (honor received PAUSE frames) */ 430// REG_WR(pDevice, rxconfig, ERC_FE | ERC_UF); 431 432 b44_LM_SetReceiveMask(pDevice, pDevice->ReceiveMask); 433 434 /* set max frame lengths - account for possible vlan tag */ 435 REG_WR(pDevice, rxmaxlength, MAX_ETHERNET_PACKET_SIZE + 32); 436 REG_WR(pDevice, txmaxlength, MAX_ETHERNET_PACKET_SIZE + 32); 437 438 /* set tx watermark */ 439 REG_WR(pDevice, txwatermark, 56); 440 441 if (full) 442 { 443 /* initialize the tx and rx dma channels */ 444 /* clear tx descriptor ring */ 445 memset((void*)pDevice->pTxDesc, 0, (pDevice->MaxTxPacketDescCnt * 446 sizeof(dmadd_t))); 447 448 REG_WR(pDevice, dmaregs.xmtcontrol, XC_XE); 449 REG_WR(pDevice, dmaregs.xmtaddr, (pDevice->TxDescPhy + 450 pDevice->ddoffset)); 451 452 /* clear rx descriptor ring */ 453 memset((void*)pDevice->pRxDesc, 0, (pDevice->MaxRxPacketDescCnt * 454 sizeof(dmadd_t))); 455 456 REG_WR(pDevice, dmaregs.rcvcontrol, ((pDevice->rxoffset << 457 RC_RO_SHIFT) | RC_RE)); 458 459 REG_WR(pDevice, dmaregs.rcvaddr, (pDevice->RxDescPhy + 460 pDevice->ddoffset)); 461 462 /* Queue Rx packet buffers. 
*/ 463 b44_LM_QueueRxPackets(pDevice); 464 465 MM_ATOMIC_SET(&pDevice->SendDescLeft, pDevice->TxPacketDescCnt - 1); 466 } 467 else 468 { 469 REG_WR(pDevice, dmaregs.rcvcontrol, ((pDevice->rxoffset << 470 RC_RO_SHIFT) | RC_RE)); 471 } 472 473 /* turn on the emac */ 474 REG_OR(pDevice, enetcontrol, EC_EE); 475 476 return LM_STATUS_SUCCESS; 477} /* LM_ResetAdapter */ 478 479 480/******************************************************************************/ 481/* Description: */ 482/* This routine disables the adapter from generating interrupts. */ 483/* */ 484/* Return: */ 485/* LM_STATUS_SUCCESS */ 486/******************************************************************************/ 487LM_STATUS 488b44_LM_DisableInterrupt( 489 PLM_DEVICE_BLOCK pDevice) 490{ 491 REG_WR(pDevice, intmask, 0); 492 (void) REG_RD(pDevice, intmask); /* sync readback */ 493 return LM_STATUS_SUCCESS; 494} 495 496 497 498/******************************************************************************/ 499/* Description: */ 500/* This routine enables the adapter to generate interrupts. */ 501/* */ 502/* Return: */ 503/* LM_STATUS_SUCCESS */ 504/******************************************************************************/ 505LM_STATUS 506b44_LM_EnableInterrupt( 507 PLM_DEVICE_BLOCK pDevice) 508{ 509 REG_WR(pDevice, intmask, pDevice->intmask); 510 return LM_STATUS_SUCCESS; 511} 512 513 514 515/******************************************************************************/ 516/* Description: */ 517/* This routine puts a packet on the wire if there is a transmit DMA */ 518/* descriptor available; otherwise the packet is queued for later */ 519/* transmission. If the second argue is NULL, this routine will put */ 520/* the queued packet on the wire if possible. 
*/ 521/* */ 522/* Return: */ 523/* LM_STATUS_SUCCESS */ 524/******************************************************************************/ 525LM_STATUS 526b44_LM_SendPacket(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket) 527{ 528 LM_UINT32 fragcount; 529 LM_UINT32 len, txout, ctrl; 530 LM_PHYSICAL_ADDRESS pa; 531 int first, next; 532 533 txout = pDevice->txout; 534 535 pDevice->TxPacketArr[txout] = pPacket; 536 for(fragcount = 0, first = 1, next = 1; next; 537 first = 0, fragcount++) { 538 539 ctrl = 0; 540 b44_MM_MapTxDma(pDevice, pPacket, &pa, &len, fragcount); 541 ctrl = len & CTRL_BC_MASK; 542 543 if (first) 544 ctrl |= CTRL_SOF; 545 if (fragcount == (pPacket->u.Tx.FragCount - 1)) { 546 ctrl |= CTRL_EOF; 547 next = 0; 548 } 549 if (txout == (pDevice->MaxTxPacketDescCnt - 1)) { 550 ctrl |= CTRL_EOT; 551 } 552 ctrl |= CTRL_IOC; 553 554 /* init the tx descriptor */ 555 pDevice->pTxDesc[txout].ctrl = ctrl; 556 pDevice->pTxDesc[txout].addr = pa + pDevice->dataoffset; 557 558 txout = (txout + 1) & (pDevice->MaxTxPacketDescCnt - 1); 559 560 } 561 562 MM_ATOMIC_SUB(&pDevice->SendDescLeft, pPacket->u.Tx.FragCount); 563 564 pDevice->txout = txout; 565 566 MM_WMB(); 567 568 REG_WR(pDevice, dmaregs.xmtptr, (txout * sizeof(dmadd_t))); 569 570 return LM_STATUS_SUCCESS; 571} 572 573 574/******************************************************************************/ 575/* Description: */ 576/* This routine sets the receive control register according to ReceiveMask */ 577/* */ 578/* Return: */ 579/* LM_STATUS_SUCCESS */ 580/******************************************************************************/ 581LM_STATUS 582b44_LM_SetReceiveMask(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Mask) 583{ 584 LM_UINT32 ReceiveMask; 585 LM_UINT32 j; 586 LM_UINT32 idx = 0; 587 LM_UINT8 zero[6] = {0,0,0,0,0,0}; 588 589 ReceiveMask = Mask; 590 591 if(Mask & LM_ACCEPT_UNICAST) 592 { 593 Mask &= ~LM_ACCEPT_UNICAST; 594 } 595 596 if(Mask & LM_ACCEPT_MULTICAST) 597 { 598 Mask &= ~LM_ACCEPT_MULTICAST; 599 } 
600 601 if(Mask & LM_ACCEPT_ALL_MULTICAST) 602 { 603 Mask &= ~LM_ACCEPT_ALL_MULTICAST; 604 } 605 606 if(Mask & LM_ACCEPT_BROADCAST) 607 { 608 Mask &= ~LM_ACCEPT_BROADCAST; 609 } 610 611 if(Mask & LM_PROMISCUOUS_MODE) 612 { 613 Mask &= ~LM_PROMISCUOUS_MODE; 614 } 615 616 /* Make sure all the bits are valid before committing changes. */ 617 if(Mask) 618 { 619 return LM_STATUS_FAILURE; 620 } 621 622 if (ReceiveMask & LM_PROMISCUOUS_MODE) 623 REG_OR(pDevice, rxconfig, ERC_PE); 624 else { 625 REG_WR(pDevice, camcontrol, 0); 626 /* our local address */ 627 b44_LM_WriteCam(pDevice, pDevice->NodeAddress, idx++); 628 629 /* allmulti or a list of discrete multicast addresses */ 630 if (ReceiveMask & LM_ACCEPT_ALL_MULTICAST) 631 REG_OR(pDevice, rxconfig, ERC_AM); 632 else if (ReceiveMask & LM_ACCEPT_MULTICAST) { 633 for(j = 0; j < pDevice->McEntryCount; j++) { 634 b44_LM_WriteCam(pDevice, pDevice->McTable[j], 635 idx++); 636 } 637 } 638 639 for (; idx < 64; idx++) { 640 b44_LM_WriteCam(pDevice, zero, idx); 641 } 642 643 /* enable cam */ 644 REG_OR(pDevice, camcontrol, CC_CE); 645 } 646 647 return LM_STATUS_SUCCESS; 648} /* LM_SetReceiveMask */ 649 650 651 652/******************************************************************************/ 653/* Description: */ 654/* Disable the interrupt and put the transmitter and receiver engines in */ 655/* an idle state. Also aborts all pending send requests and receive */ 656/* buffers. 
*/ 657/* */ 658/* Return: */ 659/* LM_STATUS_SUCCESS */ 660/******************************************************************************/ 661LM_STATUS 662b44_LM_Abort( 663PLM_DEVICE_BLOCK pDevice) 664{ 665 PLM_PACKET pPacket; 666 LM_UINT32 rxin, txin, txdmask, txcurr; 667 668 if (!pDevice->InitDone) 669 { 670 return LM_STATUS_SUCCESS; 671 } 672 673 b44_LM_DisableInterrupt(pDevice); 674 675 txcurr = (REG_RD(pDevice, dmaregs.xmtstatus) & XS_CD_MASK); 676 txcurr = txcurr / sizeof(dmadd_t); 677 /* Allow tx packets to drain */ 678 if (pDevice->txout != txcurr) 679 { 680 b44_MM_Wait(20); 681 } 682 REG_WR(pDevice, dmaregs.xmtcontrol, 0); 683 b44_MM_Wait(120); 684 685 b44_LM_DisableChip(pDevice); 686 687 txdmask = pDevice->MaxTxPacketDescCnt - 1; 688 for (txin = pDevice->txin; txin != pDevice->txout; 689 txin = (txin + 1) & txdmask) 690 { 691 if ((pPacket = pDevice->TxPacketArr[txin])) { 692 QQ_PushTail(&pDevice->TxPacketXmittedQ.Container, pPacket); 693 pDevice->TxPacketArr[txin] = 0; 694 } 695 } 696 697 if(!pDevice->ShuttingDown) 698 { 699 /* Indicate packets to the protocol. */ 700 b44_MM_IndicateTxPackets(pDevice); 701 702 /* Indicate received packets to the protocols. */ 703 b44_MM_IndicateRxPackets(pDevice); 704 } 705 else 706 { 707 /* Move the receive packet descriptors in the ReceivedQ to the */ 708 /* free queue. */ 709 for(; ;) 710 { 711 pPacket = (PLM_PACKET) QQ_PopHead( 712 &pDevice->RxPacketReceivedQ.Container); 713 if(pPacket == NULL) 714 { 715 break; 716 } 717 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket); 718 } 719 } 720 721 /* Clean up the Receive desc ring. 
*/ 722 723 rxin = pDevice->rxin; 724 while(rxin != pDevice->rxout) { 725 pPacket = pDevice->RxPacketArr[rxin]; 726 727 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket); 728 729 rxin = (rxin + 1) & (pDevice->MaxRxPacketDescCnt - 1); 730 } /* while */ 731 732 pDevice->rxin = rxin; 733 return LM_STATUS_SUCCESS; 734} /* LM_Abort */ 735 736 737 738/******************************************************************************/ 739/* Description: */ 740/* Disable the interrupt and put the transmitter and receiver engines in */ 741/* an idle state. Aborts all pending send requests and receive buffers. */ 742/* Also free all the receive buffers. */ 743/* */ 744/* Return: */ 745/* LM_STATUS_SUCCESS */ 746/******************************************************************************/ 747LM_STATUS 748b44_LM_Halt(PLM_DEVICE_BLOCK pDevice) 749{ 750 PLM_PACKET pPacket; 751 LM_UINT32 EntryCnt; 752 753 b44_LM_Abort(pDevice); 754 755 /* Get the number of entries in the queue. */ 756 EntryCnt = QQ_GetEntryCnt(&pDevice->RxPacketFreeQ.Container); 757 758 /* Make sure all the packets have been accounted for. */ 759 for(EntryCnt = 0; EntryCnt < pDevice->RxPacketDescCnt; EntryCnt++) 760 { 761 pPacket = (PLM_PACKET) QQ_PopHead(&pDevice->RxPacketFreeQ.Container); 762 if (pPacket == 0) 763 break; 764 765 b44_MM_FreeRxBuffer(pDevice, pPacket); 766 767 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket); 768 } 769 770 b44_LM_ResetChip(pDevice); 771 772 /* Reprogram the MAC address. */ 773 b44_LM_SetMacAddress(pDevice, pDevice->NodeAddress); 774 775 return LM_STATUS_SUCCESS; 776} /* LM_Halt */ 777 778 779STATIC LM_STATUS 780b44_LM_ResetChip(LM_DEVICE_BLOCK *pDevice) 781{ 782 if (!b44_LM_sb_iscoreup(pDevice)) { 783 b44_LM_sb_pci_setup(pDevice, 784 ((pDevice->coreunit == 0)? 
SBIV_ENET0: SBIV_ENET1)); 785 /* power on reset: reset the enet core */ 786 b44_LM_sb_core_reset(pDevice); 787 788 goto chipinreset; 789 } 790 791 /* read counters before resetting the chip */ 792 if (pDevice->mibgood) 793 b44_LM_StatsUpdate(pDevice); 794 795 REG_WR(pDevice, intrecvlazy, 0); 796 797 /* disable emac */ 798 REG_WR(pDevice, enetcontrol, EC_ED); 799 SPINWAIT((REG_RD(pDevice, enetcontrol) & EC_ED), 200); 800 801 /* reset the dma engines */ 802 REG_WR(pDevice, dmaregs.xmtcontrol, 0); 803 pDevice->txin = pDevice->txout = 0; 804 805 if (REG_RD(pDevice, dmaregs.rcvstatus) & RS_RE_MASK) { 806 /* wait until channel is idle or stopped */ 807 SPINWAIT(!(REG_RD(pDevice, dmaregs.rcvstatus) & RS_RS_IDLE), 808 100); 809 } 810 811 REG_WR(pDevice, dmaregs.rcvcontrol, 0); 812 pDevice->rxin = pDevice->rxout = 0; 813 814 REG_WR(pDevice, enetcontrol, EC_ES); 815 816 b44_LM_sb_core_reset(pDevice); 817 818chipinreset: 819 if (pDevice->InitDone == FALSE) 820 b44_LM_ClearStats(pDevice); 821 822 /* 823 * We want the phy registers to be accessible even when 824 * the driver is "downed" so initialize MDC preamble, frequency, 825 * and whether internal or external phy here. 
826 */ 827 /* 4402 has 62.5Mhz SB clock and internal phy */ 828 REG_WR(pDevice, mdiocontrol, 0x8d); 829 830 /* some chips have internal phy, some don't */ 831 if (!(REG_RD(pDevice, devcontrol) & DC_IP)) { 832 REG_WR(pDevice, enetcontrol, EC_EP); 833 } else if (REG_RD(pDevice, devcontrol) & DC_ER) { 834 REG_AND(pDevice, devcontrol, ~DC_ER); 835 836 b44_MM_Wait(100); 837 } 838 839 /* clear persistent sw intstatus */ 840 pDevice->intstatus = 0; 841 return LM_STATUS_SUCCESS; 842} 843 844void 845b44_LM_ClearStats(LM_DEVICE_BLOCK *pDevice) 846{ 847 /* must clear mib registers by hand */ 848 REG_WR(pDevice, mibcontrol, EMC_RZ); 849 (void) REG_RD(pDevice, mib.tx_good_octets); 850 (void) REG_RD(pDevice, mib.tx_good_pkts); 851 (void) REG_RD(pDevice, mib.tx_octets); 852 (void) REG_RD(pDevice, mib.tx_pkts); 853 (void) REG_RD(pDevice, mib.tx_broadcast_pkts); 854 (void) REG_RD(pDevice, mib.tx_multicast_pkts); 855 (void) REG_RD(pDevice, mib.tx_len_64); 856 (void) REG_RD(pDevice, mib.tx_len_65_to_127); 857 (void) REG_RD(pDevice, mib.tx_len_128_to_255); 858 (void) REG_RD(pDevice, mib.tx_len_256_to_511); 859 (void) REG_RD(pDevice, mib.tx_len_512_to_1023); 860 (void) REG_RD(pDevice, mib.tx_len_1024_to_max); 861 (void) REG_RD(pDevice, mib.tx_jabber_pkts); 862 (void) REG_RD(pDevice, mib.tx_oversize_pkts); 863 (void) REG_RD(pDevice, mib.tx_fragment_pkts); 864 (void) REG_RD(pDevice, mib.tx_underruns); 865 (void) REG_RD(pDevice, mib.tx_total_cols); 866 (void) REG_RD(pDevice, mib.tx_single_cols); 867 (void) REG_RD(pDevice, mib.tx_multiple_cols); 868 (void) REG_RD(pDevice, mib.tx_excessive_cols); 869 (void) REG_RD(pDevice, mib.tx_late_cols); 870 (void) REG_RD(pDevice, mib.tx_defered); 871 (void) REG_RD(pDevice, mib.tx_carrier_lost); 872 (void) REG_RD(pDevice, mib.tx_pause_pkts); 873 (void) REG_RD(pDevice, mib.rx_good_octets); 874 (void) REG_RD(pDevice, mib.rx_good_pkts); 875 (void) REG_RD(pDevice, mib.rx_octets); 876 (void) REG_RD(pDevice, mib.rx_pkts); 877 (void) REG_RD(pDevice, 
mib.rx_broadcast_pkts); 878 (void) REG_RD(pDevice, mib.rx_multicast_pkts); 879 (void) REG_RD(pDevice, mib.rx_len_64); 880 (void) REG_RD(pDevice, mib.rx_len_65_to_127); 881 (void) REG_RD(pDevice, mib.rx_len_128_to_255); 882 (void) REG_RD(pDevice, mib.rx_len_256_to_511); 883 (void) REG_RD(pDevice, mib.rx_len_512_to_1023); 884 (void) REG_RD(pDevice, mib.rx_len_1024_to_max); 885 (void) REG_RD(pDevice, mib.rx_jabber_pkts); 886 (void) REG_RD(pDevice, mib.rx_oversize_pkts); 887 (void) REG_RD(pDevice, mib.rx_fragment_pkts); 888 (void) REG_RD(pDevice, mib.rx_missed_pkts); 889 (void) REG_RD(pDevice, mib.rx_crc_align_errs); 890 (void) REG_RD(pDevice, mib.rx_undersize); 891 (void) REG_RD(pDevice, mib.rx_crc_errs); 892 (void) REG_RD(pDevice, mib.rx_align_errs); 893 (void) REG_RD(pDevice, mib.rx_symbol_errs); 894 (void) REG_RD(pDevice, mib.rx_pause_pkts); 895 (void) REG_RD(pDevice, mib.rx_nonpause_pkts); 896 pDevice->mibgood = TRUE; 897} 898 899#ifdef BCM_NAPI_RXPOLL 900int 901b44_LM_ServiceRxPoll(PLM_DEVICE_BLOCK pDevice, int limit) 902{ 903 LM_UINT32 rxin, curr, rxdmask; 904 unsigned int len; 905 int skiplen = 0; 906 bcmenetrxh_t *rxh; 907 LM_PACKET *pPacket; 908 int received = 0; 909 910 curr = (REG_RD(pDevice, dmaregs.rcvstatus) & RS_CD_MASK); 911 curr = curr / sizeof(dmadd_t); 912 rxdmask = pDevice->MaxRxPacketDescCnt - 1; 913 for (rxin = pDevice->rxin; rxin != curr; rxin = (rxin + 1) & rxdmask) 914 { 915 pPacket = pDevice->RxPacketArr[rxin]; 916 if (skiplen > 0) { 917 pPacket->PacketStatus = LM_STATUS_FAILURE; 918 skiplen -= pPacket->u.Rx.RxBufferSize; 919 if (skiplen < 0) 920 skiplen = 0; 921 goto rx_err; 922 } 923 rxh = (bcmenetrxh_t *) pPacket->u.Rx.pRxBufferVirt; 924 len = MM_SWAP_LE16(rxh->len); 925 if (len > (pPacket->u.Rx.RxBufferSize - pDevice->rxoffset)) { 926 pPacket->PacketStatus = LM_STATUS_FAILURE; 927 skiplen = len - (pPacket->u.Rx.RxBufferSize - 928 pDevice->rxoffset); 929 } 930 else { 931 int i = 0; 932 933 if (len == 0) { 934 while ((len == 0) && (i < 5)) 
{ 935 b44_MM_Wait(2); 936 len = MM_SWAP_LE16(rxh->len); 937 i++; 938 } 939 if (len == 0) { 940 pPacket->PacketStatus = 941 LM_STATUS_FAILURE; 942 goto rx_err; 943 } 944 } 945 if (MM_SWAP_LE16(rxh->flags) & RXF_ERRORS) { 946 pPacket->PacketStatus = LM_STATUS_FAILURE; 947 } 948 else { 949 pPacket->PacketStatus = LM_STATUS_SUCCESS; 950 } 951 pPacket->PacketSize = len - 4; 952 } 953rx_err: 954 QQ_PushTail(&pDevice->RxPacketReceivedQ.Container, 955 pPacket); 956 957 if (++received >= limit) 958 { 959 rxin = (rxin + 1) & rxdmask; 960 break; 961 } 962 curr = (REG_RD(pDevice, dmaregs.rcvstatus) & RS_CD_MASK) / 963 sizeof(dmadd_t); 964 } 965 pDevice->rxin = rxin; 966 return received; 967} 968#endif 969 970void 971b44_LM_ServiceRxInterrupt(LM_DEVICE_BLOCK *pDevice) 972{ 973 LM_UINT32 curr; 974#ifndef BCM_NAPI_RXPOLL 975 LM_UINT32 rxin, rxdmask; 976 unsigned int len; 977 int skiplen = 0; 978 bcmenetrxh_t *rxh; 979 LM_PACKET *pPacket; 980#endif 981 982 curr = (REG_RD(pDevice, dmaregs.rcvstatus) & RS_CD_MASK); 983 curr = curr / sizeof(dmadd_t); 984#ifdef BCM_NAPI_RXPOLL 985 if (!pDevice->RxPoll) 986 { 987 if (pDevice->rxin != curr) 988 { 989 if (b44_MM_ScheduleRxPoll(pDevice) == LM_STATUS_SUCCESS) 990 { 991 pDevice->RxPoll = TRUE; 992 pDevice->intmask &= ~(I_RI | I_RU | I_RO); 993 REG_WR(pDevice, intmask, pDevice->intmask); 994 } 995 } 996 } 997#else 998 rxdmask = pDevice->MaxRxPacketDescCnt - 1; 999 for (rxin = pDevice->rxin; rxin != curr; rxin = (rxin + 1) & rxdmask) 1000 { 1001 pPacket = pDevice->RxPacketArr[rxin]; 1002 if (skiplen > 0) { 1003 pPacket->PacketStatus = LM_STATUS_FAILURE; 1004 skiplen -= pPacket->u.Rx.RxBufferSize; 1005 if (skiplen < 0) 1006 skiplen = 0; 1007 goto rx_err; 1008 } 1009 rxh = (bcmenetrxh_t *) pPacket->u.Rx.pRxBufferVirt; 1010 len = MM_SWAP_LE16(rxh->len); 1011 if (len > (pPacket->u.Rx.RxBufferSize - pDevice->rxoffset)) { 1012 pPacket->PacketStatus = LM_STATUS_FAILURE; 1013 skiplen = len - (pPacket->u.Rx.RxBufferSize - 1014 pDevice->rxoffset); 1015 
} 1016 else { 1017 int i = 0; 1018 1019 if (len == 0) { 1020 while ((len == 0) && (i < 5)) { 1021 b44_MM_Wait(2); 1022 len = MM_SWAP_LE16(rxh->len); 1023 i++; 1024 } 1025 if (len == 0) { 1026 pPacket->PacketStatus = 1027 LM_STATUS_FAILURE; 1028 goto rx_err; 1029 } 1030 } 1031 if (MM_SWAP_LE16(rxh->flags) & RXF_ERRORS) { 1032 pPacket->PacketStatus = LM_STATUS_FAILURE; 1033 } 1034 else { 1035 pPacket->PacketStatus = LM_STATUS_SUCCESS; 1036 } 1037 pPacket->PacketSize = len - 4; 1038 } 1039rx_err: 1040 QQ_PushTail(&pDevice->RxPacketReceivedQ.Container, 1041 pPacket); 1042 curr = (REG_RD(pDevice, dmaregs.rcvstatus) & RS_CD_MASK) / 1043 sizeof(dmadd_t); 1044 } 1045 pDevice->rxin = curr; 1046#endif 1047} 1048 1049void 1050b44_LM_ServiceTxInterrupt(LM_DEVICE_BLOCK *pDevice) 1051{ 1052 LM_UINT32 txin, curr, txdmask; 1053 LM_PACKET *pPacket; 1054 1055 curr = (REG_RD(pDevice, dmaregs.xmtstatus) & XS_CD_MASK); 1056 curr = curr / sizeof(dmadd_t); 1057 txdmask = pDevice->MaxTxPacketDescCnt - 1; 1058 for (txin = pDevice->txin; txin != curr; txin = (txin + 1) & txdmask) 1059 { 1060 if ((pPacket = pDevice->TxPacketArr[txin])) { 1061 QQ_PushTail(&pDevice->TxPacketXmittedQ.Container, 1062 pPacket); 1063 pDevice->TxPacketArr[txin] = 0; 1064 MM_ATOMIC_ADD(&pDevice->SendDescLeft, 1065 pPacket->u.Tx.FragCount); 1066 } 1067 } 1068 pDevice->txin = curr; 1069} 1070 1071/******************************************************************************/ 1072/* Description: */ 1073/* This is the interrupt event handler routine. It acknowledges all */ 1074/* pending interrupts and process all pending events. 
*/
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
/* Poll-and-dispatch interrupt service loop: reads intstatus, masks off      */
/* interrupts we did not enable, acknowledges the rest, then dispatches to   */
/* the link / rx / tx / error handlers until no enabled cause remains.       */
LM_STATUS
b44_LM_ServiceInterrupts(PLM_DEVICE_BLOCK pDevice)
{
    LM_UINT32 intstatus, intmask;

    while (1) {
        intstatus = REG_RD(pDevice, intstatus);
        intmask = REG_RD(pDevice, intmask);

        /* defer unsolicited interrupts */
        intstatus &= intmask;

        /* if not for us */
        if (intstatus == 0)
            /* NOTE(review): 12 is not one of the LM_STATUS_* codes used
             * elsewhere in this file -- confirm callers treat any nonzero
             * value as "not our interrupt". */
            return 12;

        /* clear the interrupt (write-one-to-clear) */
        REG_WR(pDevice, intstatus, intstatus);

        if (intstatus & I_LS) {
            /* link-state change */
            b44_LM_PollLink(pDevice);
        }
        if (intstatus & I_RI) {
            /* receive interrupt */
            b44_LM_ServiceRxInterrupt(pDevice);
        }

        if (intstatus & (I_XI | I_TO)) {
            /* transmit complete or general-purpose timer */
            b44_LM_ServiceTxInterrupt(pDevice);
            REG_WR(pDevice, gptimer, 0);
        }
        if (intstatus & I_ERRORS) {
            /* fatal DMA/descriptor error: full adapter reset */
#ifdef B44_DEBUG
            b44_reset_count++;
#endif
            pDevice->InReset = TRUE;
            b44_LM_ResetAdapter(pDevice, TRUE);
            pDevice->InReset = FALSE;
            b44_LM_EnableInterrupt(pDevice);
        }
#ifndef BCM_NAPI_RXPOLL
        if (!QQ_Empty(&pDevice->RxPacketReceivedQ.Container)) {
            b44_MM_IndicateRxPackets(pDevice);
        }
#endif
        if (!QQ_Empty(&pDevice->TxPacketXmittedQ.Container)) {
            b44_MM_IndicateTxPackets(pDevice);
        }
    }
    return LM_STATUS_SUCCESS;
} /* LM_ServiceInterrupts */



/******************************************************************************/
/* Description:                                                               */
/*    Adds pMcAddress to the multicast table, or increments its reference    */
/*    count if it is already present, then enables multicast reception.      */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS, or LM_STATUS_FAILURE if the table is full.          */
/******************************************************************************/
LM_STATUS
b44_LM_MulticastAdd(
PLM_DEVICE_BLOCK pDevice,
PLM_UINT8 pMcAddress) {
    PLM_UINT8 pEntry;
    LM_UINT32 j;

    pEntry = pDevice->McTable[0];
    for(j = 0; j < pDevice->McEntryCount; j++)
    {
        if(IS_ETH_ADDRESS_EQUAL(pEntry, pMcAddress))
        {
            /* Found a match, increment the instance count. */
            pEntry[LM_MC_INSTANCE_COUNT_INDEX] += 1;

            return LM_STATUS_SUCCESS;
        }

        pEntry += LM_MC_ENTRY_SIZE;
    }

    if(pDevice->McEntryCount >= LM_MAX_MC_TABLE_SIZE)
    {
        return LM_STATUS_FAILURE;
    }

    /* Append a new entry with an instance count of one. */
    pEntry = pDevice->McTable[pDevice->McEntryCount];

    COPY_ETH_ADDRESS(pMcAddress, pEntry);
    pEntry[LM_MC_INSTANCE_COUNT_INDEX] = 1;

    pDevice->McEntryCount++;

    b44_LM_SetReceiveMask(pDevice, pDevice->ReceiveMask | LM_ACCEPT_MULTICAST);

    return LM_STATUS_SUCCESS;
} /* b44_LM_MulticastAdd */



/******************************************************************************/
/* Description:                                                               */
/*    Decrements the instance count of pMcAddress in the multicast table,    */
/*    compacting the table (last entry moved into the freed slot) when the   */
/*    count reaches zero, and disabling multicast reception when the table   */
/*    becomes empty.                                                          */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS, or LM_STATUS_FAILURE if the address is not found.   */
/******************************************************************************/
LM_STATUS
b44_LM_MulticastDel(PLM_DEVICE_BLOCK pDevice, PLM_UINT8 pMcAddress)
{
    PLM_UINT8 pEntry;
    LM_UINT32 j;

    pEntry = pDevice->McTable[0];
    for(j = 0; j < pDevice->McEntryCount; j++)
    {
        if(IS_ETH_ADDRESS_EQUAL(pEntry, pMcAddress))
        {
            /* Found a match, decrement the instance count. */
            pEntry[LM_MC_INSTANCE_COUNT_INDEX] -= 1;

            /* No more instance left, remove the address from the table. */
            /* Move the last entry in the table to the delete slot. */
            if(pEntry[LM_MC_INSTANCE_COUNT_INDEX] == 0 &&
                pDevice->McEntryCount > 1)
            {

                COPY_ETH_ADDRESS(
                    pDevice->McTable[pDevice->McEntryCount-1], pEntry);
                pEntry[LM_MC_INSTANCE_COUNT_INDEX] =
                    pDevice->McTable[pDevice->McEntryCount-1]
                    [LM_MC_INSTANCE_COUNT_INDEX];
            }
            pDevice->McEntryCount--;

            /* Update the receive mask if the table is empty. */
            if(pDevice->McEntryCount == 0)
            {
                b44_LM_SetReceiveMask(pDevice,
                    pDevice->ReceiveMask & ~LM_ACCEPT_MULTICAST);
            }

            return LM_STATUS_SUCCESS;
        }

        pEntry += LM_MC_ENTRY_SIZE;
    }

    return LM_STATUS_FAILURE;
} /* b44_LM_MulticastDel */



/******************************************************************************/
/* Description:                                                               */
/*    Empties the multicast table and disables multicast reception.          */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS
b44_LM_MulticastClear(
PLM_DEVICE_BLOCK pDevice) {
    pDevice->McEntryCount = 0;

    b44_LM_SetReceiveMask(pDevice, pDevice->ReceiveMask & ~LM_ACCEPT_MULTICAST);

    return LM_STATUS_SUCCESS;
} /* b44_LM_MulticastClear */



/******************************************************************************/
/* Description:                                                               */
/*    No-op in this driver; the MAC address is programmed elsewhere (see     */
/*    b44_LM_WriteCam).                                                       */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS
b44_LM_SetMacAddress(PLM_DEVICE_BLOCK pDevice, PLM_UINT8 pMacAddress)
{
    return LM_STATUS_SUCCESS;
}


/******************************************************************************/
/* Description:                                                               */
/*    Resolves the rx/tx PAUSE configuration from the local and remote       */
/*    autonegotiation advertisements (802.3 Annex 28B) and programs the      */
/*    EMAC rx-config and flow-control registers accordingly.                 */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS
b44_LM_SetFlowControl(
    PLM_DEVICE_BLOCK pDevice,
    LM_UINT32 LocalPhyAd,
    LM_UINT32 RemotePhyAd)
{
    LM_FLOW_CONTROL FlowCap;

    /* Resolve flow control. */
    FlowCap = LM_FLOW_CONTROL_NONE;

    /* See Table 28B-3 of 802.3ab-1999 spec. */
    if(pDevice->FlowControlCap & LM_FLOW_CONTROL_AUTO_PAUSE)
    {
        if(LocalPhyAd & PHY_AN_AD_PAUSE_CAPABLE)
        {
            if(LocalPhyAd & PHY_AN_AD_ASYM_PAUSE)
            {
                if(RemotePhyAd & PHY_LINK_PARTNER_PAUSE_CAPABLE)
                {
                    FlowCap = LM_FLOW_CONTROL_TRANSMIT_PAUSE |
                        LM_FLOW_CONTROL_RECEIVE_PAUSE;
                }
                else if(RemotePhyAd & PHY_LINK_PARTNER_ASYM_PAUSE)
                {
                    FlowCap = LM_FLOW_CONTROL_RECEIVE_PAUSE;
                }
            }
            else
            {
                if(RemotePhyAd & PHY_LINK_PARTNER_PAUSE_CAPABLE)
                {
                    FlowCap = LM_FLOW_CONTROL_TRANSMIT_PAUSE |
                        LM_FLOW_CONTROL_RECEIVE_PAUSE;
                }
            }
        }
        else if(LocalPhyAd & PHY_AN_AD_ASYM_PAUSE)
        {
            if((RemotePhyAd & PHY_LINK_PARTNER_PAUSE_CAPABLE) &&
                (RemotePhyAd & PHY_LINK_PARTNER_ASYM_PAUSE))
            {
                FlowCap = LM_FLOW_CONTROL_TRANSMIT_PAUSE;
            }
        }
    }
    else
    {
        /* Forced (non-negotiated) flow-control configuration. */
        FlowCap = pDevice->FlowControlCap;
    }

    /* Enable/disable rx PAUSE. */
    if(FlowCap & LM_FLOW_CONTROL_RECEIVE_PAUSE &&
        (pDevice->FlowControlCap == LM_FLOW_CONTROL_AUTO_PAUSE ||
        pDevice->FlowControlCap & LM_FLOW_CONTROL_RECEIVE_PAUSE))
    {
        pDevice->FlowControl |= LM_FLOW_CONTROL_RECEIVE_PAUSE;
        /* NOTE(review): plain write sets ONLY ERC_EF in rxconfig,
         * clearing any other rxconfig bits -- confirm intended. */
        REG_WR(pDevice, rxconfig, ERC_EF);

    }

    /* Enable/disable tx PAUSE. */
    if(FlowCap & LM_FLOW_CONTROL_TRANSMIT_PAUSE &&
        (pDevice->FlowControlCap == LM_FLOW_CONTROL_AUTO_PAUSE ||
        pDevice->FlowControlCap & LM_FLOW_CONTROL_TRANSMIT_PAUSE))
    {
        pDevice->FlowControl |= LM_FLOW_CONTROL_TRANSMIT_PAUSE;
        /* PAUSE generation enabled, rx FIFO high-water mark = 0xc0 */
        REG_WR(pDevice, emacflowcontrol, EMF_PG | (0xc0 & EMF_RFH_MASK));

    }

    return LM_STATUS_SUCCESS;
}



/******************************************************************************/
/* Description:                                                               */
/*    Configures the PHY: LED modes, then either programs the autoneg        */
/*    advertisement (restarting autoneg only when it would change) or        */
/*    forces speed/duplex via the PHY control register.                      */
/*                                                                            */
/* Return:                                                                    */
/*    LM_STATUS_SUCCESS                                                       */
/******************************************************************************/
LM_STATUS
b44_LM_SetupPhy(
    PLM_DEVICE_BLOCK pDevice)
{
    LM_UINT32 Value32;
    LM_UINT32 Adv, FCAdv, Ctrl, NewCtrl;
    int RestartAuto = 0;
    int i;

    /* enable activity led */
    b44_LM_ReadPhy(pDevice, 26, &Value32);
    b44_LM_WritePhy(pDevice, 26, Value32 & 0x7fff);

    /* enable traffic meter led mode */
    b44_LM_ReadPhy(pDevice, 27, &Value32);
    b44_LM_WritePhy(pDevice, 27, Value32 | (1 << 6));
    if (!pDevice->DisableAutoNeg) {
        /* Build the speed/duplex advertisement from the request. */
        if (pDevice->RequestedLineSpeed == LM_LINE_SPEED_AUTO) {
            Adv = PHY_AN_AD_ALL_SPEEDS;
        }
        else if (pDevice->RequestedLineSpeed == LM_LINE_SPEED_10MBPS) {
            if (pDevice->RequestedDuplexMode ==
                LM_DUPLEX_MODE_FULL) {
                Adv = PHY_AN_AD_10BASET_FULL;
            }
            else {
                Adv = PHY_AN_AD_10BASET_HALF;
            }
        }
        else if (pDevice->RequestedLineSpeed == LM_LINE_SPEED_100MBPS) {
            if (pDevice->RequestedDuplexMode ==
                LM_DUPLEX_MODE_FULL) {
                Adv = PHY_AN_AD_100BASETX_FULL;
            }
            else {
                Adv = PHY_AN_AD_100BASETX_HALF;
            }
        }
        else {
            Adv = PHY_AN_AD_ALL_SPEEDS;
        }

        if ((pDevice->RequestedLineSpeed == LM_LINE_SPEED_AUTO) ||
            (pDevice->RequestedDuplexMode == LM_DUPLEX_MODE_FULL)) {
            FCAdv = b44_GetPhyAdFlowCntrlSettings(pDevice);
            /* NOTE(review): at this point Value32 still holds the
             * last value read from PHY register 27, not the current
             * PHY_AN_AD_REG advertisement -- confirm a read of
             * PHY_AN_AD_REG was not dropped here. */
            Value32 &= PHY_AN_AD_ASYM_PAUSE |
                PHY_AN_AD_PAUSE_CAPABLE;
            if (FCAdv != Value32) {
                RestartAuto = 1;
                Adv |= FCAdv;
                goto restart_auto_neg;
            }
        }

        b44_LM_ReadPhy(pDevice, PHY_CTRL_REG, &Ctrl);
        if (!(Ctrl & PHY_CTRL_AUTO_NEG_ENABLE)) {
            RestartAuto = 1;
            goto restart_auto_neg;
        }
        /* Restart only if the advertised speeds would change. */
        b44_LM_ReadPhy(pDevice, PHY_AN_AD_REG, &Value32);
        if ((Value32 & PHY_AN_AD_ALL_SPEEDS) != Adv) {
            RestartAuto = 1;
        }
restart_auto_neg:
        if (RestartAuto) {
            Adv |= PHY_AN_AD_PROTOCOL_802_3_CSMA_CD;
            b44_LM_WritePhy(pDevice, PHY_AN_AD_REG, Adv);
            b44_LM_WritePhy(pDevice, PHY_CTRL_REG,
                PHY_CTRL_AUTO_NEG_ENABLE |
                PHY_CTRL_RESTART_AUTO_NEG);
        }
        pDevice->Advertising = Adv;
    }
    else {
        /* Forced speed/duplex: autoneg disabled. */
        b44_LM_ReadPhy(pDevice, PHY_CTRL_REG, &Ctrl);
        NewCtrl = Ctrl & (~(PHY_CTRL_SPEED_SELECT_100MBPS |
            PHY_CTRL_FULL_DUPLEX_MODE | PHY_CTRL_AUTO_NEG_ENABLE));
        if (pDevice->RequestedLineSpeed == LM_LINE_SPEED_100MBPS) {
            NewCtrl |= PHY_CTRL_SPEED_SELECT_100MBPS;
        }
        if (pDevice->RequestedDuplexMode == LM_DUPLEX_MODE_FULL) {
            NewCtrl |= PHY_CTRL_FULL_DUPLEX_MODE;
            REG_OR(pDevice, txcontrol, EXC_FD);
        }
        else {
            REG_AND(pDevice, txcontrol, ~EXC_FD);
        }
        if (NewCtrl != Ctrl) {
            /* force a link down */
            b44_LM_WritePhy(pDevice, PHY_CTRL_REG,
                PHY_CTRL_LOOPBACK_MODE);
            /* wait (up to ~80ms) for the link to actually drop */
            i = 0;
            do {
                b44_LM_ReadPhy(pDevice, PHY_STATUS_REG,
                    &Value32);
                b44_MM_Wait(100);
                i++;
            } while ((Value32 & PHY_STATUS_LINK_PASS) && (i < 800));
            b44_LM_ResetPhy(pDevice);
            b44_MM_Wait(100);
            b44_LM_WritePhy(pDevice, PHY_CTRL_REG, NewCtrl);
            /* re-enable activity led (reset cleared it) */
            b44_LM_ReadPhy(pDevice, 26, &Value32);
            b44_LM_WritePhy(pDevice, 26, Value32 & 0x7fff);
        }
        if (pDevice->RequestedDuplexMode == LM_DUPLEX_MODE_FULL) {
            pDevice->FlowControlCap &= ~LM_FLOW_CONTROL_AUTO_PAUSE;
            b44_LM_SetFlowControl(pDevice, 0, 0);
        }
    }
    return LM_STATUS_SUCCESS;
}

/* Soft-reset the PHY via the control register; logs if the reset bit has   */
/* not self-cleared after 100us. Always returns LM_STATUS_SUCCESS.          */
LM_STATUS
b44_LM_ResetPhy(LM_DEVICE_BLOCK *pDevice)
{
    LM_UINT32 value32;

    b44_LM_WritePhy(pDevice, 0, PHY_CTRL_PHY_RESET);
    b44_MM_Wait(100);
    b44_LM_ReadPhy(pDevice, 0, &value32);
    if (value32 & PHY_CTRL_PHY_RESET) {
        printf("Phy reset not complete\n");
    }
    return LM_STATUS_SUCCESS;
}

/******************************************************************************/
/* Description:                                                               */
/*    Reads PHY register PhyReg over MDIO into *pData32, spinning (up to     */
/*    100us) on the EMAC MII-done interrupt bit.                             */
/*                                                                            */
/* Return:                                                                    */
/*    None.                                                                   */
/******************************************************************************/
LM_VOID
b44_LM_ReadPhy(
PLM_DEVICE_BLOCK pDevice, LM_UINT32 PhyReg, LM_UINT32 *pData32)
{
    /* clear mii_int */
    REG_WR(pDevice, emacintstatus, EI_MII);

    /* issue the read */
    REG_WR(pDevice, mdiodata, (MD_SB_START | MD_OP_READ |
        (pDevice->PhyAddr << MD_PMD_SHIFT)
        | (PhyReg << MD_RA_SHIFT) | MD_TA_VALID));

    /* wait for it to complete */
    SPINWAIT(((REG_RD(pDevice, emacintstatus) & EI_MII) == 0), 100);
    if ((REG_RD(pDevice, emacintstatus) & EI_MII) == 0) {
        printf("LM_ReadPhy: did not complete\n");
    }

    /* data is read back even if the spin-wait timed out */
    *pData32 = REG_RD(pDevice, mdiodata) & MD_DATA_MASK;
} /* LM_ReadPhy */



/******************************************************************************/
/* Description:                                                               */
/*    Writes Data32 to PHY register PhyReg over MDIO, spinning (up to        */
/*    100us) on the EMAC MII-done interrupt bit.                             */
/*                                                                            */
/* Return:                                                                    */
/*    None.                                                                   */
/******************************************************************************/
LM_VOID
b44_LM_WritePhy(
PLM_DEVICE_BLOCK pDevice, LM_UINT32 PhyReg, LM_UINT32 Data32)
{
    /* clear mii_int */
    REG_WR(pDevice, emacintstatus, EI_MII);
    ASSERT((REG_RD(pDevice, emacintstatus) & EI_MII) == 0);

    /* issue the write */
    REG_WR(pDevice, mdiodata, (MD_SB_START | MD_OP_WRITE |
        (pDevice->PhyAddr << MD_PMD_SHIFT)
        | (PhyReg << MD_RA_SHIFT) | MD_TA_VALID | Data32));

    /* wait for it to complete */
    SPINWAIT(((REG_RD(pDevice, emacintstatus) & EI_MII) == 0), 100);
    if ((REG_RD(pDevice, emacintstatus) & EI_MII) == 0) {
        printf("b44_LM_WritePhy: did not complete\n");
    }
} /* LM_WritePhy */


/* Accumulates the chip's clear-on-read MIB counters into the software      */
/* totals kept in the device block. Always returns LM_STATUS_SUCCESS.       */
LM_STATUS
b44_LM_StatsUpdate(LM_DEVICE_BLOCK *pDevice)
{
    pDevice->tx_good_octets += REG_RD(pDevice, mib.tx_good_octets);
    pDevice->tx_good_pkts += REG_RD(pDevice, mib.tx_good_pkts);
    pDevice->tx_octets += REG_RD(pDevice, mib.tx_octets);
    pDevice->tx_pkts += REG_RD(pDevice, mib.tx_pkts);
    pDevice->tx_broadcast_pkts += REG_RD(pDevice, mib.tx_broadcast_pkts);
    pDevice->tx_multicast_pkts += REG_RD(pDevice, mib.tx_multicast_pkts);
    pDevice->tx_len_64 += REG_RD(pDevice, mib.tx_len_64);
    pDevice->tx_len_65_to_127 += REG_RD(pDevice, mib.tx_len_65_to_127);
    pDevice->tx_len_128_to_255 += REG_RD(pDevice, mib.tx_len_128_to_255);
    pDevice->tx_len_256_to_511 += REG_RD(pDevice, mib.tx_len_256_to_511);
    pDevice->tx_len_512_to_1023 += REG_RD(pDevice, mib.tx_len_512_to_1023);
    pDevice->tx_len_1024_to_max += REG_RD(pDevice, mib.tx_len_1024_to_max);
    pDevice->tx_jabber_pkts += REG_RD(pDevice, mib.tx_jabber_pkts);
    pDevice->tx_oversize_pkts += REG_RD(pDevice, mib.tx_oversize_pkts);
    pDevice->tx_fragment_pkts += REG_RD(pDevice, mib.tx_fragment_pkts);
    pDevice->tx_underruns += REG_RD(pDevice, mib.tx_underruns);
    pDevice->tx_total_cols += REG_RD(pDevice, mib.tx_total_cols);
    pDevice->tx_single_cols += REG_RD(pDevice, mib.tx_single_cols);
    pDevice->tx_multiple_cols += REG_RD(pDevice, mib.tx_multiple_cols);
    pDevice->tx_excessive_cols += REG_RD(pDevice, mib.tx_excessive_cols);
    pDevice->tx_late_cols += REG_RD(pDevice, mib.tx_late_cols);
    pDevice->tx_defered += REG_RD(pDevice, mib.tx_defered);
/*  pDevice->tx_carrier_lost += REG_RD(pDevice, mib.tx_carrier_lost);*/
    /* carrier counter is sometimes bogus, so disable it for now;
     * the register is still read so the clear-on-read sequence of the
     * MIB block is preserved */
    REG_RD(pDevice, mib.tx_carrier_lost);
    pDevice->tx_pause_pkts += REG_RD(pDevice, mib.tx_pause_pkts);

    pDevice->rx_good_octets += REG_RD(pDevice, mib.rx_good_octets);
    pDevice->rx_good_pkts += REG_RD(pDevice, mib.rx_good_pkts);
    pDevice->rx_octets += REG_RD(pDevice, mib.rx_octets);
    pDevice->rx_pkts += REG_RD(pDevice, mib.rx_pkts);
    pDevice->rx_broadcast_pkts += REG_RD(pDevice, mib.rx_broadcast_pkts);
    pDevice->rx_multicast_pkts += REG_RD(pDevice, mib.rx_multicast_pkts);
    pDevice->rx_len_64 += REG_RD(pDevice, mib.rx_len_64);
    pDevice->rx_len_65_to_127 += REG_RD(pDevice, mib.rx_len_65_to_127);
    pDevice->rx_len_128_to_255 += REG_RD(pDevice, mib.rx_len_128_to_255);
    pDevice->rx_len_256_to_511 += REG_RD(pDevice, mib.rx_len_256_to_511);
    pDevice->rx_len_512_to_1023 += REG_RD(pDevice, mib.rx_len_512_to_1023);
    pDevice->rx_len_1024_to_max += REG_RD(pDevice, mib.rx_len_1024_to_max);
    pDevice->rx_jabber_pkts += REG_RD(pDevice, mib.rx_jabber_pkts);
    pDevice->rx_oversize_pkts += REG_RD(pDevice, mib.rx_oversize_pkts);
    pDevice->rx_fragment_pkts += REG_RD(pDevice, mib.rx_fragment_pkts);
    pDevice->rx_missed_pkts += REG_RD(pDevice, mib.rx_missed_pkts);
    pDevice->rx_crc_align_errs += REG_RD(pDevice, mib.rx_crc_align_errs);
    pDevice->rx_undersize += REG_RD(pDevice, mib.rx_undersize);
    pDevice->rx_crc_errs += REG_RD(pDevice, mib.rx_crc_errs);
    pDevice->rx_align_errs += REG_RD(pDevice, mib.rx_align_errs);
    pDevice->rx_symbol_errs += REG_RD(pDevice, mib.rx_symbol_errs);
    pDevice->rx_pause_pkts += REG_RD(pDevice, mib.rx_pause_pkts);
    pDevice->rx_nonpause_pkts += REG_RD(pDevice, mib.rx_nonpause_pkts);

    return LM_STATUS_SUCCESS;
}

/* Writes the 6-byte ethernet address ea into CAM slot camindex:            */
/* low word = bytes 2..5, high word = valid bit + bytes 0..1; then spins    */
/* until the CAM busy bit clears.                                           */
void
b44_LM_WriteCam(LM_DEVICE_BLOCK *pDevice, LM_UINT8 *ea, LM_UINT32 camindex)
{
    LM_UINT32 w;

    w = ((LM_UINT32)ea[2] << 24) | ((LM_UINT32)ea[3] << 16) |
        ((LM_UINT32) ea[4] << 8) | ea[5];
    REG_WR(pDevice, camdatalo, w);
    w = CD_V | ((LM_UINT32)ea[0] << 8) | ea[1];
    REG_WR(pDevice, camdatahi, w);
    REG_WR(pDevice, camcontrol, (((LM_UINT32) camindex << CC_INDEX_SHIFT) |
        CC_WR));

    /* spin until done */
    SPINWAIT((REG_RD(pDevice, camcontrol) & CC_CB), 100);
}

/******************************************************************************/
/* Description:                                                               */
/*    Translates the device's flow-control capability flags into the PAUSE   */
/*    bits to advertise in PHY_AN_AD_REG (802.3 Annex 28B, Table 28B-3).     */
/*                                                                            */
/* Return:                                                                    */
/*    The PHY_AN_AD_PAUSE_CAPABLE / PHY_AN_AD_ASYM_PAUSE bits to advertise,  */
/*    or 0 when autonegotiation is disabled.                                  */
/******************************************************************************/
static LM_UINT32
b44_GetPhyAdFlowCntrlSettings(
    PLM_DEVICE_BLOCK pDevice)
{
    LM_UINT32 Value32;

    Value32 = 0;

    /* Auto negotiation flow control only when autonegotiation is enabled. */
    if(pDevice->DisableAutoNeg == FALSE ||
        pDevice->RequestedLineSpeed == LM_LINE_SPEED_AUTO)
    {
        /* Please refer to Table 28B-3 of the 802.3ab-1999 spec. */
        if((pDevice->FlowControlCap == LM_FLOW_CONTROL_AUTO_PAUSE) ||
            ((pDevice->FlowControlCap & LM_FLOW_CONTROL_RECEIVE_PAUSE) &&
            (pDevice->FlowControlCap & LM_FLOW_CONTROL_TRANSMIT_PAUSE)))
        {
            Value32 |= PHY_AN_AD_PAUSE_CAPABLE;
        }
        else if(pDevice->FlowControlCap & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
        {
            Value32 |= PHY_AN_AD_ASYM_PAUSE;
        }
        else if(pDevice->FlowControlCap & LM_FLOW_CONTROL_RECEIVE_PAUSE)
        {
            Value32 |= PHY_AN_AD_PAUSE_CAPABLE | PHY_AN_AD_ASYM_PAUSE;
        }
    }

    return Value32;
}


/* No-op statistics hook; counters are maintained by b44_LM_StatsUpdate.    */
LM_STATUS
b44_LM_GetStats(PLM_DEVICE_BLOCK pDevice)
{
    return LM_STATUS_SUCCESS;
}

/* Polls PHY status/aux registers, updates cached speed/duplex, keeps the   */
/* EMAC duplex bit consistent with the PHY, resolves flow control on a new  */
/* full-duplex autonegotiated link, and reports link transitions upward.    */
void
b44_LM_PollLink(LM_DEVICE_BLOCK *pDevice)
{
    LM_UINT32 status, aux;
    LM_UINT32 txcontrol;
    LM_UINT32 LocalAdv, RemoteAdv;

    /* status register is read twice: latched-low bits (e.g. link) need a
     * second read to reflect the current state */
    b44_LM_ReadPhy(pDevice, 1, &status);
    b44_LM_ReadPhy(pDevice, 1, &status);

    b44_LM_ReadPhy(pDevice, 24, &aux);

    /* check for bad mdio read */
    if (status == 0xffff) {
        return;
    }

    /* update current speed and duplex */
    if (aux & AUX_SPEED)
        pDevice->LineSpeed = LM_LINE_SPEED_100MBPS;
    else
        pDevice->LineSpeed = LM_LINE_SPEED_10MBPS;
    if (aux & AUX_DUPLEX)
        pDevice->DuplexMode = LM_DUPLEX_MODE_FULL;
    else
        pDevice->DuplexMode = LM_DUPLEX_MODE_HALF;

    /* monitor link state */
    if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) &&
        (status & STAT_LINK)) {

        /* keep emac txcontrol duplex bit consistent with current */
        /* phy duplex */
        txcontrol = REG_RD(pDevice, txcontrol);
        if ((pDevice->DuplexMode == LM_DUPLEX_MODE_FULL) &&
            !(txcontrol & EXC_FD)) {

            REG_OR(pDevice, txcontrol, EXC_FD);
        }
        else if ((pDevice->DuplexMode == LM_DUPLEX_MODE_HALF) &&
            (txcontrol & EXC_FD)) {

            REG_AND(pDevice, txcontrol, ~EXC_FD);
        }
        if (!pDevice->DisableAutoNeg && (pDevice->DuplexMode ==
            LM_DUPLEX_MODE_FULL)) {

            /* resolve PAUSE from the negotiated advertisements */
            b44_LM_ReadPhy(pDevice, PHY_AN_AD_REG, &LocalAdv);
            b44_LM_ReadPhy(pDevice, PHY_LINK_PARTNER_ABILITY_REG,
                &RemoteAdv);
            b44_LM_SetFlowControl(pDevice, LocalAdv, RemoteAdv);
        }

        pDevice->LinkStatus = LM_STATUS_LINK_ACTIVE;
        b44_MM_IndicateStatus(pDevice, LM_STATUS_LINK_ACTIVE);
    }
    else if ((pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) &&
        !(status & STAT_LINK)) {

        pDevice->LinkStatus = LM_STATUS_LINK_DOWN;
        b44_MM_IndicateStatus(pDevice, LM_STATUS_LINK_DOWN);
    }


    /* check for remote fault error */
    if (status & STAT_REMFAULT) {
        printf("remote fault\n");
    }

    /* check for jabber error */
    if (status & STAT_JAB) {
        printf("jabber\n");
    }
}

/* reset and re-enable a core */
void
b44_LM_sb_core_reset(LM_DEVICE_BLOCK *pDevice)
{
    /* reads are kept in a volatile to force the PCI posted writes out */
    volatile LM_UINT32 dummy;

    /*
     * Must do the disable sequence first to work for arbitrary current core state.
     */
    b44_LM_sb_core_disable(pDevice);

    /*
     * Now do the initialization sequence.
     */

    /* set reset while enabling the clock and forcing them on throughout the core */
    REG_WR(pDevice, sbconfig.sbtmstatelow,
        (SBTML_FGC | SBTML_CLK | SBTML_RESET));

    dummy = REG_RD(pDevice, sbconfig.sbtmstatelow);
    b44_MM_Wait(1);

    /* PR3158 workaround - not fixed in any chip yet */
    if (REG_RD(pDevice, sbconfig.sbtmstatehigh) & SBTMH_SERR) {
        printf("SBTMH_SERR; clearing...\n");
        REG_WR(pDevice, sbconfig.sbtmstatehigh, 0);
        ASSERT(0);
    }
    if ((dummy = REG_RD(pDevice, sbconfig.sbimstate)) &
        (SBIM_IBE | SBIM_TO)) {

        REG_AND(pDevice, sbconfig.sbimstate, ~(SBIM_IBE | SBIM_TO));
        ASSERT(0);
    }

    /* clear reset and allow it to propagate throughout the core */
    REG_WR(pDevice, sbconfig.sbtmstatelow, (SBTML_FGC | SBTML_CLK));
    dummy = REG_RD(pDevice, sbconfig.sbtmstatelow);
    b44_MM_Wait(1);

    /* leave clock enabled */
    REG_WR(pDevice, sbconfig.sbtmstatelow, SBTML_CLK);
    dummy = REG_RD(pDevice, sbconfig.sbtmstatelow);
    b44_MM_Wait(1);
}

/* Puts the current backplane core into reset, following the Sonics        */
/* reject/busy handshake so in-flight transactions drain first.            */
void
b44_LM_sb_core_disable(LM_DEVICE_BLOCK *pDevice)
{
    volatile LM_UINT32 dummy;

    /* must return if core is already in reset */
    if (REG_RD(pDevice, sbconfig.sbtmstatelow) & SBTML_RESET)
        return;

    /* set the reject bit */
    REG_WR(pDevice, sbconfig.sbtmstatelow, (SBTML_CLK | SBTML_REJ));

    /* spin until reject is set */
    while ((REG_RD(pDevice, sbconfig.sbtmstatelow) & SBTML_REJ) == 0)
        b44_MM_Wait(1);

    /* spin until sbtmstatehigh.busy is clear */
    while (REG_RD(pDevice, sbconfig.sbtmstatehigh) & SBTMH_BUSY)
        b44_MM_Wait(1);

    /* set reset and reject while enabling the clocks */
    REG_WR(pDevice, sbconfig.sbtmstatelow,
        (SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET));

    dummy = REG_RD(pDevice, sbconfig.sbtmstatelow);
    b44_MM_Wait(10);

    /* leave reset and reject asserted */
    REG_WR(pDevice, sbconfig.sbtmstatelow, (SBTML_REJ | SBTML_RESET));
    b44_MM_Wait(1);
}

/*
 * Configure the pci core for pci client (NIC) action
 * and return the pci core revision.
 */
LM_UINT32
b44_LM_sb_pci_setup(LM_DEVICE_BLOCK *pDevice, LM_UINT32 cores)
{
    LM_UINT32 bar0window;
    sbpciregs_t *pciregs;   /* NOTE(review): assigned but not used below */
    LM_UINT32 pcirev;

    pciregs = (sbpciregs_t *) pDevice->pMemView;

    /* save bar0window */
    b44_MM_ReadConfig32(pDevice, PCI_BAR0_WIN, &bar0window);
    /* point bar0 at pci core registers */
    b44_MM_WriteConfig32(pDevice, PCI_BAR0_WIN, b44_LM_getsbaddr(pDevice,
        SBID_REG_PCI, 0));

    ASSERT(b44_LM_sb_coreid(pDevice) == SB_PCI);

    pcirev = b44_LM_sb_corerev(pDevice);

    /* enable sb->pci interrupts */
    REG_OR(pDevice, sbconfig.sbintvec, cores);

    /* enable prefetch and bursts for sonics-to-pci translation 2 */
    REG_WR_OFFSET(pDevice, OFFSETOF(sbpciregs_t, sbtopci2),
        REG_RD_OFFSET(pDevice, OFFSETOF(sbpciregs_t, sbtopci2)) |
        (SBTOPCI_PREF|SBTOPCI_BURST));

    /* restore bar0window */
    b44_MM_WriteConfig32(pDevice, PCI_BAR0_WIN, bar0window);

    return (pcirev);
}

/*
 * Return the SB address corresponding to core <id> instance <coreunit>.
 * Provide a layer of indirection between SB address map elements
 * and the individual chip maps.
 */
LM_UINT32
b44_LM_getsbaddr(LM_DEVICE_BLOCK *pDevice, LM_UINT32 id, LM_UINT32 coreunit)
{
    struct sbmap *sbmap;
    int i;

    sbmap = pDevice->sbmap;
    ASSERT(sbmap);

    /* linear scan of the per-chip core map */
    for (i = 0; i < SBID_MAX; i++)
        if ((id == sbmap[i].id) && (coreunit == sbmap[i].coreunit))
            return (sbmap[i].sbaddr);

    ASSERT(0);
    return (0xdeadbeef);    /* poison value: no such core in the map */
}

/* Decodes the base address from an sbadmatch register value; the low      */
/* type bits select which base mask applies (types 1 and 2 must have       */
/* address-detect enabled and not negative-decode).                        */
LM_UINT32
b44_LM_sb_base(LM_UINT32 admatch)
{
    LM_UINT32 base;
    LM_UINT32 type;

    type = admatch & SBAM_TYPE_MASK;
    ASSERT(type < 3);

    base = 0;

    if (type == 0) {
        base = admatch & SBAM_BASE0_MASK;
    } else if (type == 1) {
        ASSERT(admatch & SBAM_ADEN);
        ASSERT(!(admatch & SBAM_ADNEG));    /* neg not supported */
        base = admatch & SBAM_BASE1_MASK;
    } else if (type == 2) {
        ASSERT(admatch & SBAM_ADEN);
        ASSERT(!(admatch & SBAM_ADNEG));    /* neg not supported */
        base = admatch & SBAM_BASE2_MASK;
    }

    return (base);
}

/* Decodes the region size (as a power of two) from an sbadmatch register  */
/* value, analogous to b44_LM_sb_base above.                               */
LM_UINT32
b44_LM_sb_size(LM_UINT32 admatch)
{
    LM_UINT32 size;
    LM_UINT32 type;

    type = admatch & SBAM_TYPE_MASK;
    ASSERT(type < 3);

    size = 0;

    if (type == 0) {
        size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
    } else if (type == 1) {
        ASSERT(admatch & SBAM_ADEN);
        ASSERT(!(admatch & SBAM_ADNEG));    /* neg not supported */
        size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
    } else if (type == 2) {
        ASSERT(admatch & SBAM_ADEN);
        ASSERT(!(admatch & SBAM_ADNEG));    /* neg not supported */
        size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
    }

    return (size);
}

/* return the core instance number of this core */
LM_UINT32
b44_LM_sb_coreunit(LM_DEVICE_BLOCK *pDevice)
{
    struct sbmap *sbmap;
    LM_UINT32 base;
    int i;

    sbmap = pDevice->sbmap;
    ASSERT(sbmap);

    /* identify the current core by its admatch0 base address */
    base = b44_LM_sb_base(REG_RD(pDevice, sbconfig.sbadmatch0));

    for (i = 0; i < SBID_MAX; i++)
        if (base == sbmap[i].sbaddr)
            return (sbmap[i].coreunit);

    ASSERT(0);
    return (0xdeadbeef);    /* poison value: core not found in the map */
}


/* Backplane clock query -- not implemented for this chip. */
LM_UINT32
b44_LM_sb_clock(LM_DEVICE_BLOCK *pDevice, LM_UINT32 extifva)
{
    ASSERT(0);  /* XXX TBD */
    return (0);
}

/* Returns the core-code field of the current core's sbidhigh register. */
LM_UINT32
b44_LM_sb_coreid(LM_DEVICE_BLOCK *pDevice)
{
    return ((REG_RD(pDevice, sbconfig.sbidhigh) &
        SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
}

/* Returns the revision-code field of the current core's sbidhigh register. */
LM_UINT32
b44_LM_sb_corerev(LM_DEVICE_BLOCK *pDevice)
{
    return (REG_RD(pDevice, sbconfig.sbidhigh) & SBIDH_RC_MASK);
}

/* Nonzero when the current core is clocked and out of reset/reject. */
LM_UINT32
b44_LM_sb_iscoreup(LM_DEVICE_BLOCK *pDevice)
{
    return ((REG_RD(pDevice, sbconfig.sbtmstatelow) &
        (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK);
}

/* Puts the ethernet PHY into its power-down state via emaccontrol. */
LM_VOID
b44_LM_PowerDownPhy(LM_DEVICE_BLOCK *pDevice)
{
    REG_WR(pDevice, emaccontrol, EMC_EP);
}

#ifdef BCM_WOL

/* Program patterns on the chip */
static void
b44_LM_pmprog(LM_DEVICE_BLOCK *pDevice)
{
    LM_UINT32 wfl;
    int plen0, plen1, max, i, j;
    LM_UINT8 wol_pattern[BCMENET_PMPSIZE];
    LM_UINT8 wol_mask[BCMENET_PMMSIZE];

    /* program the chip with wakeup patterns, masks, and lengths */

    if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_NONE) {
        wfl = DISABLE_3210_PATMATCH;
        REG_WR(pDevice, wakeuplength, wfl);
    }
    else if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET) {
        /* allow multicast magic packet */
        REG_OR(pDevice, rxconfig, ERC_AM);

        if (pDevice->corerev >= 7) {
            /* newer cores have hardware magic-packet matching:
             * program the station address and enable MPM */
            LM_UINT32 addr;

            REG_WR(pDevice, wakeuplength, DISABLE_3210_PATMATCH);

            addr = (pDevice->NodeAddress[2] << 24) |
                (pDevice->NodeAddress[3] << 16) |
                (pDevice->NodeAddress[4] << 8) |
                pDevice->NodeAddress[5];
            REG_WR(pDevice, enetaddrlo, addr);

            addr = (pDevice->NodeAddress[0] << 8) |
                pDevice->NodeAddress[1];
            REG_WR(pDevice, enetaddrhi, addr);

            REG_OR(pDevice, devcontrol, DC_MPM | DC_PM);
            return;
        }
        /* older chip */
        /* UDP magic packet pattern: 0xff x6 sync at the UDP payload
         * (offset 42), then 16 repetitions of the station address
         * (the last repetition truncated to fit the pattern buffer) */
        memset(wol_pattern, 0, BCMENET_PMPSIZE);
        memset(wol_pattern + 42, 0xff, 6);    /* sync pattern */
        max = ETHERNET_ADDRESS_SIZE;
        for (i = 0; i < 14; ++i) {
            if (i == 13)
                max = 2;
            for (j = 0; j < max; ++j) {
                wol_pattern[42 + 6 +
                    (i * ETHERNET_ADDRESS_SIZE) + j] =
                    pDevice->NodeAddress[j];
            }
        }
        memset(wol_mask, 0, BCMENET_PMMSIZE);
        wol_mask[5] = 0xfc;
        memset(wol_mask + 6, 0xff, 10);
        plen0 = BCMENET_PMPSIZE - 1;

        b44_LM_ftwrite(pDevice, (LM_UINT32 *)wol_pattern,
            BCMENET_PMPSIZE, BCMENET_PMPBASE);

        b44_LM_ftwrite(pDevice, (LM_UINT32 *)wol_mask, BCMENET_PMMSIZE,
            BCMENET_PMMBASE);

        /* raw ethernet II magic packet pattern: sync at offset 14 */
        memset(wol_pattern, 0, BCMENET_PMPSIZE);
        memset(wol_pattern + 14, 0xff, 6);    /* sync pattern */
        max = ETHERNET_ADDRESS_SIZE;
        for (i = 0; i < 16; ++i) {
            for (j = 0; j < max; ++j) {
                wol_pattern[14 + 6 +
                    (i * ETHERNET_ADDRESS_SIZE) + j] =
                    pDevice->NodeAddress[j];
            }
        }
        memset(wol_mask, 0, BCMENET_PMMSIZE);
        wol_mask[2] = 0xf0;
        memset(wol_mask + 3, 0xff, 11);
        wol_mask[14] = 0xf;
        plen1 = 14 + 6 + 96 - 1;

        b44_LM_ftwrite(pDevice, (LM_UINT32 *)wol_pattern,
            BCMENET_PMPSIZE, BCMENET_PMPBASE + BCMENET_PMPSIZE);

        b44_LM_ftwrite(pDevice, (LM_UINT32 *)wol_mask, BCMENET_PMMSIZE,
            BCMENET_PMMBASE + BCMENET_PMMSIZE);

        /* set this pattern's length: one less than the real length */
        wfl = plen0 | (plen1 << 8) | DISABLE_32_PATMATCH;

        REG_WR(pDevice, wakeuplength, wfl);

        /* enable chip wakeup pattern matching */
        REG_OR(pDevice, devcontrol, DC_PM);
    }

}

/* Arms the adapter for wake-on-LAN: halts it, brings up just enough of    */
/* the chip, programs the wake patterns, then enables backplane and PCI    */
/* PME generation.                                                          */
LM_VOID
b44_LM_pmset(LM_DEVICE_BLOCK *pDevice)
{
    LM_UINT16 Value16;

    b44_LM_Halt(pDevice);

    /* now turn on just enough of the chip to receive and match patterns */
    b44_LM_ResetAdapter(pDevice, FALSE);

    /* program patterns */
    b44_LM_pmprog(pDevice);

    /* enable sonics bus PME */
    REG_OR(pDevice, sbconfig.sbtmstatelow, SBTML_PE);

    b44_MM_ReadConfig16(pDevice, BCMENET_PMCSR, &Value16);
    b44_MM_WriteConfig16(pDevice, BCMENET_PMCSR,
        Value16 | ENABLE_PCICONFIG_PME);
}


/* Copies nbytes from buffer b into the chip's pattern-filter memory at    */
/* ftaddr, one 32-bit word at a time via the address/data register pair.   */
static void
b44_LM_ftwrite(LM_DEVICE_BLOCK *pDevice, LM_UINT32 *b, LM_UINT32 nbytes,
    LM_UINT32 ftaddr)
{
    LM_UINT32 i;

    for (i = 0; i < nbytes; i += sizeof(LM_UINT32)) {
        REG_WR(pDevice, enetftaddr, ftaddr + i);
        REG_WR(pDevice, enetftdata, b[i / sizeof(LM_UINT32)]);
    }
}

#endif /* BCM_WOL */