/*
 * Driver for the ETRAX 100LX built-in 10/100 Mbit ethernet controller.
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <linux/if.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>

#include <asm/arch/svinto.h>/* DMA and register descriptions */
#include <asm/io.h>         /* LED_* I/O functions */
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/ethernet.h>
#include <asm/cache.h>

//#define ETHDEBUG
#define D(x)

/*
 * The name of the card. Is used for messages and in the requests for
 * io regions, irqs and dma channels
 */

static const char* cardname = "ETRAX 100LX built-in ethernet controller";

/* A default ethernet address. Highlevel SW will set the real one later */

static struct sockaddr default_mac = {
	0,
	{ 0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00 }
};

/* Information that need to be kept for each board. */
struct net_local {
	struct net_device_stats stats;
	struct mii_if_info mii_if;

	/* Tx control lock. This protects the transmit buffer ring
	 * state along with the "tx full" state of the driver. This
	 * means all netif_queue flow control actions are protected
	 * by this lock as well.
	 */
	spinlock_t lock;
};

/* One DMA descriptor paired with the sk_buff whose data it points at. */
typedef struct etrax_eth_descr
{
	etrax_dma_descr descr;
	struct sk_buff* skb;
} etrax_eth_descr;

/* Some transceivers requires special handling */
struct transceiver_ops
{
	unsigned int oui;                              /* PHY manufacturer OUI this entry matches */
	void (*check_speed)(struct net_device* dev);   /* updates current_speed from the PHY */
	void (*check_duplex)(struct net_device* dev);  /* updates full_duplex from the PHY */
};

/* Ops for the PHY found at probe time (set by e100_probe_transceiver). */
struct transceiver_ops* transceiver;

/* Duplex settings */
enum duplex
{
	half,
	full,
	autoneg
};

/* Dma descriptors etc. */

#define MAX_MEDIA_DATA_SIZE 1518

#define MIN_PACKET_LEN      46
#define ETHER_HEAD_LEN      14

/*
** MDIO constants.
*/
#define MDIO_START          0x1
#define MDIO_READ           0x2
#define MDIO_WRITE          0x1
#define MDIO_PREAMBLE       0xfffffffful

/* Broadcom specific */
#define MDIO_AUX_CTRL_STATUS_REG 0x18
#define MDIO_BC_FULL_DUPLEX_IND  0x1
#define MDIO_BC_SPEED            0x2

/* TDK specific */
#define MDIO_TDK_DIAGNOSTIC_REG  18
#define MDIO_TDK_DIAGNOSTIC_RATE 0x400
#define MDIO_TDK_DIAGNOSTIC_DPLX 0x800

/*Intel LXT972A specific*/
#define MDIO_INT_STATUS_REG_2    0x0011
#define MDIO_INT_FULL_DUPLEX_IND ( 1 << 9 )
#define MDIO_INT_SPEED           ( 1 << 14 )

/* Network flash constants */
#define NET_FLASH_TIME              (HZ/50) /* 20 ms */
#define NET_FLASH_PAUSE             (HZ/100) /* 10 ms */
#define NET_LINK_UP_CHECK_INTERVAL  (2*HZ) /* 2 s */
#define NET_DUPLEX_CHECK_INTERVAL   (2*HZ) /* 2 s */

#define NO_NETWORK_ACTIVITY 0
#define NETWORK_ACTIVITY    1

#define NBR_OF_RX_DESC      64
#define NBR_OF_TX_DESC      256

/* Large packets are sent directly to upper layers while small packets are */
/* copied (to reduce memory waste). The following constant decides the breakpoint */
#define RX_COPYBREAK        256

/* Due to a chip bug we need to flush the cache when descriptors are returned */
/* to the DMA. To decrease performance impact we return descriptors in chunks. */
/* The following constant determines the number of descriptors to return. */
#define RX_QUEUE_THRESHOLD  NBR_OF_RX_DESC/2

#define GET_BIT(bit,val)   (((val) >> (bit)) & 0x01)

/* Define some macros to access ETRAX 100 registers */
/* SETF writes an integer value into a register field of the shadow copy;
 * SETS writes a named state constant.  The shadow is then written to the
 * real (write-only) register by the caller. */
#define SETF(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
					  IO_FIELD_(reg##_, field##_, val)
#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
					  IO_STATE_(reg##_, field##_, _##val)

static etrax_eth_descr *myNextRxDesc;  /* Points to the next descriptor to
                                          to be processed */
static etrax_eth_descr *myLastRxDesc;  /* The last processed descriptor */
static etrax_eth_descr *myPrevRxDesc;  /* The descriptor right before myNextRxDesc */

/* Descriptor rings are cacheline-aligned because the DMA engine and the CPU
 * must not share a cacheline (see the chip-bug note above). */
static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));

static etrax_eth_descr* myFirstTxDesc; /* First packet not yet sent */
static etrax_eth_descr* myLastTxDesc;  /* End of send queue */
static etrax_eth_descr* myNextTxDesc;  /* Next descriptor to use */
static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));

static unsigned int network_rec_config_shadow = 0;
static unsigned int mdio_phy_addr; /* Transciever address */

static unsigned int network_tr_ctrl_shadow = 0;

/* Network speed indication. */
/* NOTE(review): the timers are declared with NULL callbacks and .data == 0;
 * both fields must be filled in before add_timer() — e100_check_speed()
 * casts .data to a struct net_device*, so verify .data is assigned at
 * init time for each timer. */
static DEFINE_TIMER(speed_timer, NULL, 0, 0);
static DEFINE_TIMER(clear_led_timer, NULL, 0, 0);
static int current_speed; /* Speed read from transceiver */
static int current_speed_selection; /* Speed selected by user */
static unsigned long led_next_time;
static int led_active;
static int rx_queue_len;

/* Duplex */
static DEFINE_TIMER(duplex_timer, NULL, 0, 0);
static int full_duplex;
static enum duplex current_duplex;

/* Index to functions, as function prototypes.
*/ 181 182static int etrax_ethernet_init(void); 183 184static int e100_open(struct net_device *dev); 185static int e100_set_mac_address(struct net_device *dev, void *addr); 186static int e100_send_packet(struct sk_buff *skb, struct net_device *dev); 187static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id); 188static irqreturn_t e100nw_interrupt(int irq, void *dev_id); 189static void e100_rx(struct net_device *dev); 190static int e100_close(struct net_device *dev); 191static int e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 192static int e100_set_config(struct net_device* dev, struct ifmap* map); 193static void e100_tx_timeout(struct net_device *dev); 194static struct net_device_stats *e100_get_stats(struct net_device *dev); 195static void set_multicast_list(struct net_device *dev); 196static void e100_hardware_send_packet(char *buf, int length); 197static void update_rx_stats(struct net_device_stats *); 198static void update_tx_stats(struct net_device_stats *); 199static int e100_probe_transceiver(struct net_device* dev); 200 201static void e100_check_speed(unsigned long priv); 202static void e100_set_speed(struct net_device* dev, unsigned long speed); 203static void e100_check_duplex(unsigned long priv); 204static void e100_set_duplex(struct net_device* dev, enum duplex); 205static void e100_negotiate(struct net_device* dev); 206 207static int e100_get_mdio_reg(struct net_device *dev, int phy_id, int location); 208static void e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value); 209 210static void e100_send_mdio_cmd(unsigned short cmd, int write_cmd); 211static void e100_send_mdio_bit(unsigned char bit); 212static unsigned char e100_receive_mdio_bit(void); 213static void e100_reset_transceiver(struct net_device* net); 214 215static void e100_clear_network_leds(unsigned long dummy); 216static void e100_set_network_leds(int active); 217 218static const struct ethtool_ops e100_ethtool_ops; 219 220static void 
broadcom_check_speed(struct net_device* dev); 221static void broadcom_check_duplex(struct net_device* dev); 222static void tdk_check_speed(struct net_device* dev); 223static void tdk_check_duplex(struct net_device* dev); 224static void intel_check_speed(struct net_device* dev); 225static void intel_check_duplex(struct net_device* dev); 226static void generic_check_speed(struct net_device* dev); 227static void generic_check_duplex(struct net_device* dev); 228 229struct transceiver_ops transceivers[] = 230{ 231 {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */ 232 {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */ 233 {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */ 234 {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/ 235 {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */ 236}; 237 238#define tx_done(dev) (*R_DMA_CH0_CMD == 0) 239 240/* 241 * Check for a network adaptor of this type, and return '0' if one exists. 242 * If dev->base_addr == 0, probe all likely locations. 243 * If dev->base_addr == 1, always return failure. 244 * If dev->base_addr == 2, allocate space for the device and return success 245 * (detachable devices only). 246 */ 247 248static int __init 249etrax_ethernet_init(void) 250{ 251 struct net_device *dev; 252 struct net_local* np; 253 int i, err; 254 255 printk(KERN_INFO 256 "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 2000-2003 Axis Communications AB\n"); 257 258 dev = alloc_etherdev(sizeof(struct net_local)); 259 np = dev->priv; 260 261 if (!dev) 262 return -ENOMEM; 263 264 dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */ 265 266 /* now setup our etrax specific stuff */ 267 268 dev->irq = NETWORK_DMA_RX_IRQ_NBR; /* we really use DMATX as well... 
*/ 269 dev->dma = NETWORK_RX_DMA_NBR; 270 271 /* fill in our handlers so the network layer can talk to us in the future */ 272 273 dev->open = e100_open; 274 dev->hard_start_xmit = e100_send_packet; 275 dev->stop = e100_close; 276 dev->get_stats = e100_get_stats; 277 dev->set_multicast_list = set_multicast_list; 278 dev->set_mac_address = e100_set_mac_address; 279 dev->ethtool_ops = &e100_ethtool_ops; 280 dev->do_ioctl = e100_ioctl; 281 dev->set_config = e100_set_config; 282 dev->tx_timeout = e100_tx_timeout; 283 284 /* Initialise the list of Etrax DMA-descriptors */ 285 286 /* Initialise receive descriptors */ 287 288 for (i = 0; i < NBR_OF_RX_DESC; i++) { 289 /* Allocate two extra cachelines to make sure that buffer used by DMA 290 * does not share cacheline with any other data (to avoid cache bug) 291 */ 292 RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); 293 if (!RxDescList[i].skb) 294 return -ENOMEM; 295 RxDescList[i].descr.ctrl = 0; 296 RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE; 297 RxDescList[i].descr.next = virt_to_phys(&RxDescList[i + 1]); 298 RxDescList[i].descr.buf = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data)); 299 RxDescList[i].descr.status = 0; 300 RxDescList[i].descr.hw_len = 0; 301 prepare_rx_descriptor(&RxDescList[i].descr); 302 } 303 304 RxDescList[NBR_OF_RX_DESC - 1].descr.ctrl = d_eol; 305 RxDescList[NBR_OF_RX_DESC - 1].descr.next = virt_to_phys(&RxDescList[0]); 306 rx_queue_len = 0; 307 308 /* Initialize transmit descriptors */ 309 for (i = 0; i < NBR_OF_TX_DESC; i++) { 310 TxDescList[i].descr.ctrl = 0; 311 TxDescList[i].descr.sw_len = 0; 312 TxDescList[i].descr.next = virt_to_phys(&TxDescList[i + 1].descr); 313 TxDescList[i].descr.buf = 0; 314 TxDescList[i].descr.status = 0; 315 TxDescList[i].descr.hw_len = 0; 316 TxDescList[i].skb = 0; 317 } 318 319 TxDescList[NBR_OF_TX_DESC - 1].descr.ctrl = d_eol; 320 TxDescList[NBR_OF_TX_DESC - 1].descr.next = virt_to_phys(&TxDescList[0].descr); 321 322 /* 
Initialise initial pointers */ 323 324 myNextRxDesc = &RxDescList[0]; 325 myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1]; 326 myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1]; 327 myFirstTxDesc = &TxDescList[0]; 328 myNextTxDesc = &TxDescList[0]; 329 myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1]; 330 331 /* Register device */ 332 err = register_netdev(dev); 333 if (err) { 334 free_netdev(dev); 335 return err; 336 } 337 338 /* set the default MAC address */ 339 340 e100_set_mac_address(dev, &default_mac); 341 342 /* Initialize speed indicator stuff. */ 343 344 current_speed = 10; 345 current_speed_selection = 0; /* Auto */ 346 speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL; 347 duplex_timer.data = (unsigned long)dev; 348 speed_timer.function = e100_check_speed; 349 350 clear_led_timer.function = e100_clear_network_leds; 351 352 full_duplex = 0; 353 current_duplex = autoneg; 354 duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL; 355 duplex_timer.data = (unsigned long)dev; 356 duplex_timer.function = e100_check_duplex; 357 358 /* Initialize mii interface */ 359 np->mii_if.phy_id = mdio_phy_addr; 360 np->mii_if.phy_id_mask = 0x1f; 361 np->mii_if.reg_num_mask = 0x1f; 362 np->mii_if.dev = dev; 363 np->mii_if.mdio_read = e100_get_mdio_reg; 364 np->mii_if.mdio_write = e100_set_mdio_reg; 365 366 /* Initialize group address registers to make sure that no */ 367 /* unwanted addresses are matched */ 368 *R_NETWORK_GA_0 = 0x00000000; 369 *R_NETWORK_GA_1 = 0x00000000; 370 return 0; 371} 372 373/* set MAC address of the interface. called from the core after a 374 * SIOCSIFADDR ioctl, and from the bootup above. 
 */

/* Copy the new address into dev->dev_addr, latch it into the three
 * R_NETWORK_SA_* registers (little-endian packing, see comment below),
 * and log it.  Returns 0 always. */
static int
e100_set_mac_address(struct net_device *dev, void *p)
{
	struct net_local *np = (struct net_local *)dev->priv;
	struct sockaddr *addr = p;
	int i;

	spin_lock(&np->lock); /* preemption protection */

	/* remember it */

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* Write it to the hardware.
	 * Note the way the address is wrapped:
	 * *R_NETWORK_SA_0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
	 * *R_NETWORK_SA_1 = a0_4 | (a0_5 << 8);
	 */

	*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
		(dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
	*R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
	*R_NETWORK_SA_2 = 0;

	/* show it in the log as well */

	printk(KERN_INFO "%s: changed MAC to ", dev->name);

	/* print the first five octets with trailing colons, then the last
	 * one (i == 5 after the loop) with a newline */
	for (i = 0; i < 5; i++)
		printk("%02X:", dev->dev_addr[i]);

	printk("%02X\n", dev->dev_addr[i]);

	spin_unlock(&np->lock);

	return 0;
}

/*
 * Open/initialize the board. This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 */

/* Bring the interface up: mask/clear interrupts, reset the DMA channels,
 * claim the three IRQs, program MAC address and receive/transmit config
 * (via the shadow registers), start the RX DMA, probe the PHY and arm the
 * speed/duplex timers.  Returns 0 on success, -EAGAIN on any failure. */
static int
e100_open(struct net_device *dev)
{
	unsigned long flags;

	/* enable the MDIO output pin */

	*R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable);

	/* mask off the error interrupts while we reconfigure */
	*R_IRQ_MASK0_CLR =
		IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
		IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
		IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);

	/* clear dma0 and 1 eop and descr irq masks */
	*R_IRQ_MASK2_CLR =
		IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
		IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
		IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
		IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);

	/* Reset and wait for the DMA channels */

	RESET_DMA(NETWORK_TX_DMA_NBR);
	RESET_DMA(NETWORK_RX_DMA_NBR);
	WAIT_DMA(NETWORK_TX_DMA_NBR);
	WAIT_DMA(NETWORK_RX_DMA_NBR);

	/* Initialise the etrax network controller */

	/* allocate the irq corresponding to the receiving DMA */

	if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt,
			IRQF_SAMPLE_RANDOM, cardname, (void *)dev)) {
		goto grace_exit0;
	}

	/* allocate the irq corresponding to the transmitting DMA */

	if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0,
			cardname, (void *)dev)) {
		goto grace_exit1;
	}

	/* allocate the irq corresponding to the network errors etc */

	if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
			cardname, (void *)dev)) {
		goto grace_exit2;
	}

	/* give the HW an idea of what MAC address we want */

	*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
		(dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
	*R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
	*R_NETWORK_SA_2 = 0;

	/* receive config: accept broadcast, match station address 0,
	 * current duplex setting — all staged in the shadow first because
	 * the register is not readable */
	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
	SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
	*R_NETWORK_REC_CONFIG = network_rec_config_shadow;

	*R_NETWORK_GEN_CONFIG =
		IO_STATE(R_NETWORK_GEN_CONFIG, phy,    mii_clk) |
		IO_STATE(R_NETWORK_GEN_CONFIG, enable, on);

	/* transmit config, likewise staged in a shadow */
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, delay, none);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cancel, dont);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cd, enable);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, retry, enable);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, pad, enable);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
	*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;

	save_flags(flags);
	cli();

	/* enable the irq's for ethernet DMA */

	*R_IRQ_MASK2_SET =
		IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
		IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);

	*R_IRQ_MASK0_SET =
		IO_STATE(R_IRQ_MASK0_SET, overrun,       set) |
		IO_STATE(R_IRQ_MASK0_SET, underrun,      set) |
		IO_STATE(R_IRQ_MASK0_SET, excessive_col, set);

	/* make sure the irqs are cleared */

	*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
	*R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);

	/* make sure the rec and transmit error counters are cleared */

	(void)*R_REC_COUNTERS;  /* dummy read */
	(void)*R_TR_COUNTERS;   /* dummy read */

	/* start the receiving DMA channel so we can receive packets from now on */

	*R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc);
	*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start);

	/* Set up transmit DMA channel so it can be restarted later */

	*R_DMA_CH0_FIRST = 0;
	*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);

	restore_flags(flags);

	/* Probe for transceiver */
	if (e100_probe_transceiver(dev))
		goto grace_exit3;

	/* Start duplex/speed timers */
	add_timer(&speed_timer);
	add_timer(&duplex_timer);

	/* We are now ready to accept transmit requeusts from
	 * the queueing layer of the networking.
	 */
	netif_start_queue(dev);

	return 0;

	/* unwind the IRQ allocations in reverse order */
grace_exit3:
	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
grace_exit2:
	free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
grace_exit1:
	free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
grace_exit0:
	return -EAGAIN;
}


/* Fallback speed check: infer 100 vs 10 Mbit from our own advertised
 * abilities (MII_ADVERTISE).
 * NOTE(review): this reads what we advertise, not the negotiated result
 * (MII_LPA) — generic PHYs report advertised speed only; confirm this is
 * intentional. */
static void
generic_check_speed(struct net_device* dev)
{
	unsigned long data;
	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
	if ((data & ADVERTISE_100FULL) ||
	    (data & ADVERTISE_100HALF))
		current_speed = 100;
	else
		current_speed = 10;
}

/* TDK PHY: read the actual link rate from the vendor diagnostic register. */
static void
tdk_check_speed(struct net_device* dev)
{
	unsigned long data;
	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
	current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
}

/* Broadcom PHY: read the link rate from the aux control/status register. */
static void
broadcom_check_speed(struct net_device* dev)
{
	unsigned long data;
	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
	current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
}

/* Intel LXT972A PHY: read the link rate from status register 2. */
static void
intel_check_speed(struct net_device* dev)
{
	unsigned long data;
	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
	current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
}

/* Timer callback: refresh current_speed (0 when the link is down) via the
 * PHY-specific hook, update the LEDs on any change, and re-arm itself.
 * priv is the net_device pointer stored in speed_timer.data. */
static void
e100_check_speed(unsigned long priv)
{
	struct net_device* dev = (struct net_device*)priv;
	static int led_initiated = 0;
	unsigned long data;
	int old_speed = current_speed;

	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR);
	if (!(data & BMSR_LSTATUS)) {
		current_speed = 0;
	} else {
		transceiver->check_speed(dev);
	}

	if ((old_speed != current_speed) || !led_initiated) {
		led_initiated = 1;
		e100_set_network_leds(NO_NETWORK_ACTIVITY);
	}

	/* Reinitialize the timer.
 */
	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
	add_timer(&speed_timer);
}

/* Program MII_ADVERTISE according to the user-selected speed/duplex and
 * restart autonegotiation through MII_BMCR. */
static void
e100_negotiate(struct net_device* dev)
{
	unsigned short data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);

	/* Discard old speed and duplex settings */
	data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
	          ADVERTISE_10HALF | ADVERTISE_10FULL);

	switch (current_speed_selection) {
		case 10 :
			if (current_duplex == full)
				data |= ADVERTISE_10FULL;
			else if (current_duplex == half)
				data |= ADVERTISE_10HALF;
			else
				data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
			break;

		case 100 :
			if (current_duplex == full)
				data |= ADVERTISE_100FULL;
			else if (current_duplex == half)
				data |= ADVERTISE_100HALF;
			else
				data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
			break;

		case 0 : /* Auto */
			if (current_duplex == full)
				data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
			else if (current_duplex == half)
				data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
			else
				data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
					ADVERTISE_100HALF | ADVERTISE_100FULL;
			break;

		default : /* assume autoneg speed and duplex */
			data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
				ADVERTISE_100HALF | ADVERTISE_100FULL;
	}

	e100_set_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE, data);

	/* Renegotiate with link partner */
	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
	data |= BMCR_ANENABLE | BMCR_ANRESTART;

	e100_set_mdio_reg(dev, mdio_phy_addr, MII_BMCR, data);
}

/* Apply a new user-selected speed (10/100/0=auto); renegotiates only
 * when the selection actually changes. */
static void
e100_set_speed(struct net_device* dev, unsigned long speed)
{
	if (speed != current_speed_selection) {
		current_speed_selection = speed;
		e100_negotiate(dev);
	}
}

/* Timer callback: refresh full_duplex via the PHY-specific hook, push a
 * changed duplex into R_NETWORK_REC_CONFIG (through its shadow), mirror
 * it into the mii_if state, and re-arm itself.
 * priv is the net_device pointer stored in duplex_timer.data. */
static void
e100_check_duplex(unsigned long priv)
{
	struct net_device *dev = (struct net_device *)priv;
	struct net_local *np = (struct net_local *)dev->priv;
	int old_duplex = full_duplex;
	transceiver->check_duplex(dev);
	if (old_duplex != full_duplex) {
		/* Duplex changed */
		SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
	}

	/* Reinitialize the timer. */
	duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
	add_timer(&duplex_timer);
	np->mii_if.full_duplex = full_duplex;
}

/* Fallback duplex check: infer duplex from our own advertised abilities.
 * NOTE(review): like generic_check_speed, this reads MII_ADVERTISE rather
 * than the negotiated result — confirm this is intentional. */
static void
generic_check_duplex(struct net_device* dev)
{
	unsigned long data;
	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
	if ((data & ADVERTISE_10FULL) ||
	    (data & ADVERTISE_100FULL))
		full_duplex = 1;
	else
		full_duplex = 0;
}

/* TDK PHY: read actual duplex from the vendor diagnostic register. */
static void
tdk_check_duplex(struct net_device* dev)
{
	unsigned long data;
	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
	full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
}

/* Broadcom PHY: read actual duplex from the aux control/status register. */
static void
broadcom_check_duplex(struct net_device* dev)
{
	unsigned long data;
	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
	full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
}

/* Intel LXT972A PHY: read actual duplex from status register 2. */
static void
intel_check_duplex(struct net_device* dev)
{
	unsigned long data;
	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
	full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
}

/* Apply a new user-selected duplex mode; renegotiates only on change. */
static void
e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
{
	if (new_duplex != current_duplex) {
		current_duplex = new_duplex;
		e100_negotiate(dev);
	}
}

/* Scan MDIO addresses 0..31 for a responding PHY (BMSR != 0xffff), read
 * its OUI from the PHYSID registers and select the matching entry in
 * transceivers[] (the all-zero entry is the generic fallback).
 * Returns 0 on success, -ENODEV if no PHY answers. */
static int
e100_probe_transceiver(struct net_device* dev)
{
	unsigned int phyid_high;
	unsigned int phyid_low;
	unsigned int oui;
	struct transceiver_ops* ops = NULL;

	/* Probe MDIO physical address */
	for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) {
		if (e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR) != 0xffff)
			break;
	}
	if (mdio_phy_addr == 32)
		return -ENODEV;

	/* Get manufacturer */
	phyid_high = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID1);
	phyid_low = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID2);
	/* OUI is split across the two PHYSID registers */
	oui = (phyid_high << 6) | (phyid_low >> 10);

	for (ops = &transceivers[0]; ops->oui; ops++) {
		if (ops->oui == oui)
			break;
	}
	transceiver = ops;

	return 0;
}

/* Bit-bang a read of one MII register over MDIO; returns the 16-bit
 * register value.  dev is unused (kept for the mii_if callback shape). */
static int
e100_get_mdio_reg(struct net_device *dev, int phy_id, int location)
{
	unsigned short cmd;	/* Data to be sent on MDIO port */
	int data;		/* Data read from MDIO */
	int bitCounter;

	/* Start of frame, OP Code, Physical Address, Register Address */
	cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (phy_id << 7) |
		(location << 2);

	e100_send_mdio_cmd(cmd, 0);

	data = 0;

	/* Data... */
	for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
		data |= (e100_receive_mdio_bit() << bitCounter);
	}

	return data;
}

/* Bit-bang a write of one MII register over MDIO. */
static void
e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value)
{
	int bitCounter;
	unsigned short cmd;

	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (phy_id << 7) |
	      (location << 2);

	e100_send_mdio_cmd(cmd, 1);

	/* Data...
*/ 813 for (bitCounter=15; bitCounter>=0 ; bitCounter--) { 814 e100_send_mdio_bit(GET_BIT(bitCounter, value)); 815 } 816 817} 818 819static void 820e100_send_mdio_cmd(unsigned short cmd, int write_cmd) 821{ 822 int bitCounter; 823 unsigned char data = 0x2; 824 825 /* Preamble */ 826 for (bitCounter = 31; bitCounter>= 0; bitCounter--) 827 e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE)); 828 829 for (bitCounter = 15; bitCounter >= 2; bitCounter--) 830 e100_send_mdio_bit(GET_BIT(bitCounter, cmd)); 831 832 /* Turnaround */ 833 for (bitCounter = 1; bitCounter >= 0 ; bitCounter--) 834 if (write_cmd) 835 e100_send_mdio_bit(GET_BIT(bitCounter, data)); 836 else 837 e100_receive_mdio_bit(); 838} 839 840static void 841e100_send_mdio_bit(unsigned char bit) 842{ 843 *R_NETWORK_MGM_CTRL = 844 IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) | 845 IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit); 846 udelay(1); 847 *R_NETWORK_MGM_CTRL = 848 IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) | 849 IO_MASK(R_NETWORK_MGM_CTRL, mdck) | 850 IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit); 851 udelay(1); 852} 853 854static unsigned char 855e100_receive_mdio_bit() 856{ 857 unsigned char bit; 858 *R_NETWORK_MGM_CTRL = 0; 859 bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT); 860 udelay(1); 861 *R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck); 862 udelay(1); 863 return bit; 864} 865 866static void 867e100_reset_transceiver(struct net_device* dev) 868{ 869 unsigned short cmd; 870 unsigned short data; 871 int bitCounter; 872 873 data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR); 874 875 cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (mdio_phy_addr << 7) | (MII_BMCR << 2); 876 877 e100_send_mdio_cmd(cmd, 1); 878 879 data |= 0x8000; 880 881 for (bitCounter = 15; bitCounter >= 0 ; bitCounter--) { 882 e100_send_mdio_bit(GET_BIT(bitCounter, data)); 883 } 884} 885 886/* Called by upper layers if they decide it took too long to complete 887 * sending a packet - we need to reset and stuff. 
888 */ 889 890static void 891e100_tx_timeout(struct net_device *dev) 892{ 893 struct net_local *np = (struct net_local *)dev->priv; 894 unsigned long flags; 895 896 spin_lock_irqsave(&np->lock, flags); 897 898 printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, 899 tx_done(dev) ? "IRQ problem" : "network cable problem"); 900 901 /* remember we got an error */ 902 903 np->stats.tx_errors++; 904 905 /* reset the TX DMA in case it has hung on something */ 906 907 RESET_DMA(NETWORK_TX_DMA_NBR); 908 WAIT_DMA(NETWORK_TX_DMA_NBR); 909 910 /* Reset the transceiver. */ 911 912 e100_reset_transceiver(dev); 913 914 /* and get rid of the packets that never got an interrupt */ 915 while (myFirstTxDesc != myNextTxDesc) 916 { 917 dev_kfree_skb(myFirstTxDesc->skb); 918 myFirstTxDesc->skb = 0; 919 myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next); 920 } 921 922 /* Set up transmit DMA channel so it can be restarted later */ 923 *R_DMA_CH0_FIRST = 0; 924 *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc); 925 926 /* tell the upper layers we're ok again */ 927 928 netif_wake_queue(dev); 929 spin_unlock_irqrestore(&np->lock, flags); 930} 931 932 933/* This will only be invoked if the driver is _not_ in XOFF state. 934 * What this means is that we need not check it, and that this 935 * invariant will hold if we make sure that the netif_*_queue() 936 * calls are done at the proper times. 
937 */ 938 939static int 940e100_send_packet(struct sk_buff *skb, struct net_device *dev) 941{ 942 struct net_local *np = (struct net_local *)dev->priv; 943 unsigned char *buf = skb->data; 944 unsigned long flags; 945 946#ifdef ETHDEBUG 947 printk("send packet len %d\n", length); 948#endif 949 spin_lock_irqsave(&np->lock, flags); /* protect from tx_interrupt and ourself */ 950 951 myNextTxDesc->skb = skb; 952 953 dev->trans_start = jiffies; 954 955 e100_hardware_send_packet(buf, skb->len); 956 957 myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next); 958 959 /* Stop queue if full */ 960 if (myNextTxDesc == myFirstTxDesc) { 961 netif_stop_queue(dev); 962 } 963 964 spin_unlock_irqrestore(&np->lock, flags); 965 966 return 0; 967} 968 969/* 970 * The typical workload of the driver: 971 * Handle the network interface interrupts. 972 */ 973 974static irqreturn_t 975e100rxtx_interrupt(int irq, void *dev_id) 976{ 977 struct net_device *dev = (struct net_device *)dev_id; 978 struct net_local *np = (struct net_local *)dev->priv; 979 unsigned long irqbits = *R_IRQ_MASK2_RD; 980 981 /* Disable RX/TX IRQs to avoid reentrancy */ 982 *R_IRQ_MASK2_CLR = 983 IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) | 984 IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr); 985 986 /* Handle received packets */ 987 if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) { 988 /* acknowledge the eop interrupt */ 989 990 *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do); 991 992 /* check if one or more complete packets were indeed received */ 993 994 while ((*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) && 995 (myNextRxDesc != myLastRxDesc)) { 996 /* Take out the buffer and give it to the OS, then 997 * allocate a new buffer to put a packet in. 
 */
			e100_rx(dev);
			((struct net_local *)dev->priv)->stats.rx_packets++;
			/* restart/continue on the channel, for safety */
			*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
			/* clear dma channel 1 eop/descr irq bits */
			*R_DMA_CH1_CLR_INTR =
				IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) |
				IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do);

			/* now, we might have gotten another packet
			   so we have to loop back and check if so */
		}
	}

	/* Report any packets that have been sent */
	/* walk from the oldest unacked descriptor up to the one the DMA
	 * engine is currently on, freeing each completed skb */
	while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) &&
	       myFirstTxDesc != myNextTxDesc)
	{
		np->stats.tx_bytes += myFirstTxDesc->skb->len;
		np->stats.tx_packets++;

		/* dma is ready with the transmission of the data in tx_skb, so now
		   we can release the skb memory */
		dev_kfree_skb_irq(myFirstTxDesc->skb);
		myFirstTxDesc->skb = 0;
		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
	}

	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
		/* acknowledge the eop interrupt and wake up queue */
		*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
		netif_wake_queue(dev);
	}

	/* Enable RX/TX IRQs again */
	*R_IRQ_MASK2_SET =
		IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
		IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);

	return IRQ_HANDLED;
}

/* Handler for the network status IRQ: transmit underrun, receive
 * overrun and excessive collisions.  Errors are cleared by pulsing
 * clr_error in R_NETWORK_TR_CTRL through its shadow copy (the register
 * is write-only). */
static irqreturn_t
e100nw_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct net_local *np = (struct net_local *)dev->priv;
	unsigned long irqbits = *R_IRQ_MASK0_RD;

	/* check for underrun irq */
	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) {
		/* pulse clr_error: write shadow with clr, then restore nop
		 * in the shadow for subsequent writers */
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
		np->stats.tx_errors++;
D(printk("ethernet receiver underrun!\n")); 1055 } 1056 1057 /* check for overrun irq */ 1058 if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) { 1059 update_rx_stats(&np->stats); /* this will ack the irq */ 1060 D(printk("ethernet receiver overrun!\n")); 1061 } 1062 /* check for excessive collision irq */ 1063 if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) { 1064 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); 1065 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; 1066 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); 1067 *R_NETWORK_TR_CTRL = IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr); 1068 np->stats.tx_errors++; 1069 D(printk("ethernet excessive collisions!\n")); 1070 } 1071 return IRQ_HANDLED; 1072} 1073 1074/* We have a good packet(s), get it/them out of the buffers. */ 1075static void 1076e100_rx(struct net_device *dev) 1077{ 1078 struct sk_buff *skb; 1079 int length = 0; 1080 struct net_local *np = (struct net_local *)dev->priv; 1081 unsigned char *skb_data_ptr; 1082#ifdef ETHDEBUG 1083 int i; 1084#endif 1085 1086 if (!led_active && time_after(jiffies, led_next_time)) { 1087 /* light the network leds depending on the current speed. 
*/ 1088 e100_set_network_leds(NETWORK_ACTIVITY); 1089 1090 /* Set the earliest time we may clear the LED */ 1091 led_next_time = jiffies + NET_FLASH_TIME; 1092 led_active = 1; 1093 mod_timer(&clear_led_timer, jiffies + HZ/10); 1094 } 1095 1096 length = myNextRxDesc->descr.hw_len - 4; 1097 ((struct net_local *)dev->priv)->stats.rx_bytes += length; 1098 1099#ifdef ETHDEBUG 1100 printk("Got a packet of length %d:\n", length); 1101 /* dump the first bytes in the packet */ 1102 skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf); 1103 for (i = 0; i < 8; i++) { 1104 printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8, 1105 skb_data_ptr[0],skb_data_ptr[1],skb_data_ptr[2],skb_data_ptr[3], 1106 skb_data_ptr[4],skb_data_ptr[5],skb_data_ptr[6],skb_data_ptr[7]); 1107 skb_data_ptr += 8; 1108 } 1109#endif 1110 1111 if (length < RX_COPYBREAK) { 1112 /* Small packet, copy data */ 1113 skb = dev_alloc_skb(length - ETHER_HEAD_LEN); 1114 if (!skb) { 1115 np->stats.rx_errors++; 1116 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 1117 return; 1118 } 1119 1120 skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */ 1121 skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */ 1122 1123#ifdef ETHDEBUG 1124 printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n", 1125 skb->head, skb->data, skb_tail_pointer(skb), 1126 skb_end_pointer(skb)); 1127 printk("copying packet to 0x%x.\n", skb_data_ptr); 1128#endif 1129 1130 memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length); 1131 } 1132 else { 1133 /* Large packet, send directly to upper layers and allocate new 1134 * memory (aligned to cache line boundary to avoid bug). 1135 * Before sending the skb to upper layers we must make sure that 1136 * skb->data points to the aligned start of the packet. 
1137 */ 1138 int align; 1139 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); 1140 if (!new_skb) { 1141 np->stats.rx_errors++; 1142 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 1143 return; 1144 } 1145 skb = myNextRxDesc->skb; 1146 align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data; 1147 skb_put(skb, length + align); 1148 skb_pull(skb, align); /* Remove alignment bytes */ 1149 myNextRxDesc->skb = new_skb; 1150 myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data)); 1151 } 1152 1153 skb->protocol = eth_type_trans(skb, dev); 1154 1155 /* Send the packet to the upper layers */ 1156 netif_rx(skb); 1157 1158 /* Prepare for next packet */ 1159 myNextRxDesc->descr.status = 0; 1160 myPrevRxDesc = myNextRxDesc; 1161 myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next); 1162 1163 rx_queue_len++; 1164 1165 /* Check if descriptors should be returned */ 1166 if (rx_queue_len == RX_QUEUE_THRESHOLD) { 1167 flush_etrax_cache(); 1168 myPrevRxDesc->descr.ctrl |= d_eol; 1169 myLastRxDesc->descr.ctrl &= ~d_eol; 1170 myLastRxDesc = myPrevRxDesc; 1171 rx_queue_len = 0; 1172 } 1173} 1174 1175/* The inverse routine to net_open(). 
*/ 1176static int 1177e100_close(struct net_device *dev) 1178{ 1179 struct net_local *np = (struct net_local *)dev->priv; 1180 1181 printk(KERN_INFO "Closing %s.\n", dev->name); 1182 1183 netif_stop_queue(dev); 1184 1185 *R_IRQ_MASK0_CLR = 1186 IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) | 1187 IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) | 1188 IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr); 1189 1190 *R_IRQ_MASK2_CLR = 1191 IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) | 1192 IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) | 1193 IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) | 1194 IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr); 1195 1196 /* Stop the receiver and the transmitter */ 1197 1198 RESET_DMA(NETWORK_TX_DMA_NBR); 1199 RESET_DMA(NETWORK_RX_DMA_NBR); 1200 1201 /* Flush the Tx and disable Rx here. */ 1202 1203 free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev); 1204 free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev); 1205 free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev); 1206 1207 /* Update the statistics here. */ 1208 1209 update_rx_stats(&np->stats); 1210 update_tx_stats(&np->stats); 1211 1212 /* Stop speed/duplex timers */ 1213 del_timer(&speed_timer); 1214 del_timer(&duplex_timer); 1215 1216 return 0; 1217} 1218 1219static int 1220e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1221{ 1222 struct mii_ioctl_data *data = if_mii(ifr); 1223 struct net_local *np = netdev_priv(dev); 1224 1225 spin_lock(&np->lock); /* Preempt protection */ 1226 switch (cmd) { 1227 case SIOCGMIIPHY: /* Get PHY address */ 1228 data->phy_id = mdio_phy_addr; 1229 break; 1230 case SIOCGMIIREG: /* Read MII register */ 1231 data->val_out = e100_get_mdio_reg(dev, mdio_phy_addr, data->reg_num); 1232 break; 1233 case SIOCSMIIREG: /* Write MII register */ 1234 e100_set_mdio_reg(dev, mdio_phy_addr, data->reg_num, data->val_in); 1235 break; 1236 /* The ioctls below should be considered obsolete but are */ 1237 /* still present for compatability with old scripts/apps */ 1238 case SET_ETH_SPEED_10: /* 10 Mbps */ 1239 
e100_set_speed(dev, 10); 1240 break; 1241 case SET_ETH_SPEED_100: /* 100 Mbps */ 1242 e100_set_speed(dev, 100); 1243 break; 1244 case SET_ETH_SPEED_AUTO: /* Auto negotiate speed */ 1245 e100_set_speed(dev, 0); 1246 break; 1247 case SET_ETH_DUPLEX_HALF: /* Half duplex. */ 1248 e100_set_duplex(dev, half); 1249 break; 1250 case SET_ETH_DUPLEX_FULL: /* Full duplex. */ 1251 e100_set_duplex(dev, full); 1252 break; 1253 case SET_ETH_DUPLEX_AUTO: /* Autonegotiate duplex*/ 1254 e100_set_duplex(dev, autoneg); 1255 break; 1256 default: 1257 return -EINVAL; 1258 } 1259 spin_unlock(&np->lock); 1260 return 0; 1261} 1262 1263static int e100_set_settings(struct net_device *dev, 1264 struct ethtool_cmd *ecmd) 1265{ 1266 ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | 1267 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 1268 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; 1269 ecmd->port = PORT_TP; 1270 ecmd->transceiver = XCVR_EXTERNAL; 1271 ecmd->phy_address = mdio_phy_addr; 1272 ecmd->speed = current_speed; 1273 ecmd->duplex = full_duplex ? 
DUPLEX_FULL : DUPLEX_HALF; 1274 ecmd->advertising = ADVERTISED_TP; 1275 1276 if (current_duplex == autoneg && current_speed_selection == 0) 1277 ecmd->advertising |= ADVERTISED_Autoneg; 1278 else { 1279 ecmd->advertising |= 1280 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 1281 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 1282 if (current_speed_selection == 10) 1283 ecmd->advertising &= ~(ADVERTISED_100baseT_Half | 1284 ADVERTISED_100baseT_Full); 1285 else if (current_speed_selection == 100) 1286 ecmd->advertising &= ~(ADVERTISED_10baseT_Half | 1287 ADVERTISED_10baseT_Full); 1288 if (current_duplex == half) 1289 ecmd->advertising &= ~(ADVERTISED_10baseT_Full | 1290 ADVERTISED_100baseT_Full); 1291 else if (current_duplex == full) 1292 ecmd->advertising &= ~(ADVERTISED_10baseT_Half | 1293 ADVERTISED_100baseT_Half); 1294 } 1295 1296 ecmd->autoneg = AUTONEG_ENABLE; 1297 return 0; 1298} 1299 1300static int e100_set_settings(struct net_device *dev, 1301 struct ethtool_cmd *ecmd) 1302{ 1303 if (ecmd->autoneg == AUTONEG_ENABLE) { 1304 e100_set_duplex(dev, autoneg); 1305 e100_set_speed(dev, 0); 1306 } else { 1307 e100_set_duplex(dev, ecmd->duplex == DUPLEX_HALF ? half : full); 1308 e100_set_speed(dev, ecmd->speed == SPEED_10 ? 
10: 100); 1309 } 1310 1311 return 0; 1312} 1313 1314static void e100_get_drvinfo(struct net_device *dev, 1315 struct ethtool_drvinfo *info) 1316{ 1317 strncpy(info->driver, "ETRAX 100LX", sizeof(info->driver) - 1); 1318 strncpy(info->version, "$Revision: 1.1.1.1 $", sizeof(info->version) - 1); 1319 strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1); 1320 strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1); 1321} 1322 1323static int e100_nway_reset(struct net_device *dev) 1324{ 1325 if (current_duplex == autoneg && current_speed_selection == 0) 1326 e100_negotiate(dev); 1327 return 0; 1328} 1329 1330static const struct ethtool_ops e100_ethtool_ops = { 1331 .get_settings = e100_get_settings, 1332 .set_settings = e100_set_settings, 1333 .get_drvinfo = e100_get_drvinfo, 1334 .nway_reset = e100_nway_reset, 1335 .get_link = ethtool_op_get_link, 1336}; 1337 1338static int 1339e100_set_config(struct net_device *dev, struct ifmap *map) 1340{ 1341 struct net_local *np = (struct net_local *)dev->priv; 1342 spin_lock(&np->lock); /* Preempt protection */ 1343 1344 switch(map->port) { 1345 case IF_PORT_UNKNOWN: 1346 /* Use autoneg */ 1347 e100_set_speed(dev, 0); 1348 e100_set_duplex(dev, autoneg); 1349 break; 1350 case IF_PORT_10BASET: 1351 e100_set_speed(dev, 10); 1352 e100_set_duplex(dev, autoneg); 1353 break; 1354 case IF_PORT_100BASET: 1355 case IF_PORT_100BASETX: 1356 e100_set_speed(dev, 100); 1357 e100_set_duplex(dev, autoneg); 1358 break; 1359 case IF_PORT_100BASEFX: 1360 case IF_PORT_10BASE2: 1361 case IF_PORT_AUI: 1362 spin_unlock(&np->lock); 1363 return -EOPNOTSUPP; 1364 break; 1365 default: 1366 printk(KERN_ERR "%s: Invalid media selected", dev->name); 1367 spin_unlock(&np->lock); 1368 return -EINVAL; 1369 } 1370 spin_unlock(&np->lock); 1371 return 0; 1372} 1373 1374static void 1375update_rx_stats(struct net_device_stats *es) 1376{ 1377 unsigned long r = *R_REC_COUNTERS; 1378 /* update stats relevant to reception errors */ 1379 es->rx_fifo_errors += 
IO_EXTRACT(R_REC_COUNTERS, congestion, r); 1380 es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r); 1381 es->rx_frame_errors += IO_EXTRACT(R_REC_COUNTERS, alignment_error, r); 1382 es->rx_length_errors += IO_EXTRACT(R_REC_COUNTERS, oversize, r); 1383} 1384 1385static void 1386update_tx_stats(struct net_device_stats *es) 1387{ 1388 unsigned long r = *R_TR_COUNTERS; 1389 /* update stats relevant to transmission errors */ 1390 es->collisions += 1391 IO_EXTRACT(R_TR_COUNTERS, single_col, r) + 1392 IO_EXTRACT(R_TR_COUNTERS, multiple_col, r); 1393 es->tx_errors += IO_EXTRACT(R_TR_COUNTERS, deferred, r); 1394} 1395 1396/* 1397 * Get the current statistics. 1398 * This may be called with the card open or closed. 1399 */ 1400static struct net_device_stats * 1401e100_get_stats(struct net_device *dev) 1402{ 1403 struct net_local *lp = (struct net_local *)dev->priv; 1404 unsigned long flags; 1405 spin_lock_irqsave(&lp->lock, flags); 1406 1407 update_rx_stats(&lp->stats); 1408 update_tx_stats(&lp->stats); 1409 1410 spin_unlock_irqrestore(&lp->lock, flags); 1411 return &lp->stats; 1412} 1413 1414/* 1415 * Set or clear the multicast filter for this adaptor. 1416 * num_addrs == -1 Promiscuous mode, receive all packets 1417 * num_addrs == 0 Normal mode, clear multicast list 1418 * num_addrs > 0 Multicast mode, receive normal and MC packets, 1419 * and do best-effort filtering. 
1420 */ 1421static void 1422set_multicast_list(struct net_device *dev) 1423{ 1424 struct net_local *lp = (struct net_local *)dev->priv; 1425 int num_addr = dev->mc_count; 1426 unsigned long int lo_bits; 1427 unsigned long int hi_bits; 1428 spin_lock(&lp->lock); 1429 if (dev->flags & IFF_PROMISC) 1430 { 1431 /* promiscuous mode */ 1432 lo_bits = 0xfffffffful; 1433 hi_bits = 0xfffffffful; 1434 1435 /* Enable individual receive */ 1436 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, receive); 1437 *R_NETWORK_REC_CONFIG = network_rec_config_shadow; 1438 } else if (dev->flags & IFF_ALLMULTI) { 1439 /* enable all multicasts */ 1440 lo_bits = 0xfffffffful; 1441 hi_bits = 0xfffffffful; 1442 1443 /* Disable individual receive */ 1444 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard); 1445 *R_NETWORK_REC_CONFIG = network_rec_config_shadow; 1446 } else if (num_addr == 0) { 1447 /* Normal, clear the mc list */ 1448 lo_bits = 0x00000000ul; 1449 hi_bits = 0x00000000ul; 1450 1451 /* Disable individual receive */ 1452 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard); 1453 *R_NETWORK_REC_CONFIG = network_rec_config_shadow; 1454 } else { 1455 /* MC mode, receive normal and MC packets */ 1456 char hash_ix; 1457 struct dev_mc_list *dmi = dev->mc_list; 1458 int i; 1459 char *baddr; 1460 lo_bits = 0x00000000ul; 1461 hi_bits = 0x00000000ul; 1462 for (i=0; i<num_addr; i++) { 1463 /* Calculate the hash index for the GA registers */ 1464 1465 hash_ix = 0; 1466 baddr = dmi->dmi_addr; 1467 hash_ix ^= (*baddr) & 0x3f; 1468 hash_ix ^= ((*baddr) >> 6) & 0x03; 1469 ++baddr; 1470 hash_ix ^= ((*baddr) << 2) & 0x03c; 1471 hash_ix ^= ((*baddr) >> 4) & 0xf; 1472 ++baddr; 1473 hash_ix ^= ((*baddr) << 4) & 0x30; 1474 hash_ix ^= ((*baddr) >> 2) & 0x3f; 1475 ++baddr; 1476 hash_ix ^= (*baddr) & 0x3f; 1477 hash_ix ^= ((*baddr) >> 6) & 0x03; 1478 ++baddr; 1479 hash_ix ^= ((*baddr) << 2) & 0x03c; 1480 hash_ix ^= ((*baddr) >> 4) & 0xf; 1481 
++baddr; 1482 hash_ix ^= ((*baddr) << 4) & 0x30; 1483 hash_ix ^= ((*baddr) >> 2) & 0x3f; 1484 1485 hash_ix &= 0x3f; 1486 1487 if (hash_ix >= 32) { 1488 hi_bits |= (1 << (hash_ix-32)); 1489 } 1490 else { 1491 lo_bits |= (1 << hash_ix); 1492 } 1493 dmi = dmi->next; 1494 } 1495 /* Disable individual receive */ 1496 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard); 1497 *R_NETWORK_REC_CONFIG = network_rec_config_shadow; 1498 } 1499 *R_NETWORK_GA_0 = lo_bits; 1500 *R_NETWORK_GA_1 = hi_bits; 1501 spin_unlock(&lp->lock); 1502} 1503 1504void 1505e100_hardware_send_packet(char *buf, int length) 1506{ 1507 D(printk("e100 send pack, buf 0x%x len %d\n", buf, length)); 1508 1509 if (!led_active && time_after(jiffies, led_next_time)) { 1510 /* light the network leds depending on the current speed. */ 1511 e100_set_network_leds(NETWORK_ACTIVITY); 1512 1513 /* Set the earliest time we may clear the LED */ 1514 led_next_time = jiffies + NET_FLASH_TIME; 1515 led_active = 1; 1516 mod_timer(&clear_led_timer, jiffies + HZ/10); 1517 } 1518 1519 /* configure the tx dma descriptor */ 1520 myNextTxDesc->descr.sw_len = length; 1521 myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait; 1522 myNextTxDesc->descr.buf = virt_to_phys(buf); 1523 1524 /* Move end of list */ 1525 myLastTxDesc->descr.ctrl &= ~d_eol; 1526 myLastTxDesc = myNextTxDesc; 1527 1528 /* Restart DMA channel */ 1529 *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart); 1530} 1531 1532static void 1533e100_clear_network_leds(unsigned long dummy) 1534{ 1535 if (led_active && time_after(jiffies, led_next_time)) { 1536 e100_set_network_leds(NO_NETWORK_ACTIVITY); 1537 1538 /* Set the earliest time we may set the LED */ 1539 led_next_time = jiffies + NET_FLASH_PAUSE; 1540 led_active = 0; 1541 } 1542} 1543 1544static void 1545e100_set_network_leds(int active) 1546{ 1547#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK) 1548 int light_leds = (active == NO_NETWORK_ACTIVITY); 1549#elif 
defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY) 1550 int light_leds = (active == NETWORK_ACTIVITY); 1551#else 1552#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY" 1553#endif 1554 1555 if (!current_speed) { 1556 /* Make LED red, link is down */ 1557#if defined(CONFIG_ETRAX_NETWORK_RED_ON_NO_CONNECTION) 1558 LED_NETWORK_SET(LED_RED); 1559#else 1560 LED_NETWORK_SET(LED_OFF); 1561#endif 1562 } 1563 else if (light_leds) { 1564 if (current_speed == 10) { 1565 LED_NETWORK_SET(LED_ORANGE); 1566 } else { 1567 LED_NETWORK_SET(LED_GREEN); 1568 } 1569 } 1570 else { 1571 LED_NETWORK_SET(LED_OFF); 1572 } 1573} 1574 1575static int 1576etrax_init_module(void) 1577{ 1578 return etrax_ethernet_init(); 1579} 1580 1581static int __init 1582e100_boot_setup(char* str) 1583{ 1584 struct sockaddr sa = {0}; 1585 int i; 1586 1587 /* Parse the colon separated Ethernet station address */ 1588 for (i = 0; i < ETH_ALEN; i++) { 1589 unsigned int tmp; 1590 if (sscanf(str + 3*i, "%2x", &tmp) != 1) { 1591 printk(KERN_WARNING "Malformed station address"); 1592 return 0; 1593 } 1594 sa.sa_data[i] = (char)tmp; 1595 } 1596 1597 default_mac = sa; 1598 return 1; 1599} 1600 1601__setup("etrax100_eth=", e100_boot_setup); 1602 1603module_init(etrax_init_module); 1604