
Lines Matching refs:gp

139 static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
149 writel(cmd, gp->regs + MIF_FRAME);
152 cmd = readl(gp->regs + MIF_FRAME);
167 struct gem *gp = dev->priv;
168 return __phy_read(gp, mii_id, reg);
171 static inline u16 phy_read(struct gem *gp, int reg)
173 return __phy_read(gp, gp->mii_phy_addr, reg);
176 static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
187 writel(cmd, gp->regs + MIF_FRAME);
190 cmd = readl(gp->regs + MIF_FRAME);
200 struct gem *gp = dev->priv;
201 __phy_write(gp, mii_id, reg, val & 0xffff);
204 static inline void phy_write(struct gem *gp, int reg, u16 val)
206 __phy_write(gp, gp->mii_phy_addr, reg, val);
209 static inline void gem_enable_ints(struct gem *gp)
212 writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
215 static inline void gem_disable_ints(struct gem *gp)
218 writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
221 static void gem_get_cell(struct gem *gp)
223 BUG_ON(gp->cell_enabled < 0);
224 gp->cell_enabled++;
226 if (gp->cell_enabled == 1) {
228 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
235 static void gem_put_cell(struct gem *gp)
237 BUG_ON(gp->cell_enabled <= 0);
238 gp->cell_enabled--;
240 if (gp->cell_enabled == 0) {
242 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
248 static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
250 if (netif_msg_intr(gp))
251 printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
254 static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
256 u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
259 if (netif_msg_intr(gp))
261 gp->dev->name, pcs_istat);
273 pcs_miistat = readl(gp->regs + PCS_MIISTAT);
276 (readl(gp->regs + PCS_MIISTAT) &
294 netif_carrier_on(gp->dev);
298 netif_carrier_off(gp->dev);
302 if (!timer_pending(&gp->link_timer))
309 static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
311 u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);
313 if (netif_msg_intr(gp))
315 gp->dev->name, txmac_stat);
327 gp->net_stats.tx_fifo_errors++;
333 gp->net_stats.tx_errors++;
340 gp->net_stats.collisions += 0x10000;
343 gp->net_stats.tx_aborted_errors += 0x10000;
344 gp->net_stats.collisions += 0x10000;
348 gp->net_stats.tx_aborted_errors += 0x10000;
349 gp->net_stats.collisions += 0x10000;
364 static int gem_rxmac_reset(struct gem *gp)
366 struct net_device *dev = gp->dev;
372 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
374 if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
384 writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
385 gp->regs + MAC_RXCFG);
387 if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
398 writel(0, gp->regs + RXDMA_CFG);
400 if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
413 writel(gp->swrst_base | GREG_SWRST_RXRST,
414 gp->regs + GREG_SWRST);
416 if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
428 struct gem_rxd *rxd = &gp->init_block->rxd[i];
430 if (gp->rx_skbs[i] == NULL) {
436 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
438 gp->rx_new = gp->rx_old = 0;
441 desc_dma = (u64) gp->gblock_dvma;
443 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
444 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
445 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
448 writel(val, gp->regs + RXDMA_CFG);
449 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
452 gp->regs + RXDMA_BLANK);
456 gp->regs + RXDMA_BLANK);
457 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
458 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
459 writel(val, gp->regs + RXDMA_PTHRESH);
460 val = readl(gp->regs + RXDMA_CFG);
461 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
462 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
463 val = readl(gp->regs + MAC_RXCFG);
464 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
469 static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
471 u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
474 if (netif_msg_intr(gp))
476 gp->dev->name, rxmac_stat);
479 u32 smac = readl(gp->regs + MAC_SMACHINE);
483 gp->net_stats.rx_over_errors++;
484 gp->net_stats.rx_fifo_errors++;
486 ret = gem_rxmac_reset(gp);
490 gp->net_stats.rx_frame_errors += 0x10000;
493 gp->net_stats.rx_crc_errors += 0x10000;
496 gp->net_stats.rx_length_errors += 0x10000;
504 static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
506 u32 mac_cstat = readl(gp->regs + MAC_CSTAT);
508 if (netif_msg_intr(gp))
510 gp->dev->name, mac_cstat);
517 gp->pause_entered++;
520 gp->pause_last_time_recvd = (mac_cstat >> 16);
525 static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
527 u32 mif_status = readl(gp->regs + MIF_STATUS);
533 gem_handle_mif_event(gp, reg_val, changed_bits);
538 static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
540 u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);
542 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
543 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
565 pci_read_config_word(gp->pdev, PCI_STATUS,
595 pci_write_config_word(gp->pdev,
608 static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
612 if (netif_msg_rx_err(gp))
614 gp->dev->name);
615 gp->net_stats.rx_dropped++;
620 if (netif_msg_rx_err(gp))
622 gp->dev->name);
623 gp->net_stats.rx_errors++;
629 if (gem_pcs_interrupt(dev, gp, gem_status))
634 if (gem_txmac_interrupt(dev, gp, gem_status))
639 if (gem_rxmac_interrupt(dev, gp, gem_status))
644 if (gem_mac_interrupt(dev, gp, gem_status))
649 if (gem_mif_interrupt(dev, gp, gem_status))
654 if (gem_pci_interrupt(dev, gp, gem_status))
661 gp->reset_task_pending = 1;
662 schedule_work(&gp->reset_task);
667 static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
671 if (netif_msg_intr(gp))
673 gp->dev->name, gem_status);
675 entry = gp->tx_old;
684 if (netif_msg_tx_done(gp))
686 gp->dev->name, entry);
687 skb = gp->tx_skbs[entry];
704 gp->tx_skbs[entry] = NULL;
705 gp->net_stats.tx_bytes += skb->len;
708 txd = &gp->init_block->txd[entry];
713 pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
717 gp->net_stats.tx_packets++;
720 gp->tx_old = entry;
723 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
727 static __inline__ void gem_post_rxds(struct gem *gp, int limit)
731 cluster_start = curr = (gp->rx_new & ~(4 - 1));
739 &gp->init_block->rxd[cluster_start];
741 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
753 writel(kick, gp->regs + RXDMA_KICK);
757 static int gem_rx(struct gem *gp, int work_to_do)
762 if (netif_msg_rx_status(gp))
764 gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
766 entry = gp->rx_new;
768 done = readl(gp->regs + RXDMA_DONE);
770 struct gem_rxd *rxd = &gp->init_block->rxd[entry];
790 done = readl(gp->regs + RXDMA_DONE);
798 skb = gp->rx_skbs[entry];
802 gp->net_stats.rx_errors++;
804 gp->net_stats.rx_length_errors++;
806 gp->net_stats.rx_crc_errors++;
810 gp->net_stats.rx_dropped++;
818 new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
823 pci_unmap_page(gp->pdev, dma_addr,
824 RX_BUF_ALLOC_SIZE(gp),
826 gp->rx_skbs[entry] = new_skb;
827 new_skb->dev = gp->dev;
828 skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
829 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
832 RX_BUF_ALLOC_SIZE(gp),
848 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
850 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
858 skb->protocol = eth_type_trans(skb, gp->dev);
862 gp->net_stats.rx_packets++;
863 gp->net_stats.rx_bytes += len;
864 gp->dev->last_rx = jiffies;
870 gem_post_rxds(gp, entry);
872 gp->rx_new = entry;
876 gp->dev->name);
883 struct gem *gp = dev->priv;
889 spin_lock_irqsave(&gp->lock, flags);
895 if (gp->status & GREG_STAT_ABNORMAL) {
896 if (gem_abnormal_irq(dev, gp, gp->status))
901 spin_lock(&gp->tx_lock);
902 gem_tx(dev, gp, gp->status);
903 spin_unlock(&gp->tx_lock);
905 spin_unlock_irqrestore(&gp->lock, flags);
914 work_done = gem_rx(gp, work_to_do);
922 spin_lock_irqsave(&gp->lock, flags);
924 gp->status = readl(gp->regs + GREG_STAT);
925 } while (gp->status & GREG_STAT_NAPI);
928 gem_enable_ints(gp);
930 spin_unlock_irqrestore(&gp->lock, flags);
937 struct gem *gp = dev->priv;
944 if (!gp->running)
947 spin_lock_irqsave(&gp->lock, flags);
950 u32 gem_status = readl(gp->regs + GREG_STAT);
954 spin_unlock_irqrestore(&gp->lock, flags);
957 gp->status = gem_status;
958 gem_disable_ints(gp);
962 spin_unlock_irqrestore(&gp->lock, flags);
983 struct gem *gp = dev->priv;
986 if (!gp->running) {
992 readl(gp->regs + TXDMA_CFG),
993 readl(gp->regs + MAC_TXSTAT),
994 readl(gp->regs + MAC_TXCFG));
997 readl(gp->regs + RXDMA_CFG),
998 readl(gp->regs + MAC_RXSTAT),
999 readl(gp->regs + MAC_RXCFG));
1001 spin_lock_irq(&gp->lock);
1002 spin_lock(&gp->tx_lock);
1004 gp->reset_task_pending = 1;
1005 schedule_work(&gp->reset_task);
1007 spin_unlock(&gp->tx_lock);
1008 spin_unlock_irq(&gp->lock);
1022 struct gem *gp = dev->priv;
1038 if (!spin_trylock(&gp->tx_lock)) {
1044 if (!gp->running) {
1045 spin_unlock_irqrestore(&gp->tx_lock, flags);
1050 if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
1052 spin_unlock_irqrestore(&gp->tx_lock, flags);
1058 entry = gp->tx_new;
1059 gp->tx_skbs[entry] = skb;
1062 struct gem_txd *txd = &gp->init_block->txd[entry];
1067 mapping = pci_map_page(gp->pdev,
1093 first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
1105 mapping = pci_map_page(gp->pdev,
1113 txd = &gp->init_block->txd[entry];
1123 txd = &gp->init_block->txd[first_entry];
1130 gp->tx_new = entry;
1131 if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
1134 if (netif_msg_tx_queued(gp))
1138 writel(gp->tx_new, gp->regs + TXDMA_KICK);
1139 spin_unlock_irqrestore(&gp->tx_lock, flags);
1148 /* Must be invoked under gp->lock and gp->tx_lock. */
1149 static void gem_reset(struct gem *gp)
1155 writel(0xffffffff, gp->regs + GREG_IMASK);
1158 writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
1159 gp->regs + GREG_SWRST);
1165 val = readl(gp->regs + GREG_SWRST);
1171 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
1174 /* Must be invoked under gp->lock and gp->tx_lock. */
1175 static void gem_start_dma(struct gem *gp)
1180 val = readl(gp->regs + TXDMA_CFG);
1181 writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
1182 val = readl(gp->regs + RXDMA_CFG);
1183 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
1184 val = readl(gp->regs + MAC_TXCFG);
1185 writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
1186 val = readl(gp->regs + MAC_RXCFG);
1187 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
1189 (void) readl(gp->regs + MAC_RXCFG);
1192 gem_enable_ints(gp);
1194 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
1197 /* Must be invoked under gp->lock and gp->tx_lock. DMA won't be
1200 static void gem_stop_dma(struct gem *gp)
1205 val = readl(gp->regs + TXDMA_CFG);
1206 writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
1207 val = readl(gp->regs + RXDMA_CFG);
1208 writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
1209 val = readl(gp->regs + MAC_TXCFG);
1210 writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
1211 val = readl(gp->regs + MAC_RXCFG);
1212 writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
1214 (void) readl(gp->regs + MAC_RXCFG);
1220 /* Must be invoked under gp->lock and gp->tx_lock. */
1221 static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
1228 if (gp->phy_type != phy_mii_mdio0 &&
1229 gp->phy_type != phy_mii_mdio1)
1233 if (found_mii_phy(gp))
1234 features = gp->phy_mii.def->features;
1239 if (gp->phy_mii.advertising != 0)
1240 advertise &= gp->phy_mii.advertising;
1242 autoneg = gp->want_autoneg;
1243 speed = gp->phy_mii.speed;
1244 duplex = gp->phy_mii.duplex;
1279 if (gp->asleep) {
1280 gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
1281 gp->phy_mii.speed = speed;
1282 gp->phy_mii.duplex = duplex;
1287 gp->want_autoneg = autoneg;
1289 if (found_mii_phy(gp))
1290 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
1291 gp->lstate = link_aneg;
1293 if (found_mii_phy(gp))
1294 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
1295 gp->lstate = link_force_ok;
1299 gp->timer_ticks = 0;
1300 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
1306 * Must be invoked under gp->lock and gp->tx_lock.
1308 static int gem_set_link_modes(struct gem *gp)
1317 if (found_mii_phy(gp)) {
1318 if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
1320 full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
1321 speed = gp->phy_mii.speed;
1322 pause = gp->phy_mii.pause;
1323 } else if (gp->phy_type == phy_serialink ||
1324 gp->phy_type == phy_serdes) {
1325 u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
1332 if (netif_msg_link(gp))
1334 gp->dev->name, speed, (full_duplex ? "full" : "half"));
1336 if (!gp->running)
1345 writel(val, gp->regs + MAC_TXCFG);
1349 (gp->phy_type == phy_mii_mdio0 ||
1350 gp->phy_type == phy_mii_mdio1)) {
1359 writel(val, gp->regs + MAC_XIFCFG);
1365 val = readl(gp->regs + MAC_TXCFG);
1366 writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
1368 val = readl(gp->regs + MAC_RXCFG);
1369 writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
1371 val = readl(gp->regs + MAC_TXCFG);
1372 writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
1374 val = readl(gp->regs + MAC_RXCFG);
1375 writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
1378 if (gp->phy_type == phy_serialink ||
1379 gp->phy_type == phy_serdes) {
1380 u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
1386 if (netif_msg_link(gp)) {
1390 gp->dev->name,
1391 gp->rx_fifo_sz,
1392 gp->rx_pause_off,
1393 gp->rx_pause_on);
1396 gp->dev->name);
1401 writel(512, gp->regs + MAC_STIME);
1403 writel(64, gp->regs + MAC_STIME);
1404 val = readl(gp->regs + MAC_MCCFG);
1409 writel(val, gp->regs + MAC_MCCFG);
1411 gem_start_dma(gp);
1416 /* Must be invoked under gp->lock and gp->tx_lock. */
1417 static int gem_mdio_link_not_up(struct gem *gp)
1419 switch (gp->lstate) {
1421 if (netif_msg_link(gp))
1423 " forced mode\n", gp->dev->name);
1424 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
1425 gp->last_forced_speed, DUPLEX_HALF);
1426 gp->timer_ticks = 5;
1427 gp->lstate = link_force_ok;
1434 if (gp->phy_mii.def->magic_aneg)
1436 if (netif_msg_link(gp))
1438 gp->dev->name);
1440 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
1442 gp->timer_ticks = 5;
1443 gp->lstate = link_force_try;
1450 if (gp->phy_mii.speed == SPEED_100) {
1451 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
1453 gp->timer_ticks = 5;
1454 if (netif_msg_link(gp))
1456 gp->dev->name);
1467 struct gem *gp = (struct gem *) data;
1470 if (gp->asleep)
1473 spin_lock_irq(&gp->lock);
1474 spin_lock(&gp->tx_lock);
1475 gem_get_cell(gp);
1480 if (gp->reset_task_pending)
1483 if (gp->phy_type == phy_serialink ||
1484 gp->phy_type == phy_serdes) {
1485 u32 val = readl(gp->regs + PCS_MIISTAT);
1488 val = readl(gp->regs + PCS_MIISTAT);
1491 gp->lstate = link_up;
1492 netif_carrier_on(gp->dev);
1493 (void)gem_set_link_modes(gp);
1497 if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
1503 if (gp->lstate == link_force_try && gp->want_autoneg) {
1504 gp->lstate = link_force_ret;
1505 gp->last_forced_speed = gp->phy_mii.speed;
1506 gp->timer_ticks = 5;
1507 if (netif_msg_link(gp))
1509 " autoneg once...\n", gp->dev->name);
1510 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
1511 } else if (gp->lstate != link_up) {
1512 gp->lstate = link_up;
1513 netif_carrier_on(gp->dev);
1514 if (gem_set_link_modes(gp))
1521 if (gp->lstate == link_up) {
1522 gp->lstate = link_down;
1523 if (netif_msg_link(gp))
1525 gp->dev->name);
1526 netif_carrier_off(gp->dev);
1527 gp->reset_task_pending = 1;
1528 schedule_work(&gp->reset_task);
1530 } else if (++gp->timer_ticks > 10) {
1531 if (found_mii_phy(gp))
1532 restart_aneg = gem_mdio_link_not_up(gp);
1538 gem_begin_auto_negotiation(gp, NULL);
1542 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
1544 gem_put_cell(gp);
1545 spin_unlock(&gp->tx_lock);
1546 spin_unlock_irq(&gp->lock);
1549 /* Must be invoked under gp->lock and gp->tx_lock. */
1550 static void gem_clean_rings(struct gem *gp)
1552 struct gem_init_block *gb = gp->init_block;
1561 if (gp->rx_skbs[i] != NULL) {
1562 skb = gp->rx_skbs[i];
1564 pci_unmap_page(gp->pdev, dma_addr,
1565 RX_BUF_ALLOC_SIZE(gp),
1568 gp->rx_skbs[i] = NULL;
1576 if (gp->tx_skbs[i] != NULL) {
1580 skb = gp->tx_skbs[i];
1581 gp->tx_skbs[i] = NULL;
1588 pci_unmap_page(gp->pdev, dma_addr,
1600 /* Must be invoked under gp->lock and gp->tx_lock. */
1601 static void gem_init_rings(struct gem *gp)
1603 struct gem_init_block *gb = gp->init_block;
1604 struct net_device *dev = gp->dev;
1608 gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;
1610 gem_clean_rings(gp);
1612 gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
1619 skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
1626 gp->rx_skbs[i] = skb;
1628 skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
1629 dma_addr = pci_map_page(gp->pdev,
1632 RX_BUF_ALLOC_SIZE(gp),
1636 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
1651 static void gem_init_phy(struct gem *gp)
1656 mifcfg = readl(gp->regs + MIF_CFG);
1658 writel(mifcfg, gp->regs + MIF_CFG);
1660 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
1669 pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
1675 phy_write(gp, MII_BMCR, BMCR_RESET);
1677 if (phy_read(gp, MII_BMCR) != 0xffff)
1681 gp->dev->name);
1685 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
1686 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
1690 if (gp->phy_type == phy_mii_mdio0 ||
1691 gp->phy_type == phy_mii_mdio1) {
1693 } else if (gp->phy_type == phy_serialink) {
1699 writel(val, gp->regs + PCS_DMODE);
1702 if (gp->phy_type == phy_mii_mdio0 ||
1703 gp->phy_type == phy_mii_mdio1) {
1704 mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
1707 if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
1708 gp->phy_mii.def->ops->init(&gp->phy_mii);
1714 val = readl(gp->regs + PCS_MIICTRL);
1716 writeb(val, gp->regs + PCS_MIICTRL);
1719 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
1726 gp->dev->name);
1731 val = readl(gp->regs + PCS_CFG);
1733 writel(val, gp->regs + PCS_CFG);
1738 val = readl(gp->regs + PCS_MIIADV);
1741 writel(val, gp->regs + PCS_MIIADV);
1746 val = readl(gp->regs + PCS_MIICTRL);
1749 writel(val, gp->regs + PCS_MIICTRL);
1751 val = readl(gp->regs + PCS_CFG);
1753 writel(val, gp->regs + PCS_CFG);
1759 val = readl(gp->regs + PCS_SCTRL);
1760 if (gp->phy_type == phy_serialink)
1764 writel(val, gp->regs + PCS_SCTRL);
1768 gp->timer_ticks = 0;
1769 gp->lstate = link_down;
1770 netif_carrier_off(gp->dev);
1773 spin_lock_irq(&gp->lock);
1774 gem_begin_auto_negotiation(gp, NULL);
1775 spin_unlock_irq(&gp->lock);
1778 /* Must be invoked under gp->lock and gp->tx_lock. */
1779 static void gem_init_dma(struct gem *gp)
1781 u64 desc_dma = (u64) gp->gblock_dvma;
1785 writel(val, gp->regs + TXDMA_CFG);
1787 writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
1788 writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
1791 writel(0, gp->regs + TXDMA_KICK);
1795 writel(val, gp->regs + RXDMA_CFG);
1797 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
1798 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
1800 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
1802 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
1803 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
1804 writel(val, gp->regs + RXDMA_PTHRESH);
1806 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
1809 gp->regs + RXDMA_BLANK);
1813 gp->regs + RXDMA_BLANK);
1816 /* Must be invoked under gp->lock and gp->tx_lock. */
1817 static u32 gem_setup_multicast(struct gem *gp)
1822 if ((gp->dev->flags & IFF_ALLMULTI) ||
1823 (gp->dev->mc_count > 256)) {
1825 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
1827 } else if (gp->dev->flags & IFF_PROMISC) {
1832 struct dev_mc_list *dmi = gp->dev->mc_list;
1838 for (i = 0; i < gp->dev->mc_count; i++) {
1851 writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
1858 /* Must be invoked under gp->lock and gp->tx_lock. */
1859 static void gem_init_mac(struct gem *gp)
1861 unsigned char *e = &gp->dev->dev_addr[0];
1863 writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
1865 writel(0x00, gp->regs + MAC_IPG0);
1866 writel(0x08, gp->regs + MAC_IPG1);
1867 writel(0x04, gp->regs + MAC_IPG2);
1868 writel(0x40, gp->regs + MAC_STIME);
1869 writel(0x40, gp->regs + MAC_MINFSZ);
1872 writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);
1874 writel(0x07, gp->regs + MAC_PASIZE);
1875 writel(0x04, gp->regs + MAC_JAMSIZE);
1876 writel(0x10, gp->regs + MAC_ATTLIM);
1877 writel(0x8808, gp->regs + MAC_MCTYPE);
1879 writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);
1881 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
1882 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
1883 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
1885 writel(0, gp->regs + MAC_ADDR3);
1886 writel(0, gp->regs + MAC_ADDR4);
1887 writel(0, gp->regs + MAC_ADDR5);
1889 writel(0x0001, gp->regs + MAC_ADDR6);
1890 writel(0xc200, gp->regs + MAC_ADDR7);
1891 writel(0x0180, gp->regs + MAC_ADDR8);
1893 writel(0, gp->regs + MAC_AFILT0);
1894 writel(0, gp->regs + MAC_AFILT1);
1895 writel(0, gp->regs + MAC_AFILT2);
1896 writel(0, gp->regs + MAC_AF21MSK);
1897 writel(0, gp->regs + MAC_AF0MSK);
1899 gp->mac_rx_cfg = gem_setup_multicast(gp);
1901 gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
1903 writel(0, gp->regs + MAC_NCOLL);
1904 writel(0, gp->regs + MAC_FASUCC);
1905 writel(0, gp->regs + MAC_ECOLL);
1906 writel(0, gp->regs + MAC_LCOLL);
1907 writel(0, gp->regs + MAC_DTIMER);
1908 writel(0, gp->regs + MAC_PATMPS);
1909 writel(0, gp->regs + MAC_RFCTR);
1910 writel(0, gp->regs + MAC_LERR);
1911 writel(0, gp->regs + MAC_AERR);
1912 writel(0, gp->regs + MAC_FCSERR);
1913 writel(0, gp->regs + MAC_RXCVERR);
1918 writel(0, gp->regs + MAC_TXCFG);
1919 writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
1920 writel(0, gp->regs + MAC_MCCFG);
1921 writel(0, gp->regs + MAC_XIFCFG);
1927 writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
1928 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
1933 writel(0xffffffff, gp->regs + MAC_MCMASK);
1937 if (gp->has_wol)
1938 writel(0, gp->regs + WOL_WAKECSR);
1941 /* Must be invoked under gp->lock and gp->tx_lock. */
1942 static void gem_init_pause_thresholds(struct gem *gp)
1951 if (gp->rx_fifo_sz <= (2 * 1024)) {
1952 gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
1954 int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
1955 int off = (gp->rx_fifo_sz - (max_frame * 2));
1958 gp->rx_pause_off = off;
1959 gp->rx_pause_on = on;
1967 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
1974 writel(cfg, gp->regs + GREG_CFG);
1979 if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
1982 writel(cfg, gp->regs + GREG_CFG);
1986 static int gem_check_invariants(struct gem *gp)
1988 struct pci_dev *pdev = gp->pdev;
1996 gp->phy_type = phy_mii_mdio0;
1997 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
1998 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
1999 gp->swrst_base = 0;
2001 mif_cfg = readl(gp->regs + MIF_CFG);
2004 writel(mif_cfg, gp->regs + MIF_CFG);
2005 writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
2006 writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
2012 if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
2013 gp->mii_phy_addr = 1;
2015 gp->mii_phy_addr = 0;
2020 mif_cfg = readl(gp->regs + MIF_CFG);
2039 gp->phy_type = phy_mii_mdio1;
2041 writel(mif_cfg, gp->regs + MIF_CFG);
2043 gp->phy_type = phy_mii_mdio0;
2045 writel(mif_cfg, gp->regs + MIF_CFG);
2047 gp->phy_type = phy_serialink;
2049 if (gp->phy_type == phy_mii_mdio1 ||
2050 gp->phy_type == phy_mii_mdio0) {
2054 gp->mii_phy_addr = i;
2055 if (phy_read(gp, MII_BMCR) != 0xffff)
2063 gp->phy_type = phy_serdes;
2068 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
2069 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
2073 if (gp->tx_fifo_sz != (9 * 1024) ||
2074 gp->rx_fifo_sz != (20 * 1024)) {
2076 gp->tx_fifo_sz, gp->rx_fifo_sz);
2079 gp->swrst_base = 0;
2081 if (gp->tx_fifo_sz != (2 * 1024) ||
2082 gp->rx_fifo_sz != (2 * 1024)) {
2084 gp->tx_fifo_sz, gp->rx_fifo_sz);
2087 gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
2094 /* Must be invoked under gp->lock and gp->tx_lock. */
2095 static void gem_reinit_chip(struct gem *gp)
2098 gem_reset(gp);
2101 gem_disable_ints(gp);
2104 gem_init_rings(gp);
2107 gem_init_pause_thresholds(gp);
2110 gem_init_dma(gp);
2111 gem_init_mac(gp);
2116 static void gem_stop_phy(struct gem *gp, int wol)
2129 mifcfg = readl(gp->regs + MIF_CFG);
2131 writel(mifcfg, gp->regs + MIF_CFG);
2133 if (wol && gp->has_wol) {
2134 unsigned char *e = &gp->dev->dev_addr[0];
2139 gp->regs + MAC_RXCFG);
2140 writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
2141 writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
2142 writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
2144 writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
2146 if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
2148 writel(csr, gp->regs + WOL_WAKECSR);
2150 writel(0, gp->regs + MAC_RXCFG);
2151 (void)readl(gp->regs + MAC_RXCFG);
2159 writel(0, gp->regs + MAC_TXCFG);
2160 writel(0, gp->regs + MAC_XIFCFG);
2161 writel(0, gp->regs + TXDMA_CFG);
2162 writel(0, gp->regs + RXDMA_CFG);
2165 spin_lock_irqsave(&gp->lock, flags);
2166 spin_lock(&gp->tx_lock);
2167 gem_reset(gp);
2168 writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
2169 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
2170 spin_unlock(&gp->tx_lock);
2171 spin_unlock_irqrestore(&gp->lock, flags);
2175 if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
2176 gp->phy_mii.def->ops->suspend(&gp->phy_mii);
2181 writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
2182 writel(0, gp->regs + MIF_BBCLK);
2183 writel(0, gp->regs + MIF_BBDATA);
2184 writel(0, gp->regs + MIF_BBOENAB);
2185 writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
2186 (void) readl(gp->regs + MAC_XIFCFG);
2193 struct gem *gp = dev->priv;
2196 spin_lock_irqsave(&gp->lock, flags);
2197 spin_lock(&gp->tx_lock);
2200 gem_get_cell(gp);
2203 gem_reinit_chip(gp);
2205 gp->running = 1;
2207 if (gp->lstate == link_up) {
2208 netif_carrier_on(gp->dev);
2209 gem_set_link_modes(gp);
2212 netif_wake_queue(gp->dev);
2214 spin_unlock(&gp->tx_lock);
2215 spin_unlock_irqrestore(&gp->lock, flags);
2217 if (request_irq(gp->pdev->irq, gem_interrupt,
2219 printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
2221 spin_lock_irqsave(&gp->lock, flags);
2222 spin_lock(&gp->tx_lock);
2224 gp->running = 0;
2225 gem_reset(gp);
2226 gem_clean_rings(gp);
2227 gem_put_cell(gp);
2229 spin_unlock(&gp->tx_lock);
2230 spin_unlock_irqrestore(&gp->lock, flags);
2240 struct gem *gp = dev->priv;
2243 spin_lock_irqsave(&gp->lock, flags);
2244 spin_lock(&gp->tx_lock);
2246 gp->running = 0;
2252 gem_disable_ints(gp);
2255 spin_unlock(&gp->tx_lock);
2256 spin_unlock_irqrestore(&gp->lock, flags);
2259 gem_stop_dma(gp);
2262 gem_reset(gp);
2266 gem_clean_rings(gp);
2269 free_irq(gp->pdev->irq, (void *) dev);
2273 spin_lock_irqsave(&gp->lock, flags);
2274 gem_put_cell(gp);
2275 spin_unlock_irqrestore(&gp->lock, flags);
2281 struct gem *gp = container_of(work, struct gem, reset_task);
2283 mutex_lock(&gp->pm_mutex);
2285 netif_poll_disable(gp->dev);
2287 spin_lock_irq(&gp->lock);
2288 spin_lock(&gp->tx_lock);
2290 if (gp->running == 0)
2293 if (gp->running) {
2294 netif_stop_queue(gp->dev);
2297 gem_reinit_chip(gp);
2298 if (gp->lstate == link_up)
2299 gem_set_link_modes(gp);
2300 netif_wake_queue(gp->dev);
2303 gp->reset_task_pending = 0;
2305 spin_unlock(&gp->tx_lock);
2306 spin_unlock_irq(&gp->lock);
2308 netif_poll_enable(gp->dev);
2310 mutex_unlock(&gp->pm_mutex);
2316 struct gem *gp = dev->priv;
2319 mutex_lock(&gp->pm_mutex);
2322 if (!gp->asleep)
2324 gp->opened = (rc == 0);
2326 mutex_unlock(&gp->pm_mutex);
2333 struct gem *gp = dev->priv;
2339 mutex_lock(&gp->pm_mutex);
2341 gp->opened = 0;
2342 if (!gp->asleep)
2345 mutex_unlock(&gp->pm_mutex);
2354 struct gem *gp = dev->priv;
2357 mutex_lock(&gp->pm_mutex);
2363 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
2366 spin_lock_irqsave(&gp->lock, flags);
2367 spin_lock(&gp->tx_lock);
2368 gem_get_cell(gp);
2369 spin_unlock(&gp->tx_lock);
2370 spin_unlock_irqrestore(&gp->lock, flags);
2373 if (gp->opened) {
2378 gp->asleep_wol = gp->wake_on_lan;
2379 gem_do_stop(dev, gp->asleep_wol);
2381 gp->asleep_wol = 0;
2384 gp->asleep = 1;
2388 del_timer_sync(&gp->link_timer);
2394 mutex_unlock(&gp->pm_mutex);
2397 while (gp->reset_task_pending)
2402 gem_stop_phy(gp, gp->asleep_wol);
2405 pci_disable_device(gp->pdev);
2410 gem_put_cell(gp);
2418 struct gem *gp = dev->priv;
2423 mutex_lock(&gp->pm_mutex);
2429 gem_get_cell(gp);
2432 if (pci_enable_device(gp->pdev)) {
2438 gem_put_cell(gp);
2439 mutex_unlock(&gp->pm_mutex);
2442 pci_set_master(gp->pdev);
2445 gem_reset(gp);
2448 gp->asleep = 0;
2454 gem_init_phy(gp);
2457 if (gp->opened) {
2466 spin_lock_irqsave(&gp->lock, flags);
2467 spin_lock(&gp->tx_lock);
2472 if (gp->asleep_wol)
2473 gem_put_cell(gp);
2478 gem_put_cell(gp);
2480 spin_unlock(&gp->tx_lock);
2481 spin_unlock_irqrestore(&gp->lock, flags);
2485 mutex_unlock(&gp->pm_mutex);
2493 struct gem *gp = dev->priv;
2494 struct net_device_stats *stats = &gp->net_stats;
2496 spin_lock_irq(&gp->lock);
2497 spin_lock(&gp->tx_lock);
2502 if (gp->running) {
2503 stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
2504 writel(0, gp->regs + MAC_FCSERR);
2506 stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
2507 writel(0, gp->regs + MAC_AERR);
2509 stats->rx_length_errors += readl(gp->regs + MAC_LERR);
2510 writel(0, gp->regs + MAC_LERR);
2512 stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
2514 (readl(gp->regs + MAC_ECOLL) +
2515 readl(gp->regs + MAC_LCOLL));
2516 writel(0, gp->regs + MAC_ECOLL);
2517 writel(0, gp->regs + MAC_LCOLL);
2520 spin_unlock(&gp->tx_lock);
2521 spin_unlock_irq(&gp->lock);
2523 return &gp->net_stats;
2529 struct gem *gp = dev->priv;
2543 mutex_lock(&gp->pm_mutex);
2545 if (gp->running) {
2546 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
2547 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
2548 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
2550 mutex_unlock(&gp->pm_mutex);
2557 struct gem *gp = dev->priv;
2562 spin_lock_irq(&gp->lock);
2563 spin_lock(&gp->tx_lock);
2565 if (!gp->running)
2570 rxcfg = readl(gp->regs + MAC_RXCFG);
2571 rxcfg_new = gem_setup_multicast(gp);
2575 gp->mac_rx_cfg = rxcfg_new;
2577 writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
2578 while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
2587 writel(rxcfg, gp->regs + MAC_RXCFG);
2592 spin_unlock(&gp->tx_lock);
2593 spin_unlock_irq(&gp->lock);
2602 struct gem *gp = dev->priv;
2615 mutex_lock(&gp->pm_mutex);
2616 spin_lock_irq(&gp->lock);
2617 spin_lock(&gp->tx_lock);
2619 if (gp->running) {
2620 gem_reinit_chip(gp);
2621 if (gp->lstate == link_up)
2622 gem_set_link_modes(gp);
2624 spin_unlock(&gp->tx_lock);
2625 spin_unlock_irq(&gp->lock);
2626 mutex_unlock(&gp->pm_mutex);
2633 struct gem *gp = dev->priv;
2637 strcpy(info->bus_info, pci_name(gp->pdev));
2642 struct gem *gp = dev->priv;
2644 if (gp->phy_type == phy_mii_mdio0 ||
2645 gp->phy_type == phy_mii_mdio1) {
2646 if (gp->phy_mii.def)
2647 cmd->supported = gp->phy_mii.def->features;
2657 spin_lock_irq(&gp->lock);
2658 cmd->autoneg = gp->want_autoneg;
2659 cmd->speed = gp->phy_mii.speed;
2660 cmd->duplex = gp->phy_mii.duplex;
2661 cmd->advertising = gp->phy_mii.advertising;
2669 spin_unlock_irq(&gp->lock);
2687 struct gem *gp = dev->priv;
2707 spin_lock_irq(&gp->lock);
2708 gem_get_cell(gp);
2709 gem_begin_auto_negotiation(gp, cmd);
2710 gem_put_cell(gp);
2711 spin_unlock_irq(&gp->lock);
2718 struct gem *gp = dev->priv;
2720 if (!gp->want_autoneg)
2724 spin_lock_irq(&gp->lock);
2725 gem_get_cell(gp);
2726 gem_begin_auto_negotiation(gp, NULL);
2727 gem_put_cell(gp);
2728 spin_unlock_irq(&gp->lock);
2735 struct gem *gp = dev->priv;
2736 return gp->msg_enable;
2741 struct gem *gp = dev->priv;
2742 gp->msg_enable = value;
2753 struct gem *gp = dev->priv;
2756 if (gp->has_wol) {
2758 wol->wolopts = gp->wake_on_lan;
2767 struct gem *gp = dev->priv;
2769 if (!gp->has_wol)
2771 gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
2789 struct gem *gp = dev->priv;
2797 mutex_lock(&gp->pm_mutex);
2799 spin_lock_irqsave(&gp->lock, flags);
2800 gem_get_cell(gp);
2801 spin_unlock_irqrestore(&gp->lock, flags);
2805 data->phy_id = gp->mii_phy_addr;
2809 if (!gp->running)
2812 data->val_out = __phy_read(gp, data->phy_id & 0x1f,
2821 else if (!gp->running)
2824 __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
2831 spin_lock_irqsave(&gp->lock, flags);
2832 gem_put_cell(gp);
2833 spin_unlock_irqrestore(&gp->lock, flags);
2835 mutex_unlock(&gp->pm_mutex);
2893 static int __devinit gem_get_device_address(struct gem *gp)
2896 struct net_device *dev = gp->dev;
2899 addr = of_get_property(gp->of_node, "local-mac-address", NULL);
2911 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
2921 struct gem *gp = dev->priv;
2926 del_timer_sync(&gp->link_timer);
2929 gem_get_cell(gp);
2932 while (gp->reset_task_pending)
2937 gem_stop_phy(gp, 0);
2939 gem_put_cell(gp);
2942 pci_disable_device(gp->pdev);
2947 gp->init_block,
2948 gp->gblock_dvma);
2949 iounmap(gp->regs);
2963 struct gem *gp;
3016 dev = alloc_etherdev(sizeof(*gp));
3025 gp = dev->priv;
3034 gp->pdev = pdev;
3036 gp->dev = dev;
3038 gp->msg_enable = DEFAULT_MSG;
3040 spin_lock_init(&gp->lock);
3041 spin_lock_init(&gp->tx_lock);
3042 mutex_init(&gp->pm_mutex);
3044 init_timer(&gp->link_timer);
3045 gp->link_timer.function = gem_link_timer;
3046 gp->link_timer.data = (unsigned long) gp;
3048 INIT_WORK(&gp->reset_task, gem_reset_task);
3050 gp->lstate = link_down;
3051 gp->timer_ticks = 0;
3054 gp->regs = ioremap(gemreg_base, gemreg_len);
3055 if (gp->regs == 0UL) {
3066 gp->of_node = pci_device_to_OF_node(pdev);
3071 gp->has_wol = 1;
3074 gem_get_cell(gp);
3077 gem_reset(gp);
3080 gp->phy_mii.dev = dev;
3081 gp->phy_mii.mdio_read = _phy_read;
3082 gp->phy_mii.mdio_write = _phy_write;
3084 gp->phy_mii.platform_data = gp->of_node;
3087 gp->want_autoneg = 1;
3090 if (gem_check_invariants(gp)) {
3098 gp->init_block = (struct gem_init_block *)
3100 &gp->gblock_dvma);
3101 if (!gp->init_block) {
3108 if (gem_get_device_address(gp))
3136 gem_init_phy(gp);
3138 spin_lock_irq(&gp->lock);
3139 gem_put_cell(gp);
3140 spin_unlock_irq(&gp->lock);
3157 if (gp->phy_type == phy_mii_mdio0 ||
3158 gp->phy_type == phy_mii_mdio1)
3160 gp->phy_mii.def ? gp->phy_mii.def->name : "no");
3172 gem_put_cell(gp);
3173 iounmap(gp->regs);