Lines matching references to "ep" in the EP93xx Ethernet driver (each entry: source line number, then the matching line):

177 #define rdb(ep, off)       __raw_readb((ep)->base_addr + (off))
178 #define rdw(ep, off)       __raw_readw((ep)->base_addr + (off))
179 #define rdl(ep, off)       __raw_readl((ep)->base_addr + (off))
180 #define wrb(ep, off, val)  __raw_writeb((val), (ep)->base_addr + (off))
181 #define wrw(ep, off, val)  __raw_writew((val), (ep)->base_addr + (off))
182 #define wrl(ep, off, val)  __raw_writel((val), (ep)->base_addr + (off))
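
These accessors (lines 177-182) wrap the __raw_* MMIO helpers, which do no byte-swapping and insert no barriers; base_addr is the ioremap()ed register window set up in probe (line 825). A minimal usage sketch, using only names that appear in this listing (the helper name itself is hypothetical):

    /* Read-modify-write a 32-bit MAC register through the accessors. */
    static void ep93xx_set_rxctl(struct ep93xx_priv *ep)
    {
        u32 rxctl;

        rxctl = rdl(ep, REG_RXCTL);                     /* 32-bit read at offset */
        wrl(ep, REG_RXCTL, rxctl | REG_RXCTL_DEFAULT);  /* 32-bit write back */
    }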
186 struct ep93xx_priv *ep = netdev_priv(dev);
190 wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
193 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
202 data = rdl(ep, REG_MIIDATA);
210 struct ep93xx_priv *ep = netdev_priv(dev);
213 wrl(ep, REG_MIIDATA, data);
214 wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
217 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
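
Lines 186-217 are the PHY management path: write a command to REG_MIICMD, poll the busy bit in REG_MIISTS, then (for reads) fetch REG_MIIDATA. A hedged reconstruction of the read side; the retry count, 1 ms sleep, and timeout value are illustrative rather than copied from the driver:

    static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
    {
        struct ep93xx_priv *ep = netdev_priv(dev);
        int data, i;

        wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);

        for (i = 0; i < 10; i++) {
            if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
                break;
            msleep(1);
        }

        if (i == 10) {
            pr_info("mdio read timed out\n");
            data = 0xffff;              /* conventional "no PHY" value */
        } else {
            data = rdl(ep, REG_MIIDATA);
        }

        return data;
    }

The write side (lines 210-217) is the mirror image: load REG_MIIDATA first, issue REG_MIICMD_WRITE, then poll the same busy bit.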
228 struct ep93xx_priv *ep = netdev_priv(dev);
239 entry = ep->rx_pointer;
240 rstat = ep->descs->rstat + entry;
282 struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
286 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
293 napi_gro_receive(&ep->napi, skb);
302 ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
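
Lines 228-302 belong to the RX path: frames land in fixed, pre-mapped DMA buffers and are copied into freshly allocated skbs. A sketch of the surrounding loop; the function name, the rstat0 field, the RSTAT0_RFP bit, and the length mask are assumptions, since they do not appear in this listing:

    static int ep93xx_rx(struct net_device *dev, int budget)
    {
        struct ep93xx_priv *ep = netdev_priv(dev);
        int processed = 0;

        while (processed < budget) {
            int entry = ep->rx_pointer;
            struct ep93xx_rstat *rstat = ep->descs->rstat + entry;
            struct sk_buff *skb;
            int length;

            if (!(rstat->rstat0 & RSTAT0_RFP))  /* assumed: no frame pending */
                break;
            length = rstat->rstat1 & 0xffff;    /* assumed length mask */
            rstat->rstat0 = 0;                  /* hand the status slot back */

            skb = netdev_alloc_skb(dev, length + 2);
            if (likely(skb != NULL)) {
                struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];

                skb_reserve(skb, 2);            /* align the IP header */
                dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
                                        length, DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
                skb_put(skb, length);
                skb->protocol = eth_type_trans(skb, dev);
                napi_gro_receive(&ep->napi, skb);
                processed++;
            } else {
                dev->stats.rx_dropped++;
            }

            ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
        }

        return processed;
    }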
311 struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
312 struct net_device *dev = ep->dev;
317 spin_lock_irq(&ep->rx_lock);
318 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
319 spin_unlock_irq(&ep->rx_lock);
323 wrw(ep, REG_RXDENQ, rx);
324 wrw(ep, REG_RXSTSENQ, rx);
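
Lines 311-324 come from the NAPI poll routine. The pattern: drain up to budget frames, and only when the ring runs dry re-enable the RX interrupt under rx_lock (which the hard IRQ handler also takes), then return the consumed descriptors to the MAC. A reconstruction, assuming the ep93xx_rx() helper sketched above and the standard napi_complete_done() API:

    static int ep93xx_poll(struct napi_struct *napi, int budget)
    {
        struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
        struct net_device *dev = ep->dev;
        int rx;

        rx = ep93xx_rx(dev, budget);            /* drain up to budget frames */
        if (rx < budget && napi_complete_done(napi, rx)) {
            spin_lock_irq(&ep->rx_lock);
            wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
            spin_unlock_irq(&ep->rx_lock);
        }

        if (rx) {
            wrw(ep, REG_RXDENQ, rx);            /* give descriptors back */
            wrw(ep, REG_RXSTSENQ, rx);          /* and their status slots */
        }

        return rx;
    }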
332 struct ep93xx_priv *ep = netdev_priv(dev);
342 entry = ep->tx_pointer;
343 ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
345 txd = &ep->descs->tdesc[entry];
350 skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
355 spin_lock_irq(&ep->tx_pending_lock);
356 ep->tx_pending++;
357 if (ep->tx_pending == TX_QUEUE_ENTRIES)
359 spin_unlock_irq(&ep->tx_pending_lock);
361 wrl(ep, REG_TXDENQ, 1);
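
Lines 332-361 are the transmit path: claim the next ring slot, copy the frame (checksum folded in by skb_copy_and_csum_dev()) into that slot's pre-mapped buffer, stop the queue when tx_pending reaches the ring size, and poke REG_TXDENQ to kick DMA. A sketch; the TDESC1_EOF bit name and the dma_sync call are assumptions:

    static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        struct ep93xx_priv *ep = netdev_priv(dev);
        struct ep93xx_tdesc *txd;
        int entry;

        if (unlikely(skb->len > MAX_PKT_SIZE)) {
            dev->stats.tx_dropped++;
            dev_kfree_skb(skb);
            return NETDEV_TX_OK;
        }

        entry = ep->tx_pointer;
        ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);

        txd = &ep->descs->tdesc[entry];
        txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
        skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
        dma_sync_single_for_device(dev->dev.parent, txd->buf_addr,
                                   skb->len, DMA_TO_DEVICE);
        dev_kfree_skb(skb);

        spin_lock_irq(&ep->tx_pending_lock);
        ep->tx_pending++;
        if (ep->tx_pending == TX_QUEUE_ENTRIES)
            netif_stop_queue(dev);
        spin_unlock_irq(&ep->tx_pending_lock);

        wrl(ep, REG_TXDENQ, 1);                 /* enqueue one descriptor */
        return NETDEV_TX_OK;
    }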
368 struct ep93xx_priv *ep = netdev_priv(dev);
373 spin_lock(&ep->tx_pending_lock);
379 entry = ep->tx_clean_pointer;
380 tstat = ep->descs->tstat + entry;
394 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
408 ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
409 if (ep->tx_pending == TX_QUEUE_ENTRIES)
411 ep->tx_pending--;
413 spin_unlock(&ep->tx_pending_lock);
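
Lines 368-413 clean the TX ring after completion interrupts. The tstat0 field and its TSTAT0_TXFP ("frame processed") and TSTAT0_TXWE ("transmitted without error") bits are assumed names; the tx_pending bookkeeping and the length extraction follow the fragments directly:

    static void ep93xx_tx_complete(struct net_device *dev)
    {
        struct ep93xx_priv *ep = netdev_priv(dev);
        int wake = 0;

        spin_lock(&ep->tx_pending_lock);
        for (;;) {
            int entry = ep->tx_clean_pointer;
            struct ep93xx_tstat *tstat = ep->descs->tstat + entry;
            u32 tstat0 = tstat->tstat0;         /* field name assumed */

            if (!(tstat0 & TSTAT0_TXFP))        /* no more completed frames */
                break;
            tstat->tstat0 = 0;

            if (tstat0 & TSTAT0_TXWE) {
                int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;

                dev->stats.tx_packets++;
                dev->stats.tx_bytes += length;
            } else {
                dev->stats.tx_errors++;
            }

            ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
            if (ep->tx_pending == TX_QUEUE_ENTRIES)
                wake = 1;                       /* ring was full: queue stopped */
            ep->tx_pending--;
        }
        spin_unlock(&ep->tx_pending_lock);

        if (wake)
            netif_wake_queue(dev);
    }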
422 struct ep93xx_priv *ep = netdev_priv(dev);
425 status = rdl(ep, REG_INTSTSC);
430 spin_lock(&ep->rx_lock);
431 if (likely(napi_schedule_prep(&ep->napi))) {
432 wrl(ep, REG_INTEN, REG_INTEN_TX);
433 __napi_schedule(&ep->napi);
435 spin_unlock(&ep->rx_lock);
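
Lines 422-435 are from the interrupt handler. Reading REG_INTSTSC returns and clears the latched status; RX work is deferred to NAPI with the RX source masked until the poll re-enables it, while TX completion runs inline. A sketch, assuming the REG_INTSTS_RX/REG_INTSTS_TX bit names and the ep93xx_tx_complete() helper above:

    static irqreturn_t ep93xx_irq(int irq, void *dev_id)
    {
        struct net_device *dev = dev_id;
        struct ep93xx_priv *ep = netdev_priv(dev);
        u32 status;

        status = rdl(ep, REG_INTSTSC);          /* read clears the latch */
        if (status == 0)
            return IRQ_NONE;                    /* shared line: not ours */

        if (status & REG_INTSTS_RX) {
            spin_lock(&ep->rx_lock);
            if (likely(napi_schedule_prep(&ep->napi))) {
                wrl(ep, REG_INTEN, REG_INTEN_TX);   /* mask RX until poll */
                __napi_schedule(&ep->napi);
            }
            spin_unlock(&ep->rx_lock);
        }

        if (status & REG_INTSTS_TX)
            ep93xx_tx_complete(dev);

        return IRQ_HANDLED;
    }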
444 static void ep93xx_free_buffers(struct ep93xx_priv *ep)
446 struct device *dev = ep->dev->dev.parent;
449 if (!ep->descs)
455 d = ep->descs->rdesc[i].buf_addr;
459 kfree(ep->rx_buf[i]);
465 d = ep->descs->tdesc[i].buf_addr;
469 kfree(ep->tx_buf[i]);
472 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
473 ep->descs_dma_addr);
474 ep->descs = NULL;
477 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
479 struct device *dev = ep->dev->dev.parent;
482 ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
483 &ep->descs_dma_addr, GFP_KERNEL);
484 if (ep->descs == NULL)
501 ep->rx_buf[i] = buf;
502 ep->descs->rdesc[i].buf_addr = d;
503 ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
520 ep->tx_buf[i] = buf;
521 ep->descs->tdesc[i].buf_addr = d;
527 ep93xx_free_buffers(ep);
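
Lines 444-527 manage the buffers: one dma_alloc_coherent() block holds all four rings (ep93xx_descs bundles rdesc/rstat/tdesc/tstat), while each ring slot gets a kmalloc()ed packet buffer that is streaming-mapped once and recorded in both the descriptor and the rx_buf[]/tx_buf[] shadow arrays. A sketch of the RX half of the allocator; the TX half (lines 520-521) is symmetric with DMA_TO_DEVICE:

    static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
    {
        struct device *dev = ep->dev->dev.parent;
        int i;

        ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
                                       &ep->descs_dma_addr, GFP_KERNEL);
        if (ep->descs == NULL)
            return 1;

        for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
            void *buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
            dma_addr_t d;

            if (buf == NULL)
                goto err;
            d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, d)) {
                kfree(buf);
                goto err;
            }

            ep->rx_buf[i] = buf;
            ep->descs->rdesc[i].buf_addr = d;
            ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
        }

        /* ... TX buffers set up the same way (lines 520-521) ... */
        return 0;

    err:
        ep93xx_free_buffers(ep);    /* unwinds whatever was mapped */
        return 1;
    }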
533 struct ep93xx_priv *ep = netdev_priv(dev);
537 wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
539 if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
549 wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9));
552 if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0)
553 wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8));
556 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc);
557 wrl(ep, REG_RXDQBADD, addr);
558 wrl(ep, REG_RXDCURADD, addr);
559 wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));
562 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat);
563 wrl(ep, REG_RXSTSQBADD, addr);
564 wrl(ep, REG_RXSTSQCURADD, addr);
565 wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat));
568 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc);
569 wrl(ep, REG_TXDQBADD, addr);
570 wrl(ep, REG_TXDQCURADD, addr);
571 wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc));
574 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat);
575 wrl(ep, REG_TXSTSQBADD, addr);
576 wrl(ep, REG_TXSTSQCURADD, addr);
577 wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat));
579 wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX);
580 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
581 wrl(ep, REG_GIINTMSK, 0);
584 if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0)
594 wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES);
595 wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES);
597 wrb(ep, REG_INDAD0, dev->dev_addr[0]);
598 wrb(ep, REG_INDAD1, dev->dev_addr[1]);
599 wrb(ep, REG_INDAD2, dev->dev_addr[2]);
600 wrb(ep, REG_INDAD3, dev->dev_addr[3]);
601 wrb(ep, REG_INDAD4, dev->dev_addr[4]);
602 wrb(ep, REG_INDAD5, dev->dev_addr[5]);
603 wrl(ep, REG_AFP, 0);
605 wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE);
607 wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT);
608 wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE);
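
Lines 533-608 bring the MAC up: soft reset with a busy-wait on REG_SELFCTL_RESET, MDC divisor and preamble setup, then the same three-register sequence for each of the four queues, followed by the unicast address, address filter, and frame-length limits. The repetition in lines 556-577 can be read through a hypothetical helper like this (purely an editorial refactor, not the driver's code):

    /* Per-queue programming pattern: base address, current address,
     * then the ring length in bytes. */
    static void ep93xx_set_queue(struct ep93xx_priv *ep, int badd,
                                 int curadd, int blen,
                                 dma_addr_t addr, int len)
    {
        wrl(ep, badd, addr);
        wrl(ep, curadd, addr);
        wrw(ep, blen, len);
    }

    /* e.g. the RX descriptor queue of lines 556-559: */
    ep93xx_set_queue(ep, REG_RXDQBADD, REG_RXDCURADD, REG_RXDQBLEN,
                     ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc),
                     RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));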
615 struct ep93xx_priv *ep = netdev_priv(dev);
618 wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
620 if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
631 struct ep93xx_priv *ep = netdev_priv(dev);
634 if (ep93xx_alloc_buffers(ep))
637 napi_enable(&ep->napi);
640 napi_disable(&ep->napi);
641 ep93xx_free_buffers(ep);
645 spin_lock_init(&ep->rx_lock);
646 ep->rx_pointer = 0;
647 ep->tx_clean_pointer = 0;
648 ep->tx_pointer = 0;
649 spin_lock_init(&ep->tx_pending_lock);
650 ep->tx_pending = 0;
652 err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
654 napi_disable(&ep->napi);
656 ep93xx_free_buffers(ep);
660 wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);
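
Lines 631-660 imply the open-path ordering: allocate the rings, enable NAPI, bring up the hardware (unwinding on failure), zero the ring indices, request the shared IRQ, and only then unmask the global interrupt. A hedged reconstruction; ep93xx_open, ep93xx_start_hw, and ep93xx_stop_hw are assumed names:

    static int ep93xx_open(struct net_device *dev)
    {
        struct ep93xx_priv *ep = netdev_priv(dev);
        int err;

        if (ep93xx_alloc_buffers(ep))
            return -ENOMEM;

        napi_enable(&ep->napi);
        if (ep93xx_start_hw(dev)) {
            napi_disable(&ep->napi);
            ep93xx_free_buffers(ep);
            return -EIO;
        }

        spin_lock_init(&ep->rx_lock);
        ep->rx_pointer = 0;
        ep->tx_clean_pointer = 0;
        ep->tx_pointer = 0;
        spin_lock_init(&ep->tx_pending_lock);
        ep->tx_pending = 0;

        err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
        if (err) {
            napi_disable(&ep->napi);
            ep93xx_stop_hw(dev);
            ep93xx_free_buffers(ep);
            return err;
        }

        wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE); /* unmask last */
        netif_start_queue(dev);

        return 0;
    }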
669 struct ep93xx_priv *ep = netdev_priv(dev);
671 napi_disable(&ep->napi);
674 wrl(ep, REG_GIINTMSK, 0);
675 free_irq(ep->irq, dev);
677 ep93xx_free_buffers(ep);
684 struct ep93xx_priv *ep = netdev_priv(dev);
687 return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
698 struct ep93xx_priv *ep = netdev_priv(dev);
700 mii_ethtool_get_link_ksettings(&ep->mii, cmd);
708 struct ep93xx_priv *ep = netdev_priv(dev);
709 return mii_ethtool_set_link_ksettings(&ep->mii, cmd);
714 struct ep93xx_priv *ep = netdev_priv(dev);
715 return mii_nway_restart(&ep->mii);
720 struct ep93xx_priv *ep = netdev_priv(dev);
721 return mii_link_ok(&ep->mii);
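
Lines 684-721 are thin wrappers over the generic MII library (generic_mii_ioctl(), mii_ethtool_get/set_link_ksettings(), mii_nway_restart(), mii_link_ok()). A sketch of how such wrappers are typically wired into ethtool_ops; the wrapper names are guesses from the pattern, while the ops fields are standard kernel API:

    static const struct ethtool_ops ep93xx_ethtool_ops = {
        .get_link           = ep93xx_get_link,           /* -> mii_link_ok() */
        .nway_reset         = ep93xx_nway_reset,         /* -> mii_nway_restart() */
        .get_link_ksettings = ep93xx_get_link_ksettings, /* -> mii_ethtool_get_link_ksettings() */
        .set_link_ksettings = ep93xx_set_link_ksettings, /* -> mii_ethtool_set_link_ksettings() */
    };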
763 struct ep93xx_priv *ep;
770 ep = netdev_priv(dev);
774 ep93xx_free_buffers(ep);
776 if (ep->base_addr != NULL)
777 iounmap(ep->base_addr);
779 if (ep->res != NULL) {
791 struct ep93xx_priv *ep;
810 ep = netdev_priv(dev);
811 ep->dev = dev;
813 netif_napi_add(dev, &ep->napi, ep93xx_poll);
817 ep->res = request_mem_region(mem->start, resource_size(mem),
819 if (ep->res == NULL) {
825 ep->base_addr = ioremap(mem->start, resource_size(mem));
826 if (ep->base_addr == NULL) {
831 ep->irq = irq;
833 ep->mii.phy_id = data->phy_id;
834 ep->mii.phy_id_mask = 0x1f;
835 ep->mii.reg_num_mask = 0x1f;
836 ep->mii.dev = dev;
837 ep->mii.mdio_read = ep93xx_mdio_read;
838 ep->mii.mdio_write = ep93xx_mdio_write;
839 ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */
851 dev->name, ep->irq, dev->dev_addr);
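
A note on the divisor chosen on line 839: IEEE 802.3 caps MDC at 2.5 MHz, and HCLK on the EP93xx can run at up to 100 MHz, so 100 MHz / 40 = 2.5 MHz is the smallest divisor that is safe at any HCLK rate. Lines 549 and 553 program it into REG_SELFCTL as (mdc_divisor - 1) << 9, and line 553 additionally sets bit 8 when BMSR bit 6 (0x0040, tested on line 552) reports that the PHY supports management-frame preamble suppression.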