Lines matching references to "ip" in the SGI IOC3 Ethernet driver (ioc3-eth.c)

36 #include <linux/ip.h>
48 #include <net/ip.h>
109 static void ioc3_start(struct ioc3_private *ip);
110 static inline void ioc3_stop(struct ioc3_private *ip);
113 static void ioc3_free_rx_bufs(struct ioc3_private *ip);
114 static inline void ioc3_clean_tx_ring(struct ioc3_private *ip);
123 static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb,
139 d = dma_map_single(ip->dma_dev, new_skb->data,
142 if (dma_mapping_error(ip->dma_dev, d)) {
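Lines 123-142 show the driver's alloc-then-map pattern for RX buffers: a fresh skb is DMA-mapped and the mapping is checked before the buffer is committed to the ring. A hedged kernel-style sketch of that shape; the helper name and RX_BUF_SIZE are assumptions, not the driver's literal code:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define RX_BUF_SIZE 2048	/* assumed buffer size for illustration */

static int rx_buf_alloc(struct device *dma_dev, struct sk_buff **skbp,
			dma_addr_t *dp)
{
	struct sk_buff *skb = alloc_skb(RX_BUF_SIZE, GFP_ATOMIC);
	dma_addr_t d;

	if (!skb)
		return -ENOMEM;

	d = dma_map_single(dma_dev, skb->data, RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, d)) {
		dev_kfree_skb_any(skb);	/* never expose an unmapped buffer */
		return -ENOMEM;
	}

	*skbp = skb;
	*dp = d;
	return 0;
}
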
229 struct ioc3_private *ip = netdev_priv(dev);
233 &ip->regs->emar_h);
238 &ip->regs->emar_l);
243 struct ioc3_private *ip = netdev_priv(dev);
248 spin_lock_irq(&ip->ioc3_lock);
250 spin_unlock_irq(&ip->ioc3_lock);
260 struct ioc3_private *ip = netdev_priv(dev);
261 struct ioc3_ethregs *regs = ip->regs;
275 struct ioc3_private *ip = netdev_priv(dev);
276 struct ioc3_ethregs *regs = ip->regs;
286 static int ioc3_mii_init(struct ioc3_private *ip);
290 struct ioc3_private *ip = netdev_priv(dev);
291 struct ioc3_ethregs *regs = ip->regs;
314 * malformed packet we'll try to access the packet at ip header +
367 struct ioc3_private *ip = netdev_priv(dev);
375 rxr = ip->rxr; /* Ring base */
376 rx_entry = ip->rx_ci; /* RX consume index */
377 n_entry = ip->rx_pi;
379 skb = ip->rx_skbs[rx_entry];
390 if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) {
405 dma_unmap_single(ip->dma_dev, rxr[rx_entry],
410 ip->rx_skbs[rx_entry] = NULL; /* Poison */
429 ip->rx_skbs[n_entry] = new_skb;
436 skb = ip->rx_skbs[rx_entry];
440 writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir);
441 ip->rx_pi = n_entry;
442 ip->rx_ci = rx_entry;
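Lines 440-442 close out the RX pass: the produce index is shifted into a byte offset (each ring entry is an 8-byte DMA address, hence the << 3), OR-ed with ERPIR_ARM to re-arm the interrupt, and the software indices are stored back. A small userspace illustration of the index arithmetic; RX_RING_ENTRIES = 512 is an assumption, not quoted from the driver:

#include <stdio.h>

#define RX_RING_ENTRIES 512		/* assumed power-of-two ring size */
#define RX_RING_MASK	(RX_RING_ENTRIES - 1)

int main(void)
{
	unsigned int ci = 510;		/* consume index near the wrap point */

	for (int i = 0; i < 4; i++) {
		/* 64-bit buffer pointers: byte offset = index << 3 */
		printf("entry %3u -> byte offset %u\n", ci, ci << 3);
		ci = (ci + 1) & RX_RING_MASK;	/* wrap at ring end */
	}
	return 0;
}
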
447 struct ioc3_private *ip = netdev_priv(dev);
448 struct ioc3_ethregs *regs = ip->regs;
454 spin_lock(&ip->ioc3_lock);
458 o_entry = ip->tx_ci;
464 skb = ip->tx_skbs[o_entry];
467 ip->tx_skbs[o_entry] = NULL;
477 ip->txqlen -= packets;
479 if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES)
482 ip->tx_ci = o_entry;
483 spin_unlock(&ip->ioc3_lock);
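Lines 477-483 are the completion side of the TX queue-depth accounting: txqlen drops by the number of reclaimed packets and the queue restarts once there is room (the transmit path, line 1092 below, stops it at TX_RING_ENTRIES - 1). A userspace model of that stop/wake logic; TX_RING_ENTRIES = 128 is assumed:

#include <stdbool.h>
#include <stdio.h>

#define TX_RING_ENTRIES 128		/* assumed ring size */

static int txqlen;
static bool queue_stopped;

static void xmit_one(void)
{
	txqlen++;
	if (txqlen >= TX_RING_ENTRIES - 1)	/* ring nearly full: stop */
		queue_stopped = true;
}

static void tx_complete(int packets)
{
	txqlen -= packets;
	if (queue_stopped && txqlen < TX_RING_ENTRIES) {   /* room again */
		queue_stopped = false;
		printf("queue restarted at depth %d\n", txqlen);
	}
}

int main(void)
{
	for (int i = 0; i < TX_RING_ENTRIES - 1; i++)
		xmit_one();
	printf("stopped=%d depth=%d\n", queue_stopped, txqlen);
	tx_complete(16);
	return 0;
}
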
494 struct ioc3_private *ip = netdev_priv(dev);
496 spin_lock(&ip->ioc3_lock);
511 ioc3_stop(ip);
512 ioc3_free_rx_bufs(ip);
513 ioc3_clean_tx_ring(ip);
518 spin_unlock(&ip->ioc3_lock);
521 ioc3_start(ip);
522 ioc3_mii_init(ip);
526 spin_unlock(&ip->ioc3_lock);
534 struct ioc3_private *ip = netdev_priv(dev_id);
535 struct ioc3_ethregs *regs = ip->regs;
553 static inline void ioc3_setup_duplex(struct ioc3_private *ip)
555 struct ioc3_ethregs *regs = ip->regs;
557 spin_lock_irq(&ip->ioc3_lock);
559 if (ip->mii.full_duplex) {
561 ip->emcr |= EMCR_DUPLEX;
564 ip->emcr &= ~EMCR_DUPLEX;
566 writel(ip->emcr, &regs->emcr);
568 spin_unlock_irq(&ip->ioc3_lock);
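ioc3_setup_duplex() (lines 553-568) updates the duplex bit in ip->emcr, a software shadow of the MAC control register, then writes the whole shadow back under the lock. A userspace model of that shadowed read-modify-write; the EMCR_DUPLEX bit value is invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define EMCR_DUPLEX (1u << 9)	/* assumed bit position */

static uint32_t emcr;		/* software shadow of the HW register */

static void hw_write(uint32_t v) { printf("emcr <- %08x\n", v); }

static void setup_duplex(int full)
{
	if (full)
		emcr |= EMCR_DUPLEX;
	else
		emcr &= ~EMCR_DUPLEX;
	hw_write(emcr);		/* the whole shadow goes to hardware */
}

int main(void)
{
	setup_duplex(1);
	setup_duplex(0);
	return 0;
}
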
573 struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);
576 mii_check_media(&ip->mii, 1, 0);
577 ioc3_setup_duplex(ip);
579 ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */
580 add_timer(&ip->ioc3_timer);
587 static int ioc3_mii_init(struct ioc3_private *ip)
593 word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1);
596 ip->mii.phy_id = i;
600 ip->mii.phy_id = -1;
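ioc3_mii_init() (lines 587-600) probes all 32 possible PHY addresses by reading MII_PHYSID1 and records the first address that answers, falling back to -1 when no PHY is found. A userspace sketch of that scan, with a stubbed MDIO read standing in for ioc3_mdio_read():

#include <stdio.h>

#define MII_PHYSID1 0x02	/* standard MII register number */

static int fake_mdio_read(int phy, int reg)
{
	/* pretend a PHY sits at address 4; everything else floats high */
	return (phy == 4 && reg == MII_PHYSID1) ? 0x0141 : 0xffff;
}

int main(void)
{
	int phy_id = -1;

	for (int i = 0; i < 32; i++) {
		int word = fake_mdio_read(i, MII_PHYSID1);

		if (word != 0xffff && word != 0x0000) {	/* PHY answered */
			phy_id = i;
			break;
		}
	}
	printf("phy_id = %d\n", phy_id);
	return 0;
}
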
604 static void ioc3_mii_start(struct ioc3_private *ip)
606 ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. */
607 add_timer(&ip->ioc3_timer);
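Both timer users (lines 579 and 606) arm the media-check timer 1.2 seconds out: (12 * HZ) / 10 is simply 1.2 s expressed in jiffies, independent of the configured tick rate. A quick check of the arithmetic for common HZ values:

#include <stdio.h>

int main(void)
{
	const int hz[] = { 100, 250, 300, 1000 };

	for (int i = 0; i < 4; i++)
		printf("HZ=%4d -> %d jiffies = %.1f s\n",
		       hz[i], 12 * hz[i] / 10,
		       (12 * hz[i] / 10) / (double)hz[i]);
	return 0;
}
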
610 static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry)
615 desc = &ip->txr[entry];
620 dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1),
625 dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2),
630 static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
636 skb = ip->tx_skbs[i];
638 ioc3_tx_unmap(ip, i);
639 ip->tx_skbs[i] = NULL;
642 ip->txr[i].cmd = 0;
644 ip->tx_pi = 0;
645 ip->tx_ci = 0;
648 static void ioc3_free_rx_bufs(struct ioc3_private *ip)
653 n_entry = ip->rx_ci;
654 rx_entry = ip->rx_pi;
657 skb = ip->rx_skbs[n_entry];
659 dma_unmap_single(ip->dma_dev,
660 be64_to_cpu(ip->rxr[n_entry]),
670 struct ioc3_private *ip = netdev_priv(dev);
680 if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d))
684 ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
686 ip->rx_ci = 0;
687 ip->rx_pi = RX_BUFFS;
692 static inline void ioc3_ssram_disc(struct ioc3_private *ip)
694 struct ioc3_ethregs *regs = ip->regs;
695 u32 *ssram0 = &ip->ssram[0x0000];
696 u32 *ssram1 = &ip->ssram[0x4000];
709 ip->emcr |= EMCR_RAMPAR;
712 ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR;
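ioc3_ssram_disc() (lines 692-712) sizes the on-chip SSRAM by probing two locations 0x4000 u32-words (64 KiB) apart: if a write to the second aliases onto the first, only the small SSRAM is fitted and EMCR_BUFSIZ stays clear (line 948 later reports 64 vs 128 kbyte accordingly). A userspace model of the aliasing probe; the wraparound mask is an assumption:

#include <stdio.h>
#include <stdint.h>

#define SMALL_WORDS 0x4000	/* simulated small SSRAM: 16K x u32 = 64 KiB */

static uint32_t ram[SMALL_WORDS];

/* in the small part, word 0x4000 wraps back onto word 0x0000 */
static uint32_t rd(unsigned int i) { return ram[i % SMALL_WORDS]; }
static void wr(unsigned int i, uint32_t v) { ram[i % SMALL_WORDS] = v; }

int main(void)
{
	wr(0x0000, 0x5a5a5a5a);
	wr(0x4000, 0xa5a5a5a5);	/* aliases the first write here */

	if (rd(0x0000) != 0x5a5a5a5a)
		printf("aliased: 64 KiB SSRAM\n");
	else
		printf("distinct: 128 KiB SSRAM\n");
	return 0;
}
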
718 struct ioc3_private *ip = netdev_priv(dev);
719 struct ioc3_ethregs *regs = ip->regs;
721 del_timer_sync(&ip->ioc3_timer); /* Kill if running */
735 writel(ip->ehar_h, &regs->ehar_h);
736 writel(ip->ehar_l, &regs->ehar_l);
740 static void ioc3_start(struct ioc3_private *ip)
742 struct ioc3_ethregs *regs = ip->regs;
746 ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC);
749 writel(ip->rx_ci << 3, &regs->ercir);
750 writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir);
752 ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC);
754 ip->txqlen = 0; /* nothing queued */
759 writel(ip->tx_pi << 7, &regs->etpir);
760 writel(ip->tx_ci << 7, &regs->etcir);
763 ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
765 writel(ip->emcr, &regs->emcr);
772 static inline void ioc3_stop(struct ioc3_private *ip)
774 struct ioc3_ethregs *regs = ip->regs;
783 struct ioc3_private *ip = netdev_priv(dev);
785 ip->ehar_h = 0;
786 ip->ehar_l = 0;
793 ioc3_start(ip);
794 ioc3_mii_start(ip);
802 struct ioc3_private *ip = netdev_priv(dev);
804 del_timer_sync(&ip->ioc3_timer);
808 ioc3_stop(ip);
810 ioc3_free_rx_bufs(ip);
811 ioc3_clean_tx_ring(ip);
831 struct ioc3_private *ip;
852 ip = netdev_priv(dev);
853 ip->dma_dev = pdev->dev.parent;
854 ip->regs = devm_platform_ioremap_resource(pdev, 0);
855 if (IS_ERR(ip->regs)) {
856 err = PTR_ERR(ip->regs);
860 ip->ssram = devm_platform_ioremap_resource(pdev, 1);
861 if (IS_ERR(ip->ssram)) {
862 err = PTR_ERR(ip->ssram);
879 spin_lock_init(&ip->ioc3_lock);
880 timer_setup(&ip->ioc3_timer, ioc3_timer, 0);
882 ioc3_stop(ip);
885 ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma,
887 if (!ip->rxr) {
894 ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1,
895 &ip->txr_dma, GFP_KERNEL);
896 if (!ip->tx_ring) {
902 ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);
903 ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K);
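Lines 894-903 over-allocate the TX ring by SZ_16K - 1 bytes so that both the CPU pointer and the DMA address can be rounded up to the 16 KiB boundary the hardware requires; for power-of-two a, PTR_ALIGN()/ALIGN() round x up to ((x + a - 1) & ~(a - 1)). A userspace demo of the same trick, with malloc() standing in for dma_alloc_coherent() and the descriptor sizing assumed:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define SZ_16K		0x4000UL
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	size_t ring_size = 128 * 128;	/* assumed: 128 descs x 128 B each */
	void *raw = malloc(ring_size + SZ_16K - 1);
	uintptr_t aligned;

	if (!raw)
		return 1;
	aligned = ALIGN_UP((uintptr_t)raw, SZ_16K);

	/* aligned .. aligned + ring_size always fits inside the raw block */
	printf("raw %p -> aligned %#lx (slack %lu bytes)\n",
	       raw, (unsigned long)aligned,
	       (unsigned long)(aligned - (uintptr_t)raw));
	free(raw);
	return 0;
}
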
907 ip->mii.phy_id_mask = 0x1f;
908 ip->mii.reg_num_mask = 0x1f;
909 ip->mii.dev = dev;
910 ip->mii.mdio_read = ioc3_mdio_read;
911 ip->mii.mdio_write = ioc3_mdio_write;
913 ioc3_mii_init(ip);
915 if (ip->mii.phy_id == -1) {
921 ioc3_mii_start(ip);
922 ioc3_ssram_disc(ip);
932 sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
933 sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
939 mii_check_media(&ip->mii, 1, 1);
940 ioc3_setup_duplex(ip);
946 ip->mii.phy_id, vendor, model, rev);
948 ip->emcr & EMCR_BUFSIZ ? 128 : 64);
953 del_timer_sync(&ip->ioc3_timer);
954 if (ip->rxr)
955 dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
956 ip->rxr_dma);
957 if (ip->tx_ring)
958 dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring,
959 ip->txr_dma);
968 struct ioc3_private *ip = netdev_priv(dev);
970 dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
971 dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma);
974 del_timer_sync(&ip->ioc3_timer);
981 struct ioc3_private *ip = netdev_priv(dev);
1033 spin_lock_irq(&ip->ioc3_lock);
1038 produce = ip->tx_pi;
1039 desc = &ip->txr[produce];
1061 d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE);
1062 if (dma_mapping_error(ip->dma_dev, d1))
1064 		d2 = dma_map_single(ip->dma_dev, (void *)b2, s2, DMA_TO_DEVICE);
1065 if (dma_mapping_error(ip->dma_dev, d2)) {
1066 			dma_unmap_single(ip->dma_dev, d1, s1, DMA_TO_DEVICE);
1077 d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE);
1078 if (dma_mapping_error(ip->dma_dev, d))
1085 ip->tx_skbs[produce] = skb; /* Remember skb */
1087 ip->tx_pi = produce;
1088 writel(produce << 7, &ip->regs->etpir); /* Fire ... */
1090 ip->txqlen++;
1092 if (ip->txqlen >= (TX_RING_ENTRIES - 1))
1095 spin_unlock_irq(&ip->ioc3_lock);
1103 spin_unlock_irq(&ip->ioc3_lock);
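The two-fragment transmit path (lines 1061-1066) has to unwind on partial failure: if the second dma_map_single() fails, the first mapping is released, with the size it was mapped with, before the packet is dropped. A hedged kernel-style sketch of that shape; the helper name and signature are illustrative, not the driver's:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_two_frags(struct device *dev, void *p1, size_t s1,
			 void *p2, size_t s2, dma_addr_t *d1, dma_addr_t *d2)
{
	*d1 = dma_map_single(dev, p1, s1, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *d1))
		return -ENOMEM;

	*d2 = dma_map_single(dev, p2, s2, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *d2)) {
		/* roll back the first mapping, matching its mapped size */
		dma_unmap_single(dev, *d1, s1, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	return 0;
}
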
1110 struct ioc3_private *ip = netdev_priv(dev);
1114 spin_lock_irq(&ip->ioc3_lock);
1116 ioc3_stop(ip);
1117 ioc3_free_rx_bufs(ip);
1118 ioc3_clean_tx_ring(ip);
1123 spin_unlock_irq(&ip->ioc3_lock);
1126 ioc3_start(ip);
1127 ioc3_mii_init(ip);
1128 ioc3_mii_start(ip);
1130 spin_unlock_irq(&ip->ioc3_lock);
1168 struct ioc3_private *ip = netdev_priv(dev);
1170 spin_lock_irq(&ip->ioc3_lock);
1171 mii_ethtool_get_link_ksettings(&ip->mii, cmd);
1172 spin_unlock_irq(&ip->ioc3_lock);
1180 struct ioc3_private *ip = netdev_priv(dev);
1183 spin_lock_irq(&ip->ioc3_lock);
1184 rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
1185 spin_unlock_irq(&ip->ioc3_lock);
1192 struct ioc3_private *ip = netdev_priv(dev);
1195 spin_lock_irq(&ip->ioc3_lock);
1196 rc = mii_nway_restart(&ip->mii);
1197 spin_unlock_irq(&ip->ioc3_lock);
1204 struct ioc3_private *ip = netdev_priv(dev);
1207 spin_lock_irq(&ip->ioc3_lock);
1208 rc = mii_link_ok(&ip->mii);
1209 spin_unlock_irq(&ip->ioc3_lock);
1224 struct ioc3_private *ip = netdev_priv(dev);
1227 spin_lock_irq(&ip->ioc3_lock);
1228 rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
1229 spin_unlock_irq(&ip->ioc3_lock);
1236 struct ioc3_private *ip = netdev_priv(dev);
1237 struct ioc3_ethregs *regs = ip->regs;
1241 spin_lock_irq(&ip->ioc3_lock);
1244 ip->emcr |= EMCR_PROMISC;
1245 writel(ip->emcr, &regs->emcr);
1248 ip->emcr &= ~EMCR_PROMISC;
1249 writel(ip->emcr, &regs->emcr); /* Clear promiscuous. */
1258 ip->ehar_h = 0xffffffff;
1259 ip->ehar_l = 0xffffffff;
1264 ip->ehar_h = ehar >> 32;
1265 ip->ehar_l = ehar & 0xffffffff;
1267 writel(ip->ehar_h, &regs->ehar_h);
1268 writel(ip->ehar_l, &regs->ehar_l);
1271 spin_unlock_irq(&ip->ioc3_lock);
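ioc3_set_multicast_list() (lines 1236-1271) keeps the 64-bit multicast hash filter in software and splits it across the two 32-bit EHAR registers before writing: high word via ehar >> 32, low word via the 0xffffffff mask, with all-ones used for the allmulti case. A userspace demo of the split; set_bit_for() is a stand-in for the driver's real hash function:

#include <stdio.h>
#include <stdint.h>

static uint64_t set_bit_for(uint64_t ehar, unsigned int hash)
{
	return ehar | (1ULL << (hash & 63));	/* one filter bit per hash */
}

int main(void)
{
	uint64_t ehar = 0;

	ehar = set_bit_for(ehar, 7);
	ehar = set_bit_for(ehar, 45);

	uint32_t ehar_h = ehar >> 32;		/* high register half */
	uint32_t ehar_l = ehar & 0xffffffff;	/* low register half */

	printf("ehar_h=%08x ehar_l=%08x\n", ehar_h, ehar_l);
	return 0;
}
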