Lines Matching defs:mp (occurrences of struct mace_data *mp in the MACE Ethernet driver)

91 static inline void mace_clean_rings(struct mace_data *mp);
112 struct mace_data *mp;
155 mp = netdev_priv(dev);
156 mp->mdev = mdev;
160 mp->mace = ioremap(dev->base_addr, 0x1000);
161 if (mp->mace == NULL) {
173 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
174 in_8(&mp->mace->chipid_lo);
177 mp = netdev_priv(dev);
178 mp->maccc = ENXMT | ENRCV;
180 mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
181 if (mp->tx_dma == NULL) {
186 mp->tx_dma_intr = macio_irq(mdev, 1);
188 mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
189 if (mp->rx_dma == NULL) {
194 mp->rx_dma_intr = macio_irq(mdev, 2);
196 mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
197 mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;
199 memset((char *) mp->tx_cmds, 0,
201 timer_setup(&mp->tx_timeout, mace_tx_timeout, 0);
202 spin_lock_init(&mp->lock);
203 mp->timeout_active = 0;
206 mp->port_aaui = port_aaui;
210 mp->port_aaui = 1;
213 mp->port_aaui = 1;
215 mp->port_aaui = 0;
232 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
234 printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
237 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
239 printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
251 mp->chipid >> 8, mp->chipid & 0xff);
262 iounmap(mp->rx_dma);
264 iounmap(mp->tx_dma);
266 iounmap(mp->mace);
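
The block above is the probe path: netdev_priv() reaches the per-device mace_data, the MACE register block and the two DBDMA channels are each ioremap()ed, their interrupts are claimed with request_irq(), and the error/teardown paths iounmap() whatever was mapped. A minimal sketch of that map-and-claim pattern, using hypothetical names (my_priv, my_intr, my_map_and_irq) and keeping the 0x1000 mapping size from the listing:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

struct my_priv {
	void __iomem *regs;	/* cf. mp->mace, mp->tx_dma, mp->rx_dma */
	int irq;		/* cf. mp->tx_dma_intr, mp->rx_dma_intr */
};

static irqreturn_t my_intr(int irq, void *dev_id)
{
	/* the real handlers (mace_interrupt, mace_txdma_intr, ...) do the work */
	return IRQ_HANDLED;
}

static int my_map_and_irq(struct net_device *dev, unsigned long base, int irq)
{
	struct my_priv *p = netdev_priv(dev);
	int rc;

	p->regs = ioremap(base, 0x1000);	/* one 4 KiB register page */
	if (p->regs == NULL)
		return -ENOMEM;

	rc = request_irq(irq, my_intr, 0, "my-dev", dev);
	if (rc) {
		iounmap(p->regs);	/* unwind, as the error path above does */
		return rc;
	}
	p->irq = irq;
	return 0;
}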
278 struct mace_data *mp;
284 mp = netdev_priv(dev);
289 free_irq(mp->tx_dma_intr, dev);
290 free_irq(mp->rx_dma_intr, dev);
292 iounmap(mp->rx_dma);
293 iounmap(mp->tx_dma);
294 iounmap(mp->mace);
318 struct mace_data *mp = netdev_priv(dev);
319 volatile struct mace __iomem *mb = mp->mace;
351 if (mp->chipid == BROKEN_ADDRCHG_REV)
362 if (mp->chipid != BROKEN_ADDRCHG_REV)
365 if (mp->port_aaui)
373 struct mace_data *mp = netdev_priv(dev);
374 volatile struct mace __iomem *mb = mp->mace;
380 if (mp->chipid == BROKEN_ADDRCHG_REV)
392 if (mp->chipid != BROKEN_ADDRCHG_REV)
398 struct mace_data *mp = netdev_priv(dev);
399 volatile struct mace __iomem *mb = mp->mace;
402 spin_lock_irqsave(&mp->lock, flags);
407 out_8(&mb->maccc, mp->maccc);
409 spin_unlock_irqrestore(&mp->lock, flags);
413 static inline void mace_clean_rings(struct mace_data *mp)
419 if (mp->rx_bufs[i] != NULL) {
420 dev_kfree_skb(mp->rx_bufs[i]);
421 mp->rx_bufs[i] = NULL;
424 for (i = mp->tx_empty; i != mp->tx_fill; ) {
425 dev_kfree_skb(mp->tx_bufs[i]);
433 struct mace_data *mp = netdev_priv(dev);
434 volatile struct mace __iomem *mb = mp->mace;
435 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
436 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
446 mace_clean_rings(mp);
447 memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
448 cp = mp->rx_cmds;
457 mp->rx_bufs[i] = skb;
464 mp->rx_bufs[i] = NULL;
466 mp->rx_fill = i;
467 mp->rx_empty = 0;
472 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));
476 out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
480 cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
482 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds));
486 out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
487 mp->tx_fill = 0;
488 mp->tx_empty = 0;
489 mp->tx_fullup = 0;
490 mp->tx_active = 0;
491 mp->tx_bad_runt = 0;
494 out_8(&mb->maccc, mp->maccc);
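
In the open path above, both command lists are made circular: the slot after the last real command is a branch whose cmd_dep holds the bus address of the first command, and that same address is written to the channel's cmdptr before the ring indices (rx_fill/rx_empty, tx_fill/tx_empty) are reset. A hedged sketch of that receive-ring construction, assuming the dbdma_cmd fields and command encodings from asm/dbdma.h and illustrative N_RX_RING/RX_BUFLEN values; init_rx_ring() is my own helper, not the driver's code:

#include <asm/byteorder.h>
#include <asm/dbdma.h>
#include <asm/io.h>

#define N_RX_RING	8	/* assumed ring size */
#define RX_BUFLEN	1536	/* assumed buffer length */

static void init_rx_ring(volatile struct dbdma_cmd *cmds,
			 volatile struct dbdma_regs __iomem *rd,
			 void *bufs[])
{
	volatile struct dbdma_cmd *cp = cmds;
	int i;

	for (i = 0; i < N_RX_RING; i++, cp++) {
		cp->req_count = cpu_to_le16(RX_BUFLEN);
		cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS);
		cp->phy_addr = cpu_to_le32(virt_to_bus(bufs[i]));
		cp->xfer_status = 0;
	}
	/* extra final slot: unconditional branch back to the first command */
	cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
	cp->cmd_dep = cpu_to_le32(virt_to_bus(cmds));

	/* point the channel at the head of the command list */
	out_le32(&rd->cmdptr, virt_to_bus(cmds));
}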
503 struct mace_data *mp = netdev_priv(dev);
504 volatile struct mace __iomem *mb = mp->mace;
505 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
506 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
516 mace_clean_rings(mp);
523 struct mace_data *mp = netdev_priv(dev);
525 if (mp->timeout_active)
526 del_timer(&mp->tx_timeout);
527 mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
528 add_timer(&mp->tx_timeout);
529 mp->timeout_active = 1;
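
Lines 523-529 re-arm a plain timer_list watchdog around each transmit, and the handler at line 808 recovers its mace_data from the timer pointer with from_timer(). A small sketch of that timer_setup()/from_timer() pairing, with hypothetical names and an illustrative period:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_priv {
	struct timer_list tx_timeout;	/* cf. mp->tx_timeout */
	int timeout_active;
};

static void my_tx_timeout(struct timer_list *t)
{
	/* recover the enclosing private struct from the timer pointer */
	struct my_priv *p = from_timer(p, t, tx_timeout);

	p->timeout_active = 0;
	/* ... reset the hardware and restart any pending transmit ... */
}

static void my_set_timeout(struct my_priv *p)
{
	if (p->timeout_active)
		del_timer(&p->tx_timeout);
	p->tx_timeout.expires = jiffies + HZ / 4;	/* illustrative period */
	add_timer(&p->tx_timeout);
	p->timeout_active = 1;
}

/* once, at probe/init time:  timer_setup(&p->tx_timeout, my_tx_timeout, 0); */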
534 struct mace_data *mp = netdev_priv(dev);
535 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
541 spin_lock_irqsave(&mp->lock, flags);
542 fill = mp->tx_fill;
546 if (next == mp->tx_empty) {
548 mp->tx_fullup = 1;
549 spin_unlock_irqrestore(&mp->lock, flags);
552 spin_unlock_irqrestore(&mp->lock, flags);
560 mp->tx_bufs[fill] = skb;
561 cp = mp->tx_cmds + NCMDS_TX * fill;
565 np = mp->tx_cmds + NCMDS_TX * next;
569 spin_lock_irqsave(&mp->lock, flags);
570 mp->tx_fill = next;
571 if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
575 ++mp->tx_active;
580 if (next == mp->tx_empty)
582 spin_unlock_irqrestore(&mp->lock, flags);
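
The transmit path above treats tx_fill and tx_empty as producer/consumer indices into a fixed-size descriptor ring: tx_fill is where the next frame goes, tx_empty is the oldest un-reclaimed slot, and the ring counts as full when advancing tx_fill would land on tx_empty, at which point tx_fullup is set and the queue is stopped. A standalone illustration of that convention (the struct and the N_TX_RING value here are mine, not the driver's):

#include <stdio.h>

#define N_TX_RING 8		/* one slot always stays unused */

struct ring {
	int fill;		/* next slot the transmit path will use */
	int empty;		/* oldest slot not yet reclaimed by the IRQ */
};

static int ring_full(const struct ring *r)
{
	int next = r->fill + 1;

	if (next >= N_TX_RING)
		next = 0;
	return next == r->empty;	/* cf. "if (next == mp->tx_empty)" */
}

static int ring_in_flight(const struct ring *r)
{
	return (r->fill - r->empty + N_TX_RING) % N_TX_RING;
}

int main(void)
{
	struct ring r = { .fill = 6, .empty = 7 };

	printf("full=%d, in flight=%d\n", ring_full(&r), ring_in_flight(&r));
	return 0;
}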
589 struct mace_data *mp = netdev_priv(dev);
590 volatile struct mace __iomem *mb = mp->mace;
595 spin_lock_irqsave(&mp->lock, flags);
596 mp->maccc &= ~PROM;
598 mp->maccc |= PROM;
622 if (mp->chipid == BROKEN_ADDRCHG_REV)
631 if (mp->chipid != BROKEN_ADDRCHG_REV)
635 out_8(&mb->maccc, mp->maccc);
636 spin_unlock_irqrestore(&mp->lock, flags);
639 static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
641 volatile struct mace __iomem *mb = mp->mace;
663 struct mace_data *mp = netdev_priv(dev);
664 volatile struct mace __iomem *mb = mp->mace;
665 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
672 spin_lock_irqsave(&mp->lock, flags);
675 mace_handle_misc_intrs(mp, intr, dev);
677 i = mp->tx_empty;
679 del_timer(&mp->tx_timeout);
680 mp->timeout_active = 0;
688 mace_handle_misc_intrs(mp, intr, dev);
689 if (mp->tx_bad_runt) {
691 mp->tx_bad_runt = 0;
727 cp = mp->tx_cmds + NCMDS_TX * i;
738 mp->tx_bad_runt = 1;
755 if (i == mp->tx_fill) {
768 dev->stats.tx_bytes += mp->tx_bufs[i]->len;
771 dev_consume_skb_irq(mp->tx_bufs[i]);
772 --mp->tx_active;
781 if (i != mp->tx_empty) {
782 mp->tx_fullup = 0;
785 mp->tx_empty = i;
786 i += mp->tx_active;
789 if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
792 cp = mp->tx_cmds + NCMDS_TX * i;
795 ++mp->tx_active;
798 } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
802 spin_unlock_irqrestore(&mp->lock, flags);
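
Inside the interrupt handler above, completed transmits are reclaimed by walking from tx_empty toward tx_fill, crediting the statistics, freeing each skb with dev_consume_skb_irq(), and decrementing tx_active before further descriptors are kicked off. A hedged sketch of that reclaim step; my_priv, slot_done() and the ring size are illustrative, not the driver's code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>

#define N_TX_RING 8	/* assumed ring size */

struct my_priv {
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill, tx_empty, tx_active;
};

/* stand-in for checking the descriptor's completion status */
static bool slot_done(struct my_priv *p, int i)
{
	return true;
}

static void reclaim_tx(struct net_device *dev, struct my_priv *p)
{
	int i = p->tx_empty;

	while (i != p->tx_fill && slot_done(p, i)) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += p->tx_bufs[i]->len;
		dev_consume_skb_irq(p->tx_bufs[i]);	/* sent, not dropped */
		--p->tx_active;
		if (++i >= N_TX_RING)
			i = 0;
	}
	p->tx_empty = i;
}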
808 struct mace_data *mp = from_timer(mp, t, tx_timeout);
809 struct net_device *dev = macio_get_drvdata(mp->mdev);
810 volatile struct mace __iomem *mb = mp->mace;
811 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
812 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
817 spin_lock_irqsave(&mp->lock, flags);
818 mp->timeout_active = 0;
819 if (mp->tx_active == 0 && !mp->tx_bad_runt)
823 mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);
825 cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;
841 i = mp->tx_empty;
842 mp->tx_active = 0;
844 if (mp->tx_bad_runt) {
845 mp->tx_bad_runt = 0;
846 } else if (i != mp->tx_fill) {
847 dev_kfree_skb_irq(mp->tx_bufs[i]);
850 mp->tx_empty = i;
852 mp->tx_fullup = 0;
854 if (i != mp->tx_fill) {
855 cp = mp->tx_cmds + NCMDS_TX * i;
860 ++mp->tx_active;
866 out_8(&mb->maccc, mp->maccc);
869 spin_unlock_irqrestore(&mp->lock, flags);
880 struct mace_data *mp = netdev_priv(dev);
881 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
890 spin_lock_irqsave(&mp->lock, flags);
891 for (i = mp->rx_empty; i != mp->rx_fill; ) {
892 cp = mp->rx_cmds + i;
898 np = mp->rx_cmds + next;
899 if (next != mp->rx_fill &&
909 skb = mp->rx_bufs[i];
936 mp->rx_bufs[i] = NULL;
948 mp->rx_empty = i;
950 i = mp->rx_fill;
955 if (next == mp->rx_empty)
957 cp = mp->rx_cmds + i;
958 skb = mp->rx_bufs[i];
963 mp->rx_bufs[i] = skb;
980 if (i != mp->rx_fill) {
982 mp->rx_fill = i;
984 spin_unlock_irqrestore(&mp->lock, flags);
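
A pattern that recurs through every path listed here: any code that touches the ring indices or the maccc shadow takes mp->lock with spin_lock_irqsave(), because the same fields are also updated from the hard-interrupt handlers. Sketch with a hypothetical my_priv:

#include <linux/spinlock.h>

struct my_priv {
	spinlock_t lock;	/* cf. mp->lock, initialised with spin_lock_init() */
	int tx_fill, tx_empty;
};

static void touch_ring_state(struct my_priv *p)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);	/* also masks local interrupts */
	/* ... read or update state shared with the interrupt handlers ... */
	spin_unlock_irqrestore(&p->lock, flags);
}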