Lines matching defs:xl_priv
in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/tokenring/

Each entry below is the original source line number followed by the matching line. xl_priv is the per-device private data (struct xl_private, reached via netdev_priv()) of the 3Com 3C359 Token Ring PCI driver.

161 struct xl_private *xl_priv = netdev_priv(dev);
163 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
166 printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
167 xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
170 txd = &(xl_priv->xl_tx_ring[i]) ;
184 struct xl_private *xl_priv = netdev_priv(dev);
186 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
189 printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
192 /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
193 rxd = &(xl_priv->xl_rx_ring[i]) ;
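
Note: the two clusters above (source lines 161-170 and 184-193) are debug helpers that walk the TX and RX descriptor rings and printk their contents. A minimal reconstruction of the TX-side helper, assuming only what is visible in this listing (struct xl_private, struct xl_tx_desc and XL_TX_RING_SIZE come from the driver's private header):

    static void xl_dump_tx_ring(struct net_device *dev)
    {
            struct xl_private *xl_priv = netdev_priv(dev);
            struct xl_tx_desc *txd;
            int i;

            printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",
                   xl_priv->tx_ring_head, xl_priv->tx_ring_tail,
                   xl_priv->free_ring_entries);
            for (i = 0; i < XL_TX_RING_SIZE; i++) {
                    txd = &(xl_priv->xl_tx_ring[i]);
                    printk("%d: fsh %08x, next %08x, buf %08x, len %08x\n", i,
                           le32_to_cpu(txd->framestartheader),
                           le32_to_cpu(txd->dnnextptr),
                           le32_to_cpu(txd->buffer),
                           le32_to_cpu(txd->buffer_length));
            }
    }
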
218 struct xl_private *xl_priv = netdev_priv(dev);
219 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
250 struct xl_private *xl_priv = netdev_priv(dev);
251 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
293 struct xl_private *xl_priv ;
320 xl_priv = netdev_priv(dev);
329 xl_priv->xl_card_name = pci_name(pdev);
330 xl_priv->xl_mmio=ioremap(pci_resource_start(pdev,1), XL_IO_SPACE);
331 xl_priv->pdev = pdev ;
334 xl_priv->pkt_buf_sz = PKT_BUF_SZ ;
336 xl_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
338 dev->mtu = xl_priv->pkt_buf_sz - TR_HLEN ;
339 xl_priv->xl_ring_speed = ringspeed[card_no] ;
340 xl_priv->xl_message_level = message_level[card_no] ;
341 xl_priv->xl_functional_addr[0] = xl_priv->xl_functional_addr[1] = xl_priv->xl_functional_addr[2] = xl_priv->xl_functional_addr[3] = 0 ;
342 xl_priv->xl_copy_all_options = 0 ;
345 iounmap(xl_priv->xl_mmio) ;
358 iounmap(xl_priv->xl_mmio) ;
364 printk(KERN_INFO "3C359: %s registered as: %s\n",xl_priv->xl_card_name,dev->name) ;
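
Note: source lines 293-364 are the probe path. The pattern visible above: reach the private area with netdev_priv(), ioremap BAR 1 into xl_priv->xl_mmio, copy the per-card module parameters (pkt_buf_sz, ringspeed, message_level) into the private struct, and iounmap on every error path. A condensed sketch, assuming XL_IO_SPACE and the fields shown, with register_netdev() standing in for the registration step the excerpt implies:

    static int xl_probe_sketch(struct pci_dev *pdev, struct net_device *dev)
    {
            struct xl_private *xl_priv = netdev_priv(dev);
            int err;

            xl_priv->xl_card_name = pci_name(pdev);
            xl_priv->xl_mmio = ioremap(pci_resource_start(pdev, 1), XL_IO_SPACE);
            if (!xl_priv->xl_mmio)
                    return -ENOMEM;
            xl_priv->pdev = pdev;
            xl_priv->pkt_buf_sz = PKT_BUF_SZ;
            dev->mtu = xl_priv->pkt_buf_sz - TR_HLEN;

            err = register_netdev(dev);
            if (err) {
                    iounmap(xl_priv->xl_mmio);  /* unwind the mapping, as lines 345/358 do */
                    return err;
            }
            printk(KERN_INFO "3C359: %s registered as: %s\n",
                   xl_priv->xl_card_name, dev->name);
            return 0;
    }
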
369 static int xl_init_firmware(struct xl_private *xl_priv)
373 err = request_firmware(&xl_priv->fw, FW_NAME, &xl_priv->pdev->dev);
379 if (xl_priv->fw->size < 16) {
381 xl_priv->fw->size, FW_NAME);
382 release_firmware(xl_priv->fw);
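
Note: xl_init_firmware() (source lines 369-382) fetches the adapter microcode with request_firmware() and rejects images shorter than the 16-byte trailer that the download loop (lines 499-511) indexes from the end of the blob. A sketch of that check, filling in the error handling the excerpt truncates:

    static int xl_init_firmware_sketch(struct xl_private *xl_priv)
    {
            int err;

            err = request_firmware(&xl_priv->fw, FW_NAME, &xl_priv->pdev->dev);
            if (err)
                    return err;

            if (xl_priv->fw->size < 16) {
                    printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
                           xl_priv->fw->size, FW_NAME);
                    release_firmware(xl_priv->fw);  /* drop the reference on a bad image */
                    err = -EINVAL;
            }
            return err;
    }
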
391 struct xl_private *xl_priv = netdev_priv(dev);
396 xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
398 spin_lock_init(&xl_priv->xl_lock) ;
400 err = xl_init_firmware(xl_priv);
415 struct xl_private *xl_priv = netdev_priv(dev);
416 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
424 if (xl_priv->fw == NULL)
495 start = (0xFFFF - (xl_priv->fw->size) + 1) ;
499 for (i = start, j = 0; j < xl_priv->fw->size; i++, j++) {
502 writeb(xl_priv->fw->data[j], xl_mmio + MMIO_MACDATA);
511 writeb(xl_priv->fw->data[xl_priv->fw->size - 16 + i],
587 xl_priv->srb = readb(xl_mmio + MMIO_MACDATA) << 8 ;
589 xl_priv->srb = xl_priv->srb | readb(xl_mmio + MMIO_MACDATA) ;
598 printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
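
Note: source lines 415-598 download the firmware and locate the SRB (system request block). Everything goes through one indirect window: a command-plus-address word is written to MMIO_MAC_ACCESS_CMD, then data moves a byte at a time through MMIO_MACDATA. The image is placed at the top of the 64 KB window (start = 0xFFFF - size + 1, line 495), and the 16-bit SRB offset is assembled high byte first (lines 587-589). A sketch of that access pattern; 'addr' is whatever window offset the caller needs, which the excerpt does not show:

    static u16 xl_mem_read_word(u8 __iomem *xl_mmio, u32 addr)
    {
            u16 val;

            writel(MEM_BYTE_READ | 0xD0000 | addr, xl_mmio + MMIO_MAC_ACCESS_CMD);
            val = readb(xl_mmio + MMIO_MACDATA) << 8;
            writel(MEM_BYTE_READ | 0xD0000 | (addr + 1), xl_mmio + MMIO_MAC_ACCESS_CMD);
            val |= readb(xl_mmio + MMIO_MACDATA);
            return val;
    }
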
606 struct xl_private *xl_priv=netdev_priv(dev);
607 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
630 if (xl_priv->xl_ring_speed != 0) {
631 if (xl_priv->xl_ring_speed == 4)
675 xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL);
676 if (xl_priv->xl_tx_ring == NULL) {
682 xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL);
683 if (xl_priv->xl_rx_ring == NULL) {
687 kfree(xl_priv->xl_tx_ring);
695 skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
700 xl_priv->xl_rx_ring[i].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
701 xl_priv->xl_rx_ring[i].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
702 xl_priv->rx_ring_skb[i] = skb ;
708 kfree(xl_priv->xl_tx_ring);
709 kfree(xl_priv->xl_rx_ring);
713 xl_priv->rx_ring_no = i ;
714 xl_priv->rx_ring_tail = 0 ;
715 xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_rx_ring, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_TODEVICE) ;
716 for (i=0;i<(xl_priv->rx_ring_no-1);i++) {
717 xl_priv->xl_rx_ring[i].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1)));
719 xl_priv->xl_rx_ring[i].upnextptr = 0 ;
721 writel(xl_priv->rx_ring_dma_addr, xl_mmio + MMIO_UPLISTPTR) ;
725 xl_priv->tx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_tx_ring, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
727 xl_priv->tx_ring_head = 1 ;
728 xl_priv->tx_ring_tail = 255 ; /* Special marker for first packet */
729 xl_priv->free_ring_entries = XL_TX_RING_SIZE ;
735 xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY;
736 xl_priv->xl_tx_ring[0].buffer = 0 ;
737 xl_priv->xl_tx_ring[0].buffer_length = 0 ;
738 xl_priv->xl_tx_ring[0].dnnextptr = 0 ;
740 writel(xl_priv->tx_ring_dma_addr, xl_mmio + MMIO_DNLISTPTR) ;
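
Note: source lines 606-740 (the open path) build both DMA rings. The RX side allocates the descriptor array (+7 bytes of alignment slack, line 682), gives every slot a mapped skb, chains the descriptors through upnextptr, and finally hands the ring's bus address to the NIC via MMIO_UPLISTPTR. A condensed sketch of just the RX bring-up, with the error unwinding trimmed:

    static int xl_setup_rx_ring_sketch(struct net_device *dev)
    {
            struct xl_private *xl_priv = netdev_priv(dev);
            u8 __iomem *xl_mmio = xl_priv->xl_mmio;
            struct sk_buff *skb;
            int i;

            xl_priv->xl_rx_ring = kzalloc(sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE + 7,
                                          GFP_DMA | GFP_KERNEL);
            if (xl_priv->xl_rx_ring == NULL)
                    return -ENOMEM;

            for (i = 0; i < XL_RX_RING_SIZE; i++) {
                    skb = dev_alloc_skb(xl_priv->pkt_buf_sz);
                    if (skb == NULL)
                            break;  /* the real code tolerates a short ring (rx_ring_no = i) */
                    xl_priv->xl_rx_ring[i].upfragaddr =
                            cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,
                                                       xl_priv->pkt_buf_sz,
                                                       PCI_DMA_FROMDEVICE));
                    xl_priv->xl_rx_ring[i].upfraglen =
                            cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
                    xl_priv->rx_ring_skb[i] = skb;
            }
            xl_priv->rx_ring_no = i;
            xl_priv->rx_ring_tail = 0;
            xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev, xl_priv->xl_rx_ring,
                                                       sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE,
                                                       PCI_DMA_TODEVICE);
            for (i = 0; i < xl_priv->rx_ring_no - 1; i++)
                    xl_priv->xl_rx_ring[i].upnextptr =
                            cpu_to_le32(xl_priv->rx_ring_dma_addr +
                                        sizeof(struct xl_rx_desc) * (i + 1));
            writel(xl_priv->rx_ring_dma_addr, xl_mmio + MMIO_UPLISTPTR);
            return 0;
    }
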
760 struct xl_private *xl_priv=netdev_priv(dev);
761 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
773 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
780 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb)+ 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
784 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
786 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 9, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
794 if (xl_priv->xl_laa[0]) { /* If using a LAA address */
796 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
797 writeb(xl_priv->xl_laa[i-10],xl_mmio + MMIO_MACDATA) ;
799 memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ;
802 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
809 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
840 writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb)+2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
843 writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb) + 7, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
847 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
848 xl_priv->asb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
850 printk("ASB: %04x",xl_priv->asb ) ;
851 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 10, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
854 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
855 xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
856 printk(", ARB: %04x\n",xl_priv->arb );
857 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
909 struct xl_private *xl_priv=netdev_priv(dev);
910 int n = xl_priv->rx_ring_tail;
914 xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * n));
915 xl_priv->xl_rx_ring[n].framestatus = 0;
916 xl_priv->xl_rx_ring[n].upnextptr = 0;
917 xl_priv->rx_ring_tail++;
918 xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1);
923 struct xl_private *xl_priv=netdev_priv(dev);
924 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
934 while (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & (RXUPDCOMPLETE | RXUPDFULL) ) { /* Descriptor to process */
936 if (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & RXUPDFULL ) { /* UpdFull, Multiple Descriptors used for the frame */
943 temp_ring_loc = xl_priv->rx_ring_tail ;
945 while (xl_priv->xl_rx_ring[temp_ring_loc].framestatus & RXUPDFULL ) {
950 frame_length = le32_to_cpu(xl_priv->xl_rx_ring[temp_ring_loc].framestatus) & 0x7FFF;
956 while (xl_priv->rx_ring_tail != temp_ring_loc)
966 while (xl_priv->rx_ring_tail != temp_ring_loc) {
967 copy_len = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen) & 0x7FFF;
969 pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
970 skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
973 pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
978 pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
979 skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
981 /* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
982 pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
989 frame_length = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus) & 0x7FFF;
991 skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
1001 skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ;
1002 pci_unmap_single(xl_priv->pdev, le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
1006 xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] = skb ;
1007 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
1008 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
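
Note: source lines 909-1008 are the receive path. Single-descriptor frames avoid a copy entirely: a fresh skb is swapped into the ring slot and the filled one is passed up the stack; frames spread across several descriptors (RXUPDFULL) are instead copied fragment by fragment with pci_dma_sync_single_for_cpu/for_device around each copy. A sketch of the swap case; tr_type_trans() is the Token Ring analogue of eth_type_trans(), and the allocation-failure branch is simplified:

    static void xl_rx_swap_sketch(struct net_device *dev)
    {
            struct xl_private *xl_priv = netdev_priv(dev);
            int tail = xl_priv->rx_ring_tail;
            struct sk_buff *skb, *skb2;
            u32 frame_length;

            frame_length = le32_to_cpu(xl_priv->xl_rx_ring[tail].framestatus) & 0x7FFF;
            skb = dev_alloc_skb(xl_priv->pkt_buf_sz);
            if (skb == NULL)
                    return;  /* the real code recycles the old buffer and counts a drop */

            skb2 = xl_priv->rx_ring_skb[tail];
            pci_unmap_single(xl_priv->pdev,
                             le32_to_cpu(xl_priv->xl_rx_ring[tail].upfragaddr),
                             xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
            skb_put(skb2, frame_length);
            skb2->protocol = tr_type_trans(skb2, dev);

            /* remap the fresh skb into the same slot before advancing the tail */
            xl_priv->rx_ring_skb[tail] = skb;
            xl_priv->xl_rx_ring[tail].upfragaddr =
                    cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,
                                               xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
            xl_priv->xl_rx_ring[tail].upfraglen =
                    cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
            netif_rx(skb2);
    }
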
1029 struct xl_private *xl_priv=netdev_priv(dev);
1030 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1052 struct xl_private *xl_priv=netdev_priv(dev);
1056 dev_kfree_skb_irq(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]) ;
1057 pci_unmap_single(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
1058 xl_priv->rx_ring_tail++ ;
1059 xl_priv->rx_ring_tail &= XL_RX_RING_SIZE-1;
1063 pci_unmap_single(xl_priv->pdev,xl_priv->rx_ring_dma_addr, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_FROMDEVICE) ;
1065 pci_unmap_single(xl_priv->pdev,xl_priv->tx_ring_dma_addr, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE, PCI_DMA_TODEVICE) ;
1067 kfree(xl_priv->xl_rx_ring) ;
1068 kfree(xl_priv->xl_tx_ring) ;
1076 struct xl_private *xl_priv =netdev_priv(dev);
1077 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1085 spin_lock(&xl_priv->xl_lock) ;
1114 spin_unlock(&xl_priv->xl_lock) ;
1120 if (xl_priv->srb_queued)
1140 if (xl_priv->asb_queued == 1) {
1142 } else if (xl_priv->asb_queued == 2) {
1174 spin_unlock(&xl_priv->xl_lock) ;
1188 spin_unlock(&xl_priv->xl_lock) ;
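
Note: source lines 1076-1188 are the interrupt handler; everything it does runs under xl_priv->xl_lock, and the srb_queued/asb_queued flags tell it which deferred work (the SRB/ASB bottom halves) a completed command belongs to. A skeleton only; MMIO_INTSTATUS and the SRB status bit are assumptions, since the excerpt shows just the locking and the flag tests:

    static irqreturn_t xl_interrupt_sketch(int irq, void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct xl_private *xl_priv = netdev_priv(dev);
            u8 __iomem *xl_mmio = xl_priv->xl_mmio;
            u16 intstatus;

            spin_lock(&xl_priv->xl_lock);
            intstatus = readw(xl_mmio + MMIO_INTSTATUS);  /* register name assumed */

            if ((intstatus & SRBRINT) && xl_priv->srb_queued)  /* bit name assumed */
                    xl_srb_bh(dev);  /* process the queued SRB reply */

            spin_unlock(&xl_priv->xl_lock);
            return IRQ_HANDLED;
    }
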
1198 struct xl_private *xl_priv=netdev_priv(dev);
1203 spin_lock_irqsave(&xl_priv->xl_lock,flags) ;
1207 if (xl_priv->free_ring_entries > 1 ) {
1211 tx_head = xl_priv->tx_ring_head ;
1212 tx_tail = xl_priv->tx_ring_tail ;
1214 txd = &(xl_priv->xl_tx_ring[tx_head]) ;
1217 txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
1219 xl_priv->tx_ring_skb[tx_head] = skb ;
1228 tx_prev = (xl_priv->tx_ring_head + XL_TX_RING_SIZE - 1) & (XL_TX_RING_SIZE - 1) ;
1230 xl_priv->tx_ring_head++ ;
1231 xl_priv->tx_ring_head &= (XL_TX_RING_SIZE - 1) ;
1232 xl_priv->free_ring_entries-- ;
1234 xl_priv->xl_tx_ring[tx_prev].dnnextptr = cpu_to_le32(xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head));
1241 spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
1245 spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
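
Note: source lines 1198-1245 are the transmit hook. Under xl_lock it checks free_ring_entries, fills the head descriptor, maps the skb, and only then links the new entry into the previous descriptor's dnnextptr, which is what publishes it to the NIC; indices wrap with & (XL_TX_RING_SIZE - 1), so the ring size must be a power of two. Sketch, with the buffer_length flag name assumed:

    static netdev_tx_t xl_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
    {
            struct xl_private *xl_priv = netdev_priv(dev);
            struct xl_tx_desc *txd;
            int tx_head, tx_prev;
            unsigned long flags;

            spin_lock_irqsave(&xl_priv->xl_lock, flags);
            if (xl_priv->free_ring_entries <= 1) {
                    spin_unlock_irqrestore(&xl_priv->xl_lock, flags);
                    return NETDEV_TX_BUSY;
            }

            tx_head = xl_priv->tx_ring_head;
            txd = &(xl_priv->xl_tx_ring[tx_head]);
            txd->dnnextptr = 0;
            txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,
                                                     skb->len, PCI_DMA_TODEVICE));
            txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST;  /* flag name assumed */
            xl_priv->tx_ring_skb[tx_head] = skb;

            tx_prev = (tx_head + XL_TX_RING_SIZE - 1) & (XL_TX_RING_SIZE - 1);
            xl_priv->tx_ring_head = (tx_head + 1) & (XL_TX_RING_SIZE - 1);
            xl_priv->free_ring_entries--;

            /* publish: the NIC follows dnnextptr, so this store goes last */
            xl_priv->xl_tx_ring[tx_prev].dnnextptr =
                    cpu_to_le32(xl_priv->tx_ring_dma_addr +
                                sizeof(struct xl_tx_desc) * tx_head);

            spin_unlock_irqrestore(&xl_priv->xl_lock, flags);
            return NETDEV_TX_OK;
    }
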
1259 struct xl_private *xl_priv=netdev_priv(dev);
1260 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1264 if (xl_priv->tx_ring_tail == 255) {/* First time */
1265 xl_priv->xl_tx_ring[0].framestartheader = 0 ;
1266 xl_priv->xl_tx_ring[0].dnnextptr = 0 ;
1267 xl_priv->tx_ring_tail = 1 ;
1270 while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE ) {
1271 txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]) ;
1272 pci_unmap_single(xl_priv->pdev, le32_to_cpu(txd->buffer), xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE);
1276 dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]) ;
1277 xl_priv->tx_ring_tail++ ;
1278 xl_priv->tx_ring_tail &= (XL_TX_RING_SIZE - 1) ;
1279 xl_priv->free_ring_entries++ ;
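
Note: source lines 1259-1279 reclaim completed transmits. tx_ring_tail == 255 is a sentinel meaning "no packet has completed yet" (it was set at open, line 728); after that, the tail chases descriptors whose framestartheader carries TXDNCOMPLETE, unmapping and freeing each skb. Sketch:

    static void xl_dn_comp_sketch(struct net_device *dev)
    {
            struct xl_private *xl_priv = netdev_priv(dev);
            struct xl_tx_desc *txd;

            if (xl_priv->tx_ring_tail == 255) {  /* first completion ever */
                    xl_priv->xl_tx_ring[0].framestartheader = 0;
                    xl_priv->xl_tx_ring[0].dnnextptr = 0;
                    xl_priv->tx_ring_tail = 1;
            }

            while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE) {
                    txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]);
                    pci_unmap_single(xl_priv->pdev, le32_to_cpu(txd->buffer),
                                     xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len,
                                     PCI_DMA_TODEVICE);
                    dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]);
                    xl_priv->tx_ring_tail = (xl_priv->tx_ring_tail + 1) & (XL_TX_RING_SIZE - 1);
                    xl_priv->free_ring_entries++;
            }
    }
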
1295 struct xl_private *xl_priv = netdev_priv(dev);
1296 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1351 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
1355 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1393 struct xl_private *xl_priv = netdev_priv(dev);
1403 if (options ^ xl_priv->xl_copy_all_options) { /* Changed, must send command */
1404 xl_priv->xl_copy_all_options = options ;
1418 if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
1419 memcpy(xl_priv->xl_functional_addr, dev_mc_address,4) ;
1433 struct xl_private *xl_priv = netdev_priv(dev);
1434 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1438 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1440 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1460 if(xl_priv->xl_message_level)
1467 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1468 if(xl_priv->xl_message_level)
1474 if(xl_priv->xl_message_level)
1478 if(xl_priv->xl_message_level)
1482 if(xl_priv->xl_message_level)
1486 if(xl_priv->xl_message_level) {
1487 if (xl_priv->xl_copy_all_options == 0x0004)
1503 struct xl_private *xl_priv = netdev_priv(dev);
1510 memcpy(xl_priv->xl_laa, saddr->sa_data,dev->addr_len) ;
1512 if (xl_priv->xl_message_level) {
1513 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, xl_priv->xl_laa[0],
1514 xl_priv->xl_laa[1], xl_priv->xl_laa[2],
1515 xl_priv->xl_laa[3], xl_priv->xl_laa[4],
1516 xl_priv->xl_laa[5]);
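
Note: source lines 1503-1516 store a locally administered address (LAA) into xl_priv->xl_laa rather than writing it to the adapter directly; the open path (lines 794-799) checks xl_laa[0] and pushes it to the hardware then. Sketch:

    static int xl_set_mac_address_sketch(struct net_device *dev, void *addr)
    {
            struct xl_private *xl_priv = netdev_priv(dev);
            struct sockaddr *saddr = addr;

            if (netif_running(dev))
                    return -EIO;  /* guard assumed; the excerpt does not show one */

            memcpy(xl_priv->xl_laa, saddr->sa_data, dev->addr_len);
            return 0;
    }
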
1524 struct xl_private *xl_priv = netdev_priv(dev);
1525 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1529 writel( ( MEM_BYTE_READ | 0xD0000 | xl_priv->arb), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1533 writel( ( (MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1542 lan_status_diff = xl_priv->xl_lan_status ^ lan_status ;
1563 if (xl_priv->xl_message_level) {
1581 if (xl_priv->xl_message_level)
1589 if (xl_priv->xl_message_level)
1593 xl_priv->xl_lan_status = lan_status ;
1600 writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1601 xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
1615 xl_priv->asb_queued = 0 ;
1616 writel( ((MEM_BYTE_READ | 0xD0000 | xl_priv->asb) + 2), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1618 xl_priv->asb_queued = 1 ;
1633 printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
1651 struct xl_private *xl_priv = netdev_priv(dev);
1652 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1654 if (xl_priv->asb_queued == 1)
1657 writel(MEM_BYTE_WRITE | 0xd0000 | xl_priv->asb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1660 writel(MEM_WORD_WRITE | 0xd0000 | xl_priv->asb | 6, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1661 writew(swab16(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ;
1671 xl_priv->asb_queued = 2 ;
1682 struct xl_private *xl_priv = netdev_priv(dev);
1683 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1686 writel(MMIO_BYTE_READ | 0xd0000 | xl_priv->asb | 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1699 xl_priv->asb_queued = 0 ;
1710 struct xl_private *xl_priv = netdev_priv(dev);
1711 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1715 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1720 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1725 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1727 writel(MEM_WORD_WRITE | 0xD0000 | xl_priv->srb | 4, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1728 writew(xl_priv->xl_copy_all_options, xl_mmio + MMIO_MACDATA) ;
1732 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1734 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 6 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1735 writeb(xl_priv->xl_functional_addr[0], xl_mmio + MMIO_MACDATA) ;
1736 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 7 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1737 writeb(xl_priv->xl_functional_addr[1], xl_mmio + MMIO_MACDATA) ;
1738 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 8 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1739 writeb(xl_priv->xl_functional_addr[2], xl_mmio + MMIO_MACDATA) ;
1740 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 9 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1741 writeb(xl_priv->xl_functional_addr[3], xl_mmio + MMIO_MACDATA) ;
1754 xl_priv->srb_queued = 1 ;
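
Note: source lines 1710-1754 build SRB commands in place: each field of the request block is poked through the indirect window (MEM_BYTE_WRITE / MEM_WORD_WRITE at 0xD0000 | xl_priv->srb plus the field offset), and srb_queued is set so the interrupt handler knows a reply is pending. A sketch of writing one opcode byte; how the adapter is then kicked is not shown in the excerpt:

    static void xl_srb_cmd_sketch(struct net_device *dev, u8 srb_cmd)
    {
            struct xl_private *xl_priv = netdev_priv(dev);
            u8 __iomem *xl_mmio = xl_priv->xl_mmio;

            /* the opcode lives at offset 0 of the SRB */
            writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb,
                   xl_mmio + MMIO_MAC_ACCESS_CMD);
            writeb(srb_cmd, xl_mmio + MMIO_MACDATA);

            xl_priv->srb_queued = 1;  /* the ISR will run the SRB bottom half */
    }
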
1767 struct xl_private *xl_priv = netdev_priv(dev);
1768 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1792 struct xl_private *xl_priv = netdev_priv(dev);
1795 if (xl_priv->xl_ring_speed == 4)
1806 xl_priv->pkt_buf_sz = mtu + TR_HLEN ;
1814 struct xl_private *xl_priv=netdev_priv(dev);
1816 release_firmware(xl_priv->fw);
1818 iounmap(xl_priv->xl_mmio) ;
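
Note: source lines 1814-1818 are the teardown: it releases the firmware reference and unmaps the MMIO window, undoing xl_init_firmware() and the probe-time ioremap() in reverse order. A sketch of the full remove path, with the unregister/free steps assumed from the usual PCI driver pattern:

    static void xl_remove_sketch(struct pci_dev *pdev)
    {
            struct net_device *dev = pci_get_drvdata(pdev);
            struct xl_private *xl_priv = netdev_priv(dev);

            unregister_netdev(dev);
            release_firmware(xl_priv->fw);
            iounmap(xl_priv->xl_mmio);
            free_netdev(dev);
            pci_set_drvdata(pdev, NULL);
    }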