Lines Matching refs:cp

110  * also, we need to make cp->lock finer-grained.
160 #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
229 static void cas_set_link_modes(struct cas *cp);
231 static inline void cas_lock_tx(struct cas *cp)
236 spin_lock_nested(&cp->tx_lock[i], i);
247 #define cas_lock_all_save(cp, flags) \
249 struct cas *xxxcp = (cp); \
254 static inline void cas_unlock_tx(struct cas *cp)
259 spin_unlock(&cp->tx_lock[i - 1]);
262 #define cas_unlock_all_restore(cp, flags) \
264 struct cas *xxxcp = (cp); \
269 static void cas_disable_irq(struct cas *cp, const int ring)
273 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
278 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
291 cp->regs + REG_PLUS_INTRN_MASK(ring));
295 writel(INTRN_MASK_CLEAR_ALL, cp->regs +
302 static inline void cas_mask_intr(struct cas *cp)
307 cas_disable_irq(cp, i);
310 static void cas_enable_irq(struct cas *cp, const int ring)
313 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
317 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
329 writel(INTRN_MASK_RX_EN, cp->regs +
339 static inline void cas_unmask_intr(struct cas *cp)
344 cas_enable_irq(cp, i);
347 static inline void cas_entropy_gather(struct cas *cp)
350 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
353 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
354 readl(cp->regs + REG_ENTROPY_IV),
359 static inline void cas_entropy_reset(struct cas *cp)
362 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
366 cp->regs + REG_BIM_LOCAL_DEV_EN);
367 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
368 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
371 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
372 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
379 static u16 cas_phy_read(struct cas *cp, int reg)
385 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
388 writel(cmd, cp->regs + REG_MIF_FRAME);
393 cmd = readl(cp->regs + REG_MIF_FRAME);
400 static int cas_phy_write(struct cas *cp, int reg, u16 val)
406 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
410 writel(cmd, cp->regs + REG_MIF_FRAME);
415 cmd = readl(cp->regs + REG_MIF_FRAME);
422 static void cas_phy_powerup(struct cas *cp)
424 u16 ctl = cas_phy_read(cp, MII_BMCR);
429 cas_phy_write(cp, MII_BMCR, ctl);
432 static void cas_phy_powerdown(struct cas *cp)
434 u16 ctl = cas_phy_read(cp, MII_BMCR);
439 cas_phy_write(cp, MII_BMCR, ctl);
442 /* cp->lock held. note: the last put_page will free the buffer */
443 static int cas_page_free(struct cas *cp, cas_page_t *page)
445 dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
447 __free_pages(page->buffer, cp->page_order);
463 static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
473 page->buffer = alloc_pages(flags, cp->page_order);
476 page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
477 cp->page_size, DMA_FROM_DEVICE);
486 static void cas_spare_init(struct cas *cp)
488 spin_lock(&cp->rx_inuse_lock);
489 INIT_LIST_HEAD(&cp->rx_inuse_list);
490 spin_unlock(&cp->rx_inuse_lock);
492 spin_lock(&cp->rx_spare_lock);
493 INIT_LIST_HEAD(&cp->rx_spare_list);
494 cp->rx_spares_needed = RX_SPARE_COUNT;
495 spin_unlock(&cp->rx_spare_lock);
499 static void cas_spare_free(struct cas *cp)
505 spin_lock(&cp->rx_spare_lock);
506 list_splice_init(&cp->rx_spare_list, &list);
507 spin_unlock(&cp->rx_spare_lock);
509 cas_page_free(cp, list_entry(elem, cas_page_t, list));
518 spin_lock(&cp->rx_inuse_lock);
519 list_splice_init(&cp->rx_inuse_list, &list);
520 spin_unlock(&cp->rx_inuse_lock);
522 spin_lock(&cp->rx_spare_lock);
523 list_splice_init(&cp->rx_inuse_list, &list);
524 spin_unlock(&cp->rx_spare_lock);
527 cas_page_free(cp, list_entry(elem, cas_page_t, list));
532 static void cas_spare_recover(struct cas *cp, const gfp_t flags)
543 spin_lock(&cp->rx_inuse_lock);
544 list_splice_init(&cp->rx_inuse_list, &list);
545 spin_unlock(&cp->rx_inuse_lock);
566 spin_lock(&cp->rx_spare_lock);
567 if (cp->rx_spares_needed > 0) {
568 list_add(elem, &cp->rx_spare_list);
569 cp->rx_spares_needed--;
570 spin_unlock(&cp->rx_spare_lock);
572 spin_unlock(&cp->rx_spare_lock);
573 cas_page_free(cp, page);
579 spin_lock(&cp->rx_inuse_lock);
580 list_splice(&list, &cp->rx_inuse_list);
581 spin_unlock(&cp->rx_inuse_lock);
584 spin_lock(&cp->rx_spare_lock);
585 needed = cp->rx_spares_needed;
586 spin_unlock(&cp->rx_spare_lock);
594 cas_page_t *spare = cas_page_alloc(cp, flags);
601 spin_lock(&cp->rx_spare_lock);
602 list_splice(&list, &cp->rx_spare_list);
603 cp->rx_spares_needed -= i;
604 spin_unlock(&cp->rx_spare_lock);
608 static cas_page_t *cas_page_dequeue(struct cas *cp)
613 spin_lock(&cp->rx_spare_lock);
614 if (list_empty(&cp->rx_spare_list)) {
616 spin_unlock(&cp->rx_spare_lock);
617 cas_spare_recover(cp, GFP_ATOMIC);
618 spin_lock(&cp->rx_spare_lock);
619 if (list_empty(&cp->rx_spare_list)) {
620 netif_err(cp, rx_err, cp->dev,
622 spin_unlock(&cp->rx_spare_lock);
627 entry = cp->rx_spare_list.next;
629 recover = ++cp->rx_spares_needed;
630 spin_unlock(&cp->rx_spare_lock);
635 atomic_inc(&cp->reset_task_pending);
636 atomic_inc(&cp->reset_task_pending_spare);
637 schedule_work(&cp->reset_task);
639 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
640 schedule_work(&cp->reset_task);
647 static void cas_mif_poll(struct cas *cp, const int enable)
651 cfg = readl(cp->regs + REG_MIF_CFG);
654 if (cp->phy_type & CAS_PHY_MII_MDIO1)
661 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
664 cp->regs + REG_MIF_MASK);
665 writel(cfg, cp->regs + REG_MIF_CFG);
668 /* Must be invoked under cp->lock */
669 static void cas_begin_auto_negotiation(struct cas *cp,
676 int oldstate = cp->lstate;
682 lcntl = cp->link_cntl;
684 cp->link_cntl = BMCR_ANENABLE;
687 cp->link_cntl = 0;
689 cp->link_cntl |= BMCR_SPEED100;
691 cp->link_cntl |= CAS_BMCR_SPEED1000;
693 cp->link_cntl |= BMCR_FULLDPLX;
696 changed = (lcntl != cp->link_cntl);
699 if (cp->lstate == link_up) {
700 netdev_info(cp->dev, "PCS link down\n");
703 netdev_info(cp->dev, "link configuration changed\n");
706 cp->lstate = link_down;
707 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
708 if (!cp->hw_running)
717 netif_carrier_off(cp->dev);
724 atomic_inc(&cp->reset_task_pending);
725 atomic_inc(&cp->reset_task_pending_all);
726 schedule_work(&cp->reset_task);
727 cp->timer_ticks = 0;
728 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
732 if (cp->phy_type & CAS_PHY_SERDES) {
733 u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
735 if (cp->link_cntl & BMCR_ANENABLE) {
737 cp->lstate = link_aneg;
739 if (cp->link_cntl & BMCR_FULLDPLX)
742 cp->lstate = link_force_ok;
744 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
745 writel(val, cp->regs + REG_PCS_MII_CTRL);
748 cas_mif_poll(cp, 0);
749 ctl = cas_phy_read(cp, MII_BMCR);
752 ctl |= cp->link_cntl;
755 cp->lstate = link_aneg;
757 cp->lstate = link_force_ok;
759 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
760 cas_phy_write(cp, MII_BMCR, ctl);
761 cas_mif_poll(cp, 1);
764 cp->timer_ticks = 0;
765 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
768 /* Must be invoked under cp->lock. */
769 static int cas_reset_mii_phy(struct cas *cp)
774 cas_phy_write(cp, MII_BMCR, BMCR_RESET);
777 val = cas_phy_read(cp, MII_BMCR);
785 static void cas_saturn_firmware_init(struct cas *cp)
791 if (PHY_NS_DP83065 != cp->phy_id)
794 err = request_firmware(&fw, fw_name, &cp->pdev->dev);
805 cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
806 cp->fw_size = fw->size - 2;
807 cp->fw_data = vmalloc(cp->fw_size);
808 if (!cp->fw_data)
810 memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
815 static void cas_saturn_firmware_load(struct cas *cp)
819 if (!cp->fw_data)
822 cas_phy_powerdown(cp);
825 cas_phy_write(cp, DP83065_MII_MEM, 0x0);
828 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
829 cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
830 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
831 cas_phy_write(cp, DP83065_MII_REGD, 0x82);
832 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
833 cas_phy_write(cp, DP83065_MII_REGD, 0x0);
834 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
835 cas_phy_write(cp, DP83065_MII_REGD, 0x39);
838 cas_phy_write(cp, DP83065_MII_MEM, 0x1);
839 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
840 for (i = 0; i < cp->fw_size; i++)
841 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
844 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
845 cas_phy_write(cp, DP83065_MII_REGD, 0x1);
850 static void cas_phy_init(struct cas *cp)
855 if (CAS_PHY_MII(cp->phy_type)) {
857 cp->regs + REG_PCS_DATAPATH_MODE);
859 cas_mif_poll(cp, 0);
860 cas_reset_mii_phy(cp); /* take out of isolate mode */
862 if (PHY_LUCENT_B0 == cp->phy_id) {
864 cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
865 cas_phy_write(cp, MII_BMCR, 0x00f1);
866 cas_phy_write(cp, LUCENT_MII_REG, 0x0);
868 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
870 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
871 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
872 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
873 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
874 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
875 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
876 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
877 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
878 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
879 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
880 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
882 } else if (PHY_BROADCOM_5411 == cp->phy_id) {
883 val = cas_phy_read(cp, BROADCOM_MII_REG4);
884 val = cas_phy_read(cp, BROADCOM_MII_REG4);
887 cas_phy_write(cp, BROADCOM_MII_REG4,
891 } else if (cp->cas_flags & CAS_FLAG_SATURN) {
892 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
894 cp->regs + REG_SATURN_PCFG);
900 if (PHY_NS_DP83065 == cp->phy_id) {
901 cas_saturn_firmware_load(cp);
903 cas_phy_powerup(cp);
907 val = cas_phy_read(cp, MII_BMCR);
909 cas_phy_write(cp, MII_BMCR, val);
912 cas_phy_write(cp, MII_ADVERTISE,
913 cas_phy_read(cp, MII_ADVERTISE) |
919 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
923 val = cas_phy_read(cp, CAS_MII_1000_CTRL);
926 cas_phy_write(cp, CAS_MII_1000_CTRL, val);
935 cp->regs + REG_PCS_DATAPATH_MODE);
938 if (cp->cas_flags & CAS_FLAG_SATURN)
939 writel(0, cp->regs + REG_SATURN_PCFG);
942 val = readl(cp->regs + REG_PCS_MII_CTRL);
944 writel(val, cp->regs + REG_PCS_MII_CTRL);
949 if ((readl(cp->regs + REG_PCS_MII_CTRL) &
954 netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
955 readl(cp->regs + REG_PCS_STATE_MACHINE));
960 writel(0x0, cp->regs + REG_PCS_CFG);
963 val = readl(cp->regs + REG_PCS_MII_ADVERT);
967 writel(val, cp->regs + REG_PCS_MII_ADVERT);
970 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
974 cp->regs + REG_PCS_SERDES_CTRL);
979 static int cas_pcs_link_check(struct cas *cp)
988 stat = readl(cp->regs + REG_PCS_MII_STATUS);
990 stat = readl(cp->regs + REG_PCS_MII_STATUS);
998 netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1003 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1011 if (cp->lstate != link_up) {
1012 if (cp->opened) {
1013 cp->lstate = link_up;
1014 cp->link_transition = LINK_TRANSITION_LINK_UP;
1016 cas_set_link_modes(cp);
1017 netif_carrier_on(cp->dev);
1020 } else if (cp->lstate == link_up) {
1021 cp->lstate = link_down;
1023 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1024 !cp->link_transition_jiffies_valid) {
1038 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1039 cp->link_transition_jiffies = jiffies;
1040 cp->link_transition_jiffies_valid = 1;
1042 cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1044 netif_carrier_off(cp->dev);
1045 if (cp->opened)
1046 netif_info(cp, link, cp->dev, "PCS link down\n");
1056 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1058 stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1062 } else if (cp->lstate == link_down) {
1064 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1065 !cp->link_transition_jiffies_valid) {
1072 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1073 cp->link_transition_jiffies = jiffies;
1074 cp->link_transition_jiffies_valid = 1;
1076 cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1084 struct cas *cp, u32 status)
1086 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1090 return cas_pcs_link_check(cp);
1094 struct cas *cp, u32 status)
1096 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1101 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1111 spin_lock(&cp->stat_lock[0]);
1114 cp->net_stats[0].tx_fifo_errors++;
1119 cp->net_stats[0].tx_errors++;
1126 cp->net_stats[0].collisions += 0x10000;
1129 cp->net_stats[0].tx_aborted_errors += 0x10000;
1130 cp->net_stats[0].collisions += 0x10000;
1134 cp->net_stats[0].tx_aborted_errors += 0x10000;
1135 cp->net_stats[0].collisions += 0x10000;
1137 spin_unlock(&cp->stat_lock[0]);
1145 static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1153 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1157 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1166 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1172 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1178 static void cas_init_rx_dma(struct cas *cp)
1180 u64 desc_dma = cp->block_dvma;
1189 (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */
1191 writel(val, cp->regs + REG_RX_CFG);
1193 val = (unsigned long) cp->init_rxds[0] -
1194 (unsigned long) cp->init_block;
1195 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1196 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1197 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1199 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1203 val = (unsigned long) cp->init_rxds[1] -
1204 (unsigned long) cp->init_block;
1205 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1206 writel((desc_dma + val) & 0xffffffff, cp->regs +
1208 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1213 val = (unsigned long) cp->init_rxcs[0] -
1214 (unsigned long) cp->init_block;
1215 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1216 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1218 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1221 val = (unsigned long) cp->init_rxcs[i] -
1222 (unsigned long) cp->init_block;
1223 writel((desc_dma + val) >> 32, cp->regs +
1225 writel((desc_dma + val) & 0xffffffff, cp->regs +
1234 readl(cp->regs + REG_INTR_STATUS_ALIAS);
1235 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1239 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1241 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1242 writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1246 writel(i, cp->regs + REG_RX_TABLE_ADDR);
1247 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1248 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1249 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1253 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1254 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1260 writel(val, cp->regs + REG_RX_BLANK);
1262 writel(0x0, cp->regs + REG_RX_BLANK);
1272 writel(val, cp->regs + REG_RX_AE_THRESH);
1273 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1275 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1281 writel(0x0, cp->regs + REG_RX_RED);
1285 if (cp->page_size == 0x1000)
1287 else if (cp->page_size == 0x2000)
1289 else if (cp->page_size == 0x4000)
1293 size = cp->dev->mtu + 64;
1294 if (size > cp->page_size)
1295 size = cp->page_size;
1306 cp->mtu_stride = 1 << (i + 10);
1309 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1311 writel(val, cp->regs + REG_RX_PAGE_SIZE);
1320 writel(val, cp->regs + REG_HP_CFG);
1333 static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1335 cas_page_t *page = cp->rx_pages[1][index];
1341 new = cas_page_dequeue(cp);
1343 spin_lock(&cp->rx_inuse_lock);
1344 list_add(&page->list, &cp->rx_inuse_list);
1345 spin_unlock(&cp->rx_inuse_lock);
1351 static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1354 cas_page_t **page0 = cp->rx_pages[0];
1355 cas_page_t **page1 = cp->rx_pages[1];
1359 cas_page_t *new = cas_page_spare(cp, index);
1369 static void cas_clean_rxds(struct cas *cp)
1372 struct cas_rx_desc *rxd = cp->init_rxds[0];
1378 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1386 cas_page_t *page = cas_page_swap(cp, 0, i);
1392 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1393 cp->rx_last[0] = 0;
1394 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1397 static void cas_clean_rxcs(struct cas *cp)
1402 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1403 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1405 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1419 static int cas_rxmac_reset(struct cas *cp)
1421 struct net_device *dev = cp->dev;
1426 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1428 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1438 writel(0, cp->regs + REG_RX_CFG);
1440 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1452 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1454 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1464 cas_clean_rxds(cp);
1465 cas_clean_rxcs(cp);
1468 cas_init_rx_dma(cp);
1471 val = readl(cp->regs + REG_RX_CFG);
1472 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1473 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1474 val = readl(cp->regs + REG_MAC_RX_CFG);
1475 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1480 static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1483 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1488 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1491 spin_lock(&cp->stat_lock[0]);
1493 cp->net_stats[0].rx_frame_errors += 0x10000;
1496 cp->net_stats[0].rx_crc_errors += 0x10000;
1499 cp->net_stats[0].rx_length_errors += 0x10000;
1502 cp->net_stats[0].rx_over_errors++;
1503 cp->net_stats[0].rx_fifo_errors++;
1509 spin_unlock(&cp->stat_lock[0]);
1513 static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1516 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1521 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1529 cp->pause_entered++;
1532 cp->pause_last_time_recvd = (stat >> 16);
1538 /* Must be invoked under cp->lock. */
1539 static inline int cas_mdio_link_not_up(struct cas *cp)
1543 switch (cp->lstate) {
1545 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1546 cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1547 cp->timer_ticks = 5;
1548 cp->lstate = link_force_ok;
1549 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1553 val = cas_phy_read(cp, MII_BMCR);
1560 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1562 cas_phy_write(cp, MII_BMCR, val);
1563 cp->timer_ticks = 5;
1564 cp->lstate = link_force_try;
1565 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1570 val = cas_phy_read(cp, MII_BMCR);
1571 cp->timer_ticks = 5;
1575 cas_phy_write(cp, MII_BMCR, val);
1585 cas_phy_write(cp, MII_BMCR, val);
1596 /* must be invoked with cp->lock held */
1597 static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1607 if ((cp->lstate == link_force_try) &&
1608 (cp->link_cntl & BMCR_ANENABLE)) {
1609 cp->lstate = link_force_ret;
1610 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1611 cas_mif_poll(cp, 0);
1612 cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1613 cp->timer_ticks = 5;
1614 if (cp->opened)
1615 netif_info(cp, link, cp->dev,
1617 cas_phy_write(cp, MII_BMCR,
1618 cp->link_fcntl | BMCR_ANENABLE |
1620 cas_mif_poll(cp, 1);
1622 } else if (cp->lstate != link_up) {
1623 cp->lstate = link_up;
1624 cp->link_transition = LINK_TRANSITION_LINK_UP;
1626 if (cp->opened) {
1627 cas_set_link_modes(cp);
1628 netif_carrier_on(cp->dev);
1638 if (cp->lstate == link_up) {
1639 cp->lstate = link_down;
1640 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1642 netif_carrier_off(cp->dev);
1643 if (cp->opened)
1644 netif_info(cp, link, cp->dev, "Link down\n");
1647 } else if (++cp->timer_ticks > 10)
1648 cas_mdio_link_not_up(cp);
1653 static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1656 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1664 return cas_mii_link_check(cp, bmsr);
1667 static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1670 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1676 stat, readl(cp->regs + REG_BIM_DIAG));
1680 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1699 pci_errs = pci_status_get_and_clear_errors(cp->pdev);
1725 static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1730 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1732 spin_lock(&cp->stat_lock[0]);
1733 cp->net_stats[0].rx_errors++;
1734 spin_unlock(&cp->stat_lock[0]);
1740 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1742 spin_lock(&cp->stat_lock[0]);
1743 cp->net_stats[0].rx_errors++;
1744 spin_unlock(&cp->stat_lock[0]);
1749 if (cas_pcs_interrupt(dev, cp, status))
1754 if (cas_txmac_interrupt(dev, cp, status))
1759 if (cas_rxmac_interrupt(dev, cp, status))
1764 if (cas_mac_interrupt(dev, cp, status))
1769 if (cas_mif_interrupt(dev, cp, status))
1774 if (cas_pci_interrupt(dev, cp, status))
1781 atomic_inc(&cp->reset_task_pending);
1782 atomic_inc(&cp->reset_task_pending_all);
1784 schedule_work(&cp->reset_task);
1786 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1788 schedule_work(&cp->reset_task);
1798 static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1803 if (CAS_TABORT(cp) == 1)
1810 static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1814 struct net_device *dev = cp->dev;
1817 spin_lock(&cp->tx_lock[ring]);
1818 txds = cp->init_txds[ring];
1819 skbs = cp->tx_skbs[ring];
1820 entry = cp->tx_old[ring];
1837 + cp->tx_tiny_use[ring][entry].nbufs + 1;
1841 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1845 cp->tx_tiny_use[ring][entry].nbufs = 0;
1853 dma_unmap_page(&cp->pdev->dev, daddr, dlen,
1858 if (cp->tx_tiny_use[ring][entry].used) {
1859 cp->tx_tiny_use[ring][entry].used = 0;
1864 spin_lock(&cp->stat_lock[ring]);
1865 cp->net_stats[ring].tx_packets++;
1866 cp->net_stats[ring].tx_bytes += skb->len;
1867 spin_unlock(&cp->stat_lock[ring]);
1870 cp->tx_old[ring] = entry;
1877 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1879 spin_unlock(&cp->tx_lock[ring]);
1882 static void cas_tx(struct net_device *dev, struct cas *cp,
1887 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1889 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1900 limit = readl(cp->regs + REG_TX_COMPN(ring));
1902 if (cp->tx_old[ring] != limit)
1903 cas_tx_ringN(cp, ring, limit);
1908 static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1929 skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1940 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1946 i += cp->crc_size;
1947 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1950 dma_sync_single_for_device(&cp->pdev->dev,
1964 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1967 hlen = min(cp->page_size - off, dlen);
1969 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1976 i += cp->crc_size;
1977 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1985 dma_sync_single_for_device(&cp->pdev->dev,
1990 RX_USED_ADD(page, cp->mtu_stride);
2010 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2011 dma_sync_single_for_cpu(&cp->pdev->dev,
2013 hlen + cp->crc_size,
2015 dma_sync_single_for_device(&cp->pdev->dev,
2017 hlen + cp->crc_size,
2027 RX_USED_ADD(page, hlen + cp->crc_size);
2030 if (cp->crc_size)
2039 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2041 hlen = min(cp->page_size - off, dlen);
2043 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2050 i += cp->crc_size;
2051 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
2054 dma_sync_single_for_device(&cp->pdev->dev,
2058 RX_USED_ADD(page, cp->mtu_stride);
2066 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2067 dma_sync_single_for_cpu(&cp->pdev->dev,
2069 dlen + cp->crc_size,
2071 memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
2072 dma_sync_single_for_device(&cp->pdev->dev,
2074 dlen + cp->crc_size,
2076 RX_USED_ADD(page, dlen + cp->crc_size);
2079 if (cp->crc_size)
2086 if (cp->crc_size) {
2088 csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2091 skb->protocol = eth_type_trans(skb, cp->dev);
2115 static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2119 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2136 static void cas_post_page(struct cas *cp, const int ring, const int index)
2141 entry = cp->rx_old[ring];
2143 new = cas_page_swap(cp, ring, index);
2144 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2145 cp->init_rxds[ring][entry].index =
2150 cp->rx_old[ring] = entry;
2156 writel(entry, cp->regs + REG_RX_KICK);
2158 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2159 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2164 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2168 cas_page_t **page = cp->rx_pages[ring];
2170 entry = cp->rx_old[ring];
2172 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2182 cas_page_t *new = cas_page_dequeue(cp);
2187 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2188 if (!timer_pending(&cp->link_timer))
2189 mod_timer(&cp->link_timer, jiffies +
2191 cp->rx_old[ring] = entry;
2192 cp->rx_last[ring] = num ? num - released : 0;
2195 spin_lock(&cp->rx_inuse_lock);
2196 list_add(&page[entry]->list, &cp->rx_inuse_list);
2197 spin_unlock(&cp->rx_inuse_lock);
2198 cp->init_rxds[ring][entry].buffer =
2211 cp->rx_old[ring] = entry;
2217 writel(cluster, cp->regs + REG_RX_KICK);
2219 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2220 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2237 static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2239 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2243 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2246 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2248 entry = cp->rx_new[ring];
2274 spin_lock(&cp->stat_lock[ring]);
2275 cp->net_stats[ring].rx_errors++;
2277 cp->net_stats[ring].rx_length_errors++;
2279 cp->net_stats[ring].rx_crc_errors++;
2280 spin_unlock(&cp->stat_lock[ring]);
2284 spin_lock(&cp->stat_lock[ring]);
2285 ++cp->net_stats[ring].rx_dropped;
2286 spin_unlock(&cp->stat_lock[ring]);
2290 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2303 cas_rx_flow_pkt(cp, words, skb);
2306 spin_lock(&cp->stat_lock[ring]);
2307 cp->net_stats[ring].rx_packets++;
2308 cp->net_stats[ring].rx_bytes += len;
2309 spin_unlock(&cp->stat_lock[ring]);
2319 cas_post_page(cp, dring, i);
2326 cas_post_page(cp, dring, i);
2333 cas_post_page(cp, dring, i);
2344 cp->rx_new[ring] = entry;
2347 netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2354 struct cas *cp, int ring)
2356 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2359 last = cp->rx_cur[ring];
2360 entry = cp->rx_new[ring];
2361 netif_printk(cp, intr, KERN_DEBUG, dev,
2363 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2370 cp->rx_cur[ring] = last;
2373 writel(last, cp->regs + REG_RX_COMP_TAIL);
2374 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2375 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2385 struct cas *cp, const u32 status,
2389 cas_post_rxcs_ringN(dev, cp, ring);
2395 struct cas *cp = netdev_priv(dev);
2397 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2398 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2404 spin_lock_irqsave(&cp->lock, flags);
2407 cas_mask_intr(cp);
2408 napi_schedule(&cp->napi);
2410 cas_rx_ringN(cp, ring, 0);
2416 cas_handle_irqN(dev, cp, status, ring);
2417 spin_unlock_irqrestore(&cp->lock, flags);
2424 static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2429 cas_post_rxds_ringN(cp, 1, 0);
2430 spin_lock(&cp->stat_lock[1]);
2431 cp->net_stats[1].rx_dropped++;
2432 spin_unlock(&cp->stat_lock[1]);
2436 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2440 cas_post_rxcs_ringN(cp, 1);
2447 struct cas *cp = netdev_priv(dev);
2449 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2455 spin_lock_irqsave(&cp->lock, flags);
2458 cas_mask_intr(cp);
2459 napi_schedule(&cp->napi);
2461 cas_rx_ringN(cp, 1, 0);
2466 cas_handle_irq1(cp, status);
2467 spin_unlock_irqrestore(&cp->lock, flags);
2473 struct cas *cp, const u32 status)
2477 cas_abnormal_irq(dev, cp, status);
2483 cas_post_rxds_ringN(cp, 0, 0);
2484 spin_lock(&cp->stat_lock[0]);
2485 cp->net_stats[0].rx_dropped++;
2486 spin_unlock(&cp->stat_lock[0]);
2488 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2493 cas_post_rxcs_ringN(dev, cp, 0);
2499 struct cas *cp = netdev_priv(dev);
2501 u32 status = readl(cp->regs + REG_INTR_STATUS);
2506 spin_lock_irqsave(&cp->lock, flags);
2508 cas_tx(dev, cp, status);
2514 cas_mask_intr(cp);
2515 napi_schedule(&cp->napi);
2517 cas_rx_ringN(cp, 0, 0);
2523 cas_handle_irq(dev, cp, status);
2524 spin_unlock_irqrestore(&cp->lock, flags);
2532 struct cas *cp = container_of(napi, struct cas, napi);
2533 struct net_device *dev = cp->dev;
2535 u32 status = readl(cp->regs + REG_INTR_STATUS);
2538 spin_lock_irqsave(&cp->lock, flags);
2539 cas_tx(dev, cp, status);
2540 spin_unlock_irqrestore(&cp->lock, flags);
2554 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2564 spin_lock_irqsave(&cp->lock, flags);
2566 cas_handle_irq(dev, cp, status);
2570 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2572 cas_handle_irq1(cp, status);
2578 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2580 cas_handle_irqN(dev, cp, status, 2);
2586 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2588 cas_handle_irqN(dev, cp, status, 3);
2591 spin_unlock_irqrestore(&cp->lock, flags);
2594 cas_unmask_intr(cp);
2603 struct cas *cp = netdev_priv(dev);
2605 cas_disable_irq(cp, 0);
2606 cas_interrupt(cp->pdev->irq, dev);
2607 cas_enable_irq(cp, 0);
2629 struct cas *cp = netdev_priv(dev);
2632 if (!cp->hw_running) {
2638 readl(cp->regs + REG_MIF_STATE_MACHINE));
2641 readl(cp->regs + REG_MAC_STATE_MACHINE));
2644 readl(cp->regs + REG_TX_CFG),
2645 readl(cp->regs + REG_MAC_TX_STATUS),
2646 readl(cp->regs + REG_MAC_TX_CFG),
2647 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2648 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2649 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2650 readl(cp->regs + REG_TX_SM_1),
2651 readl(cp->regs + REG_TX_SM_2));
2654 readl(cp->regs + REG_RX_CFG),
2655 readl(cp->regs + REG_MAC_RX_STATUS),
2656 readl(cp->regs + REG_MAC_RX_CFG));
2659 readl(cp->regs + REG_HP_STATE_MACHINE),
2660 readl(cp->regs + REG_HP_STATUS0),
2661 readl(cp->regs + REG_HP_STATUS1),
2662 readl(cp->regs + REG_HP_STATUS2));
2665 atomic_inc(&cp->reset_task_pending);
2666 atomic_inc(&cp->reset_task_pending_all);
2667 schedule_work(&cp->reset_task);
2669 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2670 schedule_work(&cp->reset_task);
2683 static void cas_write_txd(struct cas *cp, int ring, int entry,
2686 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2697 static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2700 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2703 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2706 cp->tx_tiny_use[ring][tentry].nbufs++;
2707 cp->tx_tiny_use[ring][entry].used = 1;
2708 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2711 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2714 struct net_device *dev = cp->dev;
2721 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2724 if (TX_BUFFS_AVAIL(cp, ring) <=
2725 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2727 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2742 entry = cp->tx_new[ring];
2743 cp->tx_skbs[ring][entry] = skb;
2747 mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
2751 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2754 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2759 tx_tiny_buf(cp, ring, entry), tabort);
2760 mapping = tx_tiny_map(cp, ring, entry, tentry);
2761 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2764 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2773 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2776 tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
2779 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2782 memcpy_from_page(tx_tiny_buf(cp, ring, entry),
2786 mapping = tx_tiny_map(cp, ring, entry, tentry);
2790 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2795 cp->tx_new[ring] = entry;
2796 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2799 netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2801 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2802 writel(entry, cp->regs + REG_TX_KICKN(ring));
2803 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2809 struct cas *cp = netdev_priv(dev);
2816 if (skb_padto(skb, cp->min_frame_size))
2822 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2827 static void cas_init_tx_dma(struct cas *cp)
2829 u64 desc_dma = cp->block_dvma;
2837 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2838 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2851 off = (unsigned long) cp->init_txds[i] -
2852 (unsigned long) cp->init_block;
2855 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2856 writel((desc_dma + off) & 0xffffffff, cp->regs +
2862 writel(val, cp->regs + REG_TX_CFG);
2868 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2869 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2870 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2871 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2873 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2874 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2875 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2876 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2880 /* Must be invoked under cp->lock. */
2881 static inline void cas_init_dma(struct cas *cp)
2883 cas_init_tx_dma(cp);
2884 cas_init_rx_dma(cp);
2887 static void cas_process_mc_list(struct cas *cp)
2895 netdev_for_each_mc_addr(ha, cp->dev) {
2901 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2903 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2905 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2918 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2921 /* Must be invoked under cp->lock. */
2922 static u32 cas_setup_multicast(struct cas *cp)
2927 if (cp->dev->flags & IFF_PROMISC) {
2930 } else if (cp->dev->flags & IFF_ALLMULTI) {
2932 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2936 cas_process_mc_list(cp);
2943 /* must be invoked under cp->stat_lock[N_TX_RINGS] */
2944 static void cas_clear_mac_err(struct cas *cp)
2946 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
2947 writel(0, cp->regs + REG_MAC_COLL_FIRST);
2948 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
2949 writel(0, cp->regs + REG_MAC_COLL_LATE);
2950 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
2951 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
2952 writel(0, cp->regs + REG_MAC_RECV_FRAME);
2953 writel(0, cp->regs + REG_MAC_LEN_ERR);
2954 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
2955 writel(0, cp->regs + REG_MAC_FCS_ERR);
2956 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
2960 static void cas_mac_reset(struct cas *cp)
2965 writel(0x1, cp->regs + REG_MAC_TX_RESET);
2966 writel(0x1, cp->regs + REG_MAC_RX_RESET);
2971 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
2979 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
2984 if (readl(cp->regs + REG_MAC_TX_RESET) |
2985 readl(cp->regs + REG_MAC_RX_RESET))
2986 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
2987 readl(cp->regs + REG_MAC_TX_RESET),
2988 readl(cp->regs + REG_MAC_RX_RESET),
2989 readl(cp->regs + REG_MAC_STATE_MACHINE));
2993 /* Must be invoked under cp->lock. */
2994 static void cas_init_mac(struct cas *cp)
2996 const unsigned char *e = &cp->dev->dev_addr[0];
2998 cas_mac_reset(cp);
3001 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3007 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3008 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3011 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3013 writel(0x00, cp->regs + REG_MAC_IPG0);
3014 writel(0x08, cp->regs + REG_MAC_IPG1);
3015 writel(0x04, cp->regs + REG_MAC_IPG2);
3018 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3021 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3030 cp->regs + REG_MAC_FRAMESIZE_MAX);
3036 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3037 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3039 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3040 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3041 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3042 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3044 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3046 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3047 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3048 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3049 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3050 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3054 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3056 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3057 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3058 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3060 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3061 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3062 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3064 cp->mac_rx_cfg = cas_setup_multicast(cp);
3066 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3067 cas_clear_mac_err(cp);
3068 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3074 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3075 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3080 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3083 /* Must be invoked under cp->lock. */
3084 static void cas_init_pause_thresholds(struct cas *cp)
3089 if (cp->rx_fifo_size <= (2 * 1024)) {
3090 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3092 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3093 if (max_frame * 3 > cp->rx_fifo_size) {
3094 cp->rx_pause_off = 7104;
3095 cp->rx_pause_on = 960;
3097 int off = (cp->rx_fifo_size - (max_frame * 2));
3099 cp->rx_pause_off = off;
3100 cp->rx_pause_on = on;
3129 static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3132 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3148 cp->regs + REG_BIM_LOCAL_DEV_EN);
3253 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3294 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3309 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3314 static void cas_check_pci_invariants(struct cas *cp)
3316 struct pci_dev *pdev = cp->pdev;
3318 cp->cas_flags = 0;
3322 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3324 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3330 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3333 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3340 cp->cas_flags |= CAS_FLAG_SATURN;
3345 static int cas_check_invariants(struct cas *cp)
3347 struct pci_dev *pdev = cp->pdev;
3353 cp->page_order = 0;
3362 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3368 cp->page_size = (PAGE_SIZE << cp->page_order);
3371 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3372 cp->rx_fifo_size = RX_FIFO_SIZE;
3377 cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn));
3378 eth_hw_addr_set(cp->dev, addr);
3379 if (cp->phy_type & CAS_PHY_SERDES) {
3380 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3385 cfg = readl(cp->regs + REG_MIF_CFG);
3387 cp->phy_type = CAS_PHY_MII_MDIO1;
3389 cp->phy_type = CAS_PHY_MII_MDIO0;
3392 cas_mif_poll(cp, 0);
3393 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3400 cp->phy_addr = i;
3401 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3402 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3404 cp->phy_id = phy_id;
3410 readl(cp->regs + REG_MIF_STATE_MACHINE));
3415 cfg = cas_phy_read(cp, MII_BMSR);
3417 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3418 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3422 /* Must be invoked under cp->lock. */
3423 static inline void cas_start_dma(struct cas *cp)
3430 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3431 writel(val, cp->regs + REG_TX_CFG);
3432 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3433 writel(val, cp->regs + REG_RX_CFG);
3436 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3437 writel(val, cp->regs + REG_MAC_TX_CFG);
3438 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3439 writel(val, cp->regs + REG_MAC_RX_CFG);
3443 val = readl(cp->regs + REG_MAC_TX_CFG);
3451 val = readl(cp->regs + REG_MAC_RX_CFG);
3454 netdev_err(cp->dev,
3456 readl(cp->regs + REG_MIF_STATE_MACHINE),
3457 readl(cp->regs + REG_MAC_STATE_MACHINE));
3463 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3465 readl(cp->regs + REG_MIF_STATE_MACHINE),
3466 readl(cp->regs + REG_MAC_STATE_MACHINE));
3469 cas_unmask_intr(cp); /* enable interrupts */
3470 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3471 writel(0, cp->regs + REG_RX_COMP_TAIL);
3473 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3476 cp->regs + REG_PLUS_RX_KICK1);
3480 /* Must be invoked under cp->lock. */
3481 static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3484 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3492 /* Must be invoked under cp->lock. */
3493 static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3503 val = cas_phy_read(cp, MII_LPA);
3515 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3516 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3527 * Must be invoked under cp->lock.
3529 static void cas_set_link_modes(struct cas *cp)
3538 if (CAS_PHY_MII(cp->phy_type)) {
3539 cas_mif_poll(cp, 0);
3540 val = cas_phy_read(cp, MII_BMCR);
3542 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3551 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3554 cas_mif_poll(cp, 1);
3557 val = readl(cp->regs + REG_PCS_MII_CTRL);
3558 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3565 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3569 if (CAS_PHY_MII(cp->phy_type)) {
3578 writel(val, cp->regs + REG_MAC_XIF_CFG);
3600 cp->regs + REG_MAC_TX_CFG);
3602 val = readl(cp->regs + REG_MAC_RX_CFG);
3605 cp->regs + REG_MAC_RX_CFG);
3607 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3609 cp->crc_size = 4;
3611 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3614 writel(val, cp->regs + REG_MAC_TX_CFG);
3619 val = readl(cp->regs + REG_MAC_RX_CFG);
3622 cp->crc_size = 0;
3623 cp->min_frame_size = CAS_MIN_MTU;
3626 cp->crc_size = 4;
3627 cp->min_frame_size = CAS_MIN_FRAME;
3630 cp->regs + REG_MAC_RX_CFG);
3631 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3634 if (netif_msg_link(cp)) {
3636 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3637 cp->rx_fifo_size,
3638 cp->rx_pause_off,
3639 cp->rx_pause_on);
3641 netdev_info(cp->dev, "TX pause enabled\n");
3643 netdev_info(cp->dev, "Pause is disabled\n");
3647 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3655 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3656 cas_start_dma(cp);
3659 /* Must be invoked under cp->lock. */
3660 static void cas_init_hw(struct cas *cp, int restart_link)
3663 cas_phy_init(cp);
3665 cas_init_pause_thresholds(cp);
3666 cas_init_mac(cp);
3667 cas_init_dma(cp);
3671 cp->timer_ticks = 0;
3672 cas_begin_auto_negotiation(cp, NULL);
3673 } else if (cp->lstate == link_up) {
3674 cas_set_link_modes(cp);
3675 netif_carrier_on(cp->dev);
3679 /* Must be invoked under cp->lock. on earlier cassini boards,
3683 static void cas_hard_reset(struct cas *cp)
3685 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3687 pci_restore_state(cp->pdev);
3691 static void cas_global_reset(struct cas *cp, int blkflag)
3696 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3704 cp->regs + REG_SW_RESET);
3706 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3714 u32 val = readl(cp->regs + REG_SW_RESET);
3719 netdev_err(cp->dev, "sw reset failed\n");
3724 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3732 PCI_ERR_BIM_DMA_READ), cp->regs +
3738 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3741 static void cas_reset(struct cas *cp, int blkflag)
3745 cas_mask_intr(cp);
3746 cas_global_reset(cp, blkflag);
3747 cas_mac_reset(cp);
3748 cas_entropy_reset(cp);
3751 val = readl(cp->regs + REG_TX_CFG);
3753 writel(val, cp->regs + REG_TX_CFG);
3755 val = readl(cp->regs + REG_RX_CFG);
3757 writel(val, cp->regs + REG_RX_CFG);
3760 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3762 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3764 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3768 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3769 cas_clear_mac_err(cp);
3770 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3774 static void cas_shutdown(struct cas *cp)
3779 cp->hw_running = 0;
3781 del_timer_sync(&cp->link_timer);
3785 while (atomic_read(&cp->reset_task_pending_mtu) ||
3786 atomic_read(&cp->reset_task_pending_spare) ||
3787 atomic_read(&cp->reset_task_pending_all))
3791 while (atomic_read(&cp->reset_task_pending))
3795 cas_lock_all_save(cp, flags);
3796 cas_reset(cp, 0);
3797 if (cp->cas_flags & CAS_FLAG_SATURN)
3798 cas_phy_powerdown(cp);
3799 cas_unlock_all_restore(cp, flags);
3804 struct cas *cp = netdev_priv(dev);
3812 atomic_inc(&cp->reset_task_pending);
3813 if ((cp->phy_type & CAS_PHY_SERDES)) {
3814 atomic_inc(&cp->reset_task_pending_all);
3816 atomic_inc(&cp->reset_task_pending_mtu);
3818 schedule_work(&cp->reset_task);
3820 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3823 schedule_work(&cp->reset_task);
3826 flush_work(&cp->reset_task);
3830 static void cas_clean_txd(struct cas *cp, int ring)
3832 struct cas_tx_desc *txd = cp->init_txds[ring];
3833 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3856 dma_unmap_page(&cp->pdev->dev, daddr, dlen,
3866 if (cp->tx_tiny_use[ring][ent].used)
3874 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3878 static inline void cas_free_rx_desc(struct cas *cp, int ring)
3880 cas_page_t **page = cp->rx_pages[ring];
3886 cas_page_free(cp, page[i]);
3892 static void cas_free_rxds(struct cas *cp)
3897 cas_free_rx_desc(cp, i);
3900 /* Must be invoked under cp->lock. */
3901 static void cas_clean_rings(struct cas *cp)
3906 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3907 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3909 cas_clean_txd(cp, i);
3912 memset(cp->init_block, 0, sizeof(struct cas_init_block));
3913 cas_clean_rxds(cp);
3914 cas_clean_rxcs(cp);
3918 static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3920 cas_page_t **page = cp->rx_pages[ring];
3925 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3931 static int cas_alloc_rxds(struct cas *cp)
3936 if (cas_alloc_rx_desc(cp, i) < 0) {
3937 cas_free_rxds(cp);
3946 struct cas *cp = container_of(work, struct cas, reset_task);
3948 int pending = atomic_read(&cp->reset_task_pending);
3950 int pending_all = atomic_read(&cp->reset_task_pending_all);
3951 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
3952 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
3958 atomic_dec(&cp->reset_task_pending);
3966 if (cp->hw_running) {
3970 netif_device_detach(cp->dev);
3971 cas_lock_all_save(cp, flags);
3973 if (cp->opened) {
3978 cas_spare_recover(cp, GFP_ATOMIC);
3996 cas_reset(cp, !(pending_all > 0));
3997 if (cp->opened)
3998 cas_clean_rings(cp);
3999 cas_init_hw(cp, (pending_all > 0));
4001 cas_reset(cp, !(pending == CAS_RESET_ALL));
4002 if (cp->opened)
4003 cas_clean_rings(cp);
4004 cas_init_hw(cp, pending == CAS_RESET_ALL);
4008 cas_unlock_all_restore(cp, flags);
4009 netif_device_attach(cp->dev);
4012 atomic_sub(pending_all, &cp->reset_task_pending_all);
4013 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4014 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4015 atomic_dec(&cp->reset_task_pending);
4017 atomic_set(&cp->reset_task_pending, 0);
4023 struct cas *cp = from_timer(cp, t, link_timer);
4028 cp->link_transition_jiffies_valid &&
4029 time_is_before_jiffies(cp->link_transition_jiffies +
4035 cp->link_transition_jiffies_valid = 0;
4038 if (!cp->hw_running)
4041 spin_lock_irqsave(&cp->lock, flags);
4042 cas_lock_tx(cp);
4043 cas_entropy_gather(cp);
4049 if (atomic_read(&cp->reset_task_pending_all) ||
4050 atomic_read(&cp->reset_task_pending_spare) ||
4051 atomic_read(&cp->reset_task_pending_mtu))
4054 if (atomic_read(&cp->reset_task_pending))
4059 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4068 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4072 cp->cas_flags &= ~rmask;
4076 if (CAS_PHY_MII(cp->phy_type)) {
4078 cas_mif_poll(cp, 0);
4079 bmsr = cas_phy_read(cp, MII_BMSR);
4085 bmsr = cas_phy_read(cp, MII_BMSR);
4086 cas_mif_poll(cp, 1);
4087 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4088 reset = cas_mii_link_check(cp, bmsr);
4090 reset = cas_pcs_link_check(cp);
4097 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4098 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4104 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4110 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4111 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4112 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4114 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4121 cas_hard_reset(cp);
4127 atomic_inc(&cp->reset_task_pending);
4128 atomic_inc(&cp->reset_task_pending_all);
4129 schedule_work(&cp->reset_task);
4131 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4133 schedule_work(&cp->reset_task);
4138 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4139 cas_unlock_tx(cp);
4140 spin_unlock_irqrestore(&cp->lock, flags);
4146 static void cas_tx_tiny_free(struct cas *cp)
4148 struct pci_dev *pdev = cp->pdev;
4152 if (!cp->tx_tiny_bufs[i])
4156 cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
4157 cp->tx_tiny_bufs[i] = NULL;
4161 static int cas_tx_tiny_alloc(struct cas *cp)
4163 struct pci_dev *pdev = cp->pdev;
4167 cp->tx_tiny_bufs[i] =
4169 &cp->tx_tiny_dvma[i], GFP_KERNEL);
4170 if (!cp->tx_tiny_bufs[i]) {
4171 cas_tx_tiny_free(cp);
4181 struct cas *cp = netdev_priv(dev);
4185 mutex_lock(&cp->pm_mutex);
4187 hw_was_up = cp->hw_running;
4190 * etc. state so it is safe to do this bit without cp->lock
4192 if (!cp->hw_running) {
4194 cas_lock_all_save(cp, flags);
4200 cas_reset(cp, 0);
4201 cp->hw_running = 1;
4202 cas_unlock_all_restore(cp, flags);
4206 if (cas_tx_tiny_alloc(cp) < 0)
4210 if (cas_alloc_rxds(cp) < 0)
4214 cas_spare_init(cp);
4215 cas_spare_recover(cp, GFP_KERNEL);
4222 if (request_irq(cp->pdev->irq, cas_interrupt,
4224 netdev_err(cp->dev, "failed to request irq !\n");
4230 napi_enable(&cp->napi);
4233 cas_lock_all_save(cp, flags);
4234 cas_clean_rings(cp);
4235 cas_init_hw(cp, !hw_was_up);
4236 cp->opened = 1;
4237 cas_unlock_all_restore(cp, flags);
4240 mutex_unlock(&cp->pm_mutex);
4244 cas_spare_free(cp);
4245 cas_free_rxds(cp);
4247 cas_tx_tiny_free(cp);
4249 mutex_unlock(&cp->pm_mutex);
4256 struct cas *cp = netdev_priv(dev);
4259 napi_disable(&cp->napi);
4262 mutex_lock(&cp->pm_mutex);
4267 cas_lock_all_save(cp, flags);
4268 cp->opened = 0;
4269 cas_reset(cp, 0);
4270 cas_phy_init(cp);
4271 cas_begin_auto_negotiation(cp, NULL);
4272 cas_clean_rings(cp);
4273 cas_unlock_all_restore(cp, flags);
4275 free_irq(cp->pdev->irq, (void *) dev);
4276 cas_spare_free(cp);
4277 cas_free_rxds(cp);
4278 cas_tx_tiny_free(cp);
4279 mutex_unlock(&cp->pm_mutex);
4330 static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4336 spin_lock_irqsave(&cp->lock, flags);
4341 hval = cas_phy_read(cp,
4345 val = readl(cp->regs + ethtool_register_table[i].offsets);
4349 spin_unlock_irqrestore(&cp->lock, flags);
4354 struct cas *cp = netdev_priv(dev);
4355 struct net_device_stats *stats = cp->net_stats;
4361 if (!cp->hw_running)
4372 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4374 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4376 readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
4378 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4380 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4381 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4384 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4387 readl(cp->regs + REG_MAC_COLL_EXCESS);
4388 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4389 readl(cp->regs + REG_MAC_COLL_LATE);
4391 cas_clear_mac_err(cp);
4394 spin_lock(&cp->stat_lock[0]);
4401 spin_unlock(&cp->stat_lock[0]);
4404 spin_lock(&cp->stat_lock[i]);
4417 spin_unlock(&cp->stat_lock[i]);
4419 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4426 struct cas *cp = netdev_priv(dev);
4431 if (!cp->hw_running)
4434 spin_lock_irqsave(&cp->lock, flags);
4435 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4438 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4439 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4448 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4449 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4456 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4458 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4459 spin_unlock_irqrestore(&cp->lock, flags);
4464 struct cas *cp = netdev_priv(dev);
4467 strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4473 struct cas *cp = netdev_priv(dev);
4482 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4488 spin_lock_irqsave(&cp->lock, flags);
4490 linkstate = cp->lstate;
4491 if (CAS_PHY_MII(cp->phy_type)) {
4493 cmd->base.phy_address = cp->phy_addr;
4507 if (cp->hw_running) {
4508 cas_mif_poll(cp, 0);
4509 bmcr = cas_phy_read(cp, MII_BMCR);
4510 cas_read_mii_link_mode(cp, &full_duplex,
4512 cas_mif_poll(cp, 1);
4521 if (cp->hw_running) {
4523 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4524 cas_read_pcs_link_mode(cp, &full_duplex,
4528 spin_unlock_irqrestore(&cp->lock, flags);
4558 if (cp->link_cntl & BMCR_ANENABLE) {
4563 if (cp->link_cntl & BMCR_SPEED100) {
4565 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4568 cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4584 struct cas *cp = netdev_priv(dev);
4602 spin_lock_irqsave(&cp->lock, flags);
4603 cas_begin_auto_negotiation(cp, cmd);
4604 spin_unlock_irqrestore(&cp->lock, flags);
4610 struct cas *cp = netdev_priv(dev);
4613 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4617 spin_lock_irqsave(&cp->lock, flags);
4618 cas_begin_auto_negotiation(cp, NULL);
4619 spin_unlock_irqrestore(&cp->lock, flags);
4626 struct cas *cp = netdev_priv(dev);
4627 return cp->lstate == link_up;
4632 struct cas *cp = netdev_priv(dev);
4633 return cp->msg_enable;
4638 struct cas *cp = netdev_priv(dev);
4639 cp->msg_enable = value;
4644 struct cas *cp = netdev_priv(dev);
4645 return min_t(int, cp->casreg_len, CAS_MAX_REGS);
4651 struct cas *cp = netdev_priv(dev);
4653 /* cas_read_regs handles locks (cp->lock). */
4654 cas_read_regs(cp, p, regs->len / sizeof(u32));
4676 struct cas *cp = netdev_priv(dev);
4677 struct net_device_stats *stats = cas_get_stats(cp->dev);
4715 struct cas *cp = netdev_priv(dev);
4723 mutex_lock(&cp->pm_mutex);
4726 data->phy_id = cp->phy_addr;
4730 spin_lock_irqsave(&cp->lock, flags);
4731 cas_mif_poll(cp, 0);
4732 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4733 cas_mif_poll(cp, 1);
4734 spin_unlock_irqrestore(&cp->lock, flags);
4739 spin_lock_irqsave(&cp->lock, flags);
4740 cas_mif_poll(cp, 0);
4741 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4742 cas_mif_poll(cp, 1);
4743 spin_unlock_irqrestore(&cp->lock, flags);
4749 mutex_unlock(&cp->pm_mutex);
4858 struct cas *cp;
4879 dev = alloc_etherdev(sizeof(*cp));
4939 cp = netdev_priv(dev);
4940 cp->pdev = pdev;
4943 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
4945 cp->dev = dev;
4946 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
4950 cp->of_node = pci_device_to_OF_node(pdev);
4953 cp->link_transition = LINK_TRANSITION_UNKNOWN;
4954 cp->link_transition_jiffies_valid = 0;
4956 spin_lock_init(&cp->lock);
4957 spin_lock_init(&cp->rx_inuse_lock);
4958 spin_lock_init(&cp->rx_spare_lock);
4960 spin_lock_init(&cp->stat_lock[i]);
4961 spin_lock_init(&cp->tx_lock[i]);
4963 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
4964 mutex_init(&cp->pm_mutex);
4966 timer_setup(&cp->link_timer, cas_link_timer, 0);
4972 atomic_set(&cp->reset_task_pending, 0);
4973 atomic_set(&cp->reset_task_pending_all, 0);
4974 atomic_set(&cp->reset_task_pending_spare, 0);
4975 atomic_set(&cp->reset_task_pending_mtu, 0);
4977 INIT_WORK(&cp->reset_task, cas_reset_task);
4981 cp->link_cntl = link_modes[link_mode];
4983 cp->link_cntl = BMCR_ANENABLE;
4984 cp->lstate = link_down;
4985 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
4986 netif_carrier_off(cp->dev);
4987 cp->timer_ticks = 0;
4990 cp->regs = pci_iomap(pdev, 0, casreg_len);
4991 if (!cp->regs) {
4995 cp->casreg_len = casreg_len;
4998 cas_check_pci_invariants(cp);
4999 cas_hard_reset(cp);
5000 cas_reset(cp, 0);
5001 if (cas_check_invariants(cp))
5003 if (cp->cas_flags & CAS_FLAG_SATURN)
5004 cas_saturn_firmware_init(cp);
5006 cp->init_block =
5008 &cp->block_dvma, GFP_KERNEL);
5009 if (!cp->init_block) {
5015 cp->init_txds[i] = cp->init_block->txds[i];
5018 cp->init_rxds[i] = cp->init_block->rxds[i];
5021 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5024 skb_queue_head_init(&cp->rx_flows[i]);
5031 netif_napi_add(dev, &cp->napi, cas_poll);
5037 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5051 i = readl(cp->regs + REG_BIM_CFG);
5053 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5056 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5060 cp->hw_running = 1;
5061 cas_entropy_reset(cp);
5062 cas_phy_init(cp);
5063 cas_begin_auto_negotiation(cp, NULL);
5068 cp->init_block, cp->block_dvma);
5071 mutex_lock(&cp->pm_mutex);
5072 if (cp->hw_running)
5073 cas_shutdown(cp);
5074 mutex_unlock(&cp->pm_mutex);
5076 vfree(cp->fw_data);
5078 pci_iounmap(pdev, cp->regs);
5100 struct cas *cp;
5104 cp = netdev_priv(dev);
5107 vfree(cp->fw_data);
5109 mutex_lock(&cp->pm_mutex);
5110 cancel_work_sync(&cp->reset_task);
5111 if (cp->hw_running)
5112 cas_shutdown(cp);
5113 mutex_unlock(&cp->pm_mutex);
5116 if (cp->orig_cacheline_size) {
5121 cp->orig_cacheline_size);
5125 cp->init_block, cp->block_dvma);
5126 pci_iounmap(pdev, cp->regs);
5135 struct cas *cp = netdev_priv(dev);
5138 mutex_lock(&cp->pm_mutex);
5141 if (cp->opened) {
5144 cas_lock_all_save(cp, flags);
5151 cas_reset(cp, 0);
5152 cas_clean_rings(cp);
5153 cas_unlock_all_restore(cp, flags);
5156 if (cp->hw_running)
5157 cas_shutdown(cp);
5158 mutex_unlock(&cp->pm_mutex);
5166 struct cas *cp = netdev_priv(dev);
5170 mutex_lock(&cp->pm_mutex);
5171 cas_hard_reset(cp);
5172 if (cp->opened) {
5174 cas_lock_all_save(cp, flags);
5175 cas_reset(cp, 0);
5176 cp->hw_running = 1;
5177 cas_clean_rings(cp);
5178 cas_init_hw(cp, 1);
5179 cas_unlock_all_restore(cp, flags);
5183 mutex_unlock(&cp->pm_mutex);