Cross-reference listing for the pxa168_eth Ethernet driver, from
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/

Lines matching refs: pep

269 static int pxa168_init_hw(struct pxa168_eth_private *pep);
276 static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
278 return readl(pep->base + offset);
281 static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
283 writel(data, pep->base + offset);
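
These two inline helpers wrap readl()/writel() over the ioremap'd register window; every register access in the rest of the listing goes through them. A minimal sketch of the pattern, with a trimmed-down (assumed) private struct:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Assumed, trimmed-down layout: the real struct also carries the
     * rings, the PHY, the MDIO bus, timers, and more. */
    struct pxa168_eth_private {
            void __iomem *base;     /* ioremap'd register window */
    };

    static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
    {
            return readl(pep->base + offset);       /* 32-bit MMIO read */
    }

    static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
    {
            writel(data, pep->base + offset);       /* 32-bit MMIO write */
    }
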
286 static void abort_dma(struct pxa168_eth_private *pep)
292 wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
296 while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
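
abort_dma() requests an abort of both DMA directions (SDMA_CMD_AR | SDMA_CMD_AT) and then polls SDMA_CMD until the hardware clears those bits. A condensed sketch of the write-then-poll pattern; the retry bound and delay are illustrative, and the in-tree loop nests a second inner poll:

    #include <linux/delay.h>

    static void abort_dma(struct pxa168_eth_private *pep)
    {
            int max_retries = 40;   /* illustrative bound */

            do {
                    wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
                    udelay(100);
            } while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
                     && --max_retries > 0);

            if (max_retries <= 0)
                    pr_err("%s : DMA Stuck\n", __func__);
    }
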
306 static int ethernet_phy_get(struct pxa168_eth_private *pep)
310 reg_data = rdl(pep, PHY_ADDRESS);
312 return (reg_data >> (5 * pep->port_num)) & 0x1f;
315 static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
318 int addr_shift = 5 * pep->port_num;
320 reg_data = rdl(pep, PHY_ADDRESS);
323 wrl(pep, PHY_ADDRESS, reg_data);
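
The PHY_ADDRESS register packs one 5-bit PHY address per port, at bit offset 5 * port_num; hence the shift-and-mask in the getter and the read-modify-write in the setter. A sketch assuming that layout:

    static int ethernet_phy_get(struct pxa168_eth_private *pep)
    {
            return (rdl(pep, PHY_ADDRESS) >> (5 * pep->port_num)) & 0x1f;
    }

    static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
    {
            int shift = 5 * pep->port_num;
            u32 reg = rdl(pep, PHY_ADDRESS);

            reg &= ~(0x1f << shift);                /* clear this port's field */
            reg |= (phy_addr & 0x1f) << shift;      /* insert the new address */
            wrl(pep, PHY_ADDRESS, reg);
    }
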
326 static void ethernet_phy_reset(struct pxa168_eth_private *pep)
330 data = phy_read(pep->phy, MII_BMCR);
335 if (phy_write(pep->phy, MII_BMCR, data) < 0)
339 data = phy_read(pep->phy, MII_BMCR);
345 struct pxa168_eth_private *pep = netdev_priv(dev);
350 while (pep->rx_desc_count < pep->rx_ring_size) {
353 skb = dev_alloc_skb(pep->skb_size);
358 pep->rx_desc_count++;
360 used_rx_desc = pep->rx_used_desc_q;
361 p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
368 pep->rx_skb[used_rx_desc] = skb;
376 pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
379 pep->rx_resource_err = 0;
388 if (pep->rx_desc_count == 0) {
389 pep->timeout.expires = jiffies + (HZ / 10);
390 add_timer(&pep->timeout);
396 struct pxa168_eth_private *pep = (void *)data;
397 napi_schedule(&pep->napi);
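
rxq_refill() keeps handing fresh skbs to the hardware until the RX ring is full, advancing the "used" index modulo the ring size; if allocation fails while the ring is completely empty, it arms a 100 ms timer (HZ / 10) whose callback simply schedules NAPI to retry. A sketch with the descriptor and DMA-mapping steps elided:

    static void rxq_refill(struct net_device *dev)
    {
            struct pxa168_eth_private *pep = netdev_priv(dev);
            struct sk_buff *skb;

            while (pep->rx_desc_count < pep->rx_ring_size) {
                    skb = dev_alloc_skb(pep->skb_size);
                    if (!skb)
                            break;          /* retry later from the timer */
                    pep->rx_desc_count++;
                    /* ... dma_map_single() the data and hand the
                     * descriptor at rx_used_desc_q to the hardware ... */
                    pep->rx_used_desc_q =
                            (pep->rx_used_desc_q + 1) % pep->rx_ring_size;
            }
            if (pep->rx_desc_count == 0) {
                    pep->timeout.expires = jiffies + (HZ / 10);
                    add_timer(&pep->timeout);
            }
    }
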
463 * pep - ETHERNET .
476 static int add_del_hash_entry(struct pxa168_eth_private *pep,
506 start = (struct addr_table_entry *)(pep->htpr);
561 static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
567 add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
569 add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
572 static int init_hash_table(struct pxa168_eth_private *pep)
587 if (pep->htpr == NULL) {
588 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
590 &pep->htpr_dma, GFP_KERNEL);
591 if (pep->htpr == NULL)
594 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
595 wrl(pep, HTPR, pep->htpr_dma);
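
init_hash_table() allocates the MAC-address hash table once as a coherent DMA buffer, zeroes it, and programs its bus address into the HTPR register so the MAC can walk the table in hardware. A sketch; HASH_ADDR_TABLE_SIZE is the driver's table-size constant:

    static int init_hash_table(struct pxa168_eth_private *pep)
    {
            if (pep->htpr == NULL) {
                    pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
                                                   HASH_ADDR_TABLE_SIZE,
                                                   &pep->htpr_dma, GFP_KERNEL);
                    if (pep->htpr == NULL)
                            return -ENOMEM;
            }
            memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
            wrl(pep, HTPR, pep->htpr_dma);  /* tell the MAC where the table is */
            return 0;
    }
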
601 struct pxa168_eth_private *pep = netdev_priv(dev);
605 val = rdl(pep, PORT_CONFIG);
610 wrl(pep, PORT_CONFIG, val);
616 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
617 update_hash_table_mac_address(pep, NULL, dev->dev_addr);
620 update_hash_table_mac_address(pep, NULL, ha->addr);
626 struct pxa168_eth_private *pep = netdev_priv(dev);
634 update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
642 struct pxa168_eth_private *pep = netdev_priv(dev);
646 if (pep->phy != NULL) {
649 pxa168_get_settings(pep->dev, &cmd);
650 ethernet_phy_reset(pep);
651 pxa168_set_settings(pep->dev, &cmd);
655 tx_curr_desc = pep->tx_curr_desc_q;
656 wrl(pep, ETH_C_TX_DESC_1,
657 (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
660 rx_curr_desc = pep->rx_curr_desc_q;
661 wrl(pep, ETH_C_RX_DESC_0,
662 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
664 wrl(pep, ETH_F_RX_DESC_0,
665 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
668 wrl(pep, INT_CAUSE, 0);
671 wrl(pep, INT_MASK, ALL_INTS);
673 val = rdl(pep, PORT_CONFIG);
675 wrl(pep, PORT_CONFIG, val);
678 val = rdl(pep, SDMA_CMD);
680 wrl(pep, SDMA_CMD, val);
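
The port-start path tells the DMA engine where the rings live: each pointer register gets a bus address computed as ring base plus index times descriptor size. A sketch of just that arithmetic (the helper name is hypothetical; register names follow the listing):

    static void program_desc_pointers(struct pxa168_eth_private *pep)
    {
            u32 tx = (u32)(pep->tx_desc_dma +
                           pep->tx_curr_desc_q * sizeof(struct tx_desc));
            u32 rx = (u32)(pep->rx_desc_dma +
                           pep->rx_curr_desc_q * sizeof(struct rx_desc));

            wrl(pep, ETH_C_TX_DESC_1, tx);  /* current TX descriptor */
            wrl(pep, ETH_C_RX_DESC_0, rx);  /* current RX descriptor */
            wrl(pep, ETH_F_RX_DESC_0, rx);  /* first RX == current at start */
    }
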
685 struct pxa168_eth_private *pep = netdev_priv(dev);
689 wrl(pep, INT_MASK, 0);
692 wrl(pep, INT_CAUSE, 0);
695 val = rdl(pep, SDMA_CMD);
701 abort_dma(pep);
704 val = rdl(pep, PORT_CONFIG);
706 wrl(pep, PORT_CONFIG, val);
715 struct pxa168_eth_private *pep = netdev_priv(dev);
726 pep->work_todo &= ~WORK_TX_DONE;
727 while (pep->tx_desc_count > 0) {
728 tx_index = pep->tx_used_desc_q;
729 desc = &pep->p_tx_desc_area[tx_index];
739 pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
740 pep->tx_desc_count--;
743 skb = pep->tx_skb[tx_index];
745 pep->tx_skb[tx_index] = NULL;
764 struct pxa168_eth_private *pep = netdev_priv(dev);
767 dev->name, pep->tx_desc_count);
769 schedule_work(&pep->tx_timeout_task);
774 struct pxa168_eth_private *pep = container_of(work,
777 struct net_device *dev = pep->dev;
784 struct pxa168_eth_private *pep = netdev_priv(dev);
795 if (pep->rx_resource_err)
797 rx_curr_desc = pep->rx_curr_desc_q;
798 rx_used_desc = pep->rx_used_desc_q;
799 rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
804 skb = pep->rx_skb[rx_curr_desc];
805 pep->rx_skb[rx_curr_desc] = NULL;
807 rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
808 pep->rx_curr_desc_q = rx_next_curr_desc;
813 pep->rx_resource_err = 1;
814 pep->rx_desc_count--;
860 static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
866 icr = rdl(pep, INT_CAUSE);
870 wrl(pep, INT_CAUSE, ~icr);
872 pep->work_todo |= WORK_TX_DONE;
878 pep->work_todo |= WORK_LINK;
884 static void handle_link_event(struct pxa168_eth_private *pep)
886 struct net_device *dev = pep->dev;
892 port_status = rdl(pep, PORT_STATUS);
918 struct pxa168_eth_private *pep = netdev_priv(dev);
920 if (unlikely(!pxa168_eth_collect_events(pep, dev)))
923 wrl(pep, INT_MASK, 0);
924 napi_schedule(&pep->napi);
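
The hard interrupt handler does the classic NAPI hand-off: collect and acknowledge the cause bits, bail with IRQ_NONE if the interrupt was not ours, otherwise mask everything (INT_MASK = 0) and let the poll routine do the work and re-enable interrupts. Reconstructed from the fragments above:

    #include <linux/interrupt.h>

    static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct pxa168_eth_private *pep = netdev_priv(dev);

            if (unlikely(!pxa168_eth_collect_events(pep, dev)))
                    return IRQ_NONE;        /* not our interrupt */
            wrl(pep, INT_MASK, 0);          /* masked until the poll re-enables */
            napi_schedule(&pep->napi);
            return IRQ_HANDLED;
    }
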
928 static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
938 skb_size = pep->dev->mtu + 36;
945 pep->skb_size = (skb_size + 7) & ~7;
953 pep->skb_size += SKB_DMA_REALIGN;
957 static int set_port_config_ext(struct pxa168_eth_private *pep)
961 pxa168_eth_recalc_skb_size(pep);
962 if (pep->skb_size <= 1518)
964 else if (pep->skb_size <= 1536)
966 else if (pep->skb_size <= 2048)
972 wrl(pep,
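
The receive-buffer size is the MTU plus 36 bytes of header/CRC overhead, rounded up to an 8-byte boundary for the DMA engine, plus realignment slack; set_port_config_ext() then picks the smallest hardware frame-size bucket (1518, 1536, 2048, ...) that fits. A sketch of the sizing arithmetic; with the default MTU of 1500 the pre-slack size works out to exactly 1536:

    static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
    {
            int skb_size = pep->dev->mtu + 36;      /* payload + overhead */

            skb_size = (skb_size + 7) & ~7;         /* round up to 8 bytes */
            skb_size += SKB_DMA_REALIGN;            /* realignment slack */
            pep->skb_size = skb_size;
    }
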
981 static int pxa168_init_hw(struct pxa168_eth_private *pep)
986 wrl(pep, INT_MASK, 0);
987 wrl(pep, INT_CAUSE, 0);
989 wrl(pep, INT_W_CLEAR, 0);
993 abort_dma(pep);
995 err = init_hash_table(pep);
999 wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
1005 wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
1006 set_port_config_ext(pep);
1013 struct pxa168_eth_private *pep = netdev_priv(dev);
1016 int rx_desc_num = pep->rx_ring_size;
1019 pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
1021 if (!pep->rx_skb) {
1026 pep->rx_desc_count = 0;
1027 size = pep->rx_ring_size * sizeof(struct rx_desc);
1028 pep->rx_desc_area_size = size;
1029 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1030 &pep->rx_desc_dma, GFP_KERNEL);
1031 if (!pep->p_rx_desc_area) {
1036 memset((void *)pep->p_rx_desc_area, 0, size);
1038 p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
1040 p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
1044 pep->rx_curr_desc_q = 0;
1045 pep->rx_used_desc_q = 0;
1046 pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
1049 kfree(pep->rx_skb);
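
rxq_init() allocates the rx_skb pointer array and the descriptor ring as one coherent DMA block, then chains the descriptors: entry i's next_desc_ptr holds the bus address of entry (i + 1) % n, so the flat allocation behaves as a circular list the DMA engine can follow. The chaining loop, sketched with the names from the listing:

    int i;

    for (i = 0; i < rx_desc_num; i++)
            p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
                    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
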
1055 struct pxa168_eth_private *pep = netdev_priv(dev);
1059 for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
1060 if (pep->rx_skb[curr]) {
1061 dev_kfree_skb(pep->rx_skb[curr]);
1062 pep->rx_desc_count--;
1065 if (pep->rx_desc_count)
1068 pep->rx_desc_count);
1070 if (pep->p_rx_desc_area)
1071 dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
1072 pep->p_rx_desc_area, pep->rx_desc_dma);
1073 kfree(pep->rx_skb);
1078 struct pxa168_eth_private *pep = netdev_priv(dev);
1081 int tx_desc_num = pep->tx_ring_size;
1083 pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
1085 if (!pep->tx_skb) {
1090 pep->tx_desc_count = 0;
1091 size = pep->tx_ring_size * sizeof(struct tx_desc);
1092 pep->tx_desc_area_size = size;
1093 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1094 &pep->tx_desc_dma, GFP_KERNEL);
1095 if (!pep->p_tx_desc_area) {
1100 memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
1102 p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
1104 p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
1107 pep->tx_curr_desc_q = 0;
1108 pep->tx_used_desc_q = 0;
1109 pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
1112 kfree(pep->tx_skb);
1118 struct pxa168_eth_private *pep = netdev_priv(dev);
1122 BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
1124 if (pep->p_tx_desc_area)
1125 dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
1126 pep->p_tx_desc_area, pep->tx_desc_dma);
1127 kfree(pep->tx_skb);
1132 struct pxa168_eth_private *pep = netdev_priv(dev);
1141 pep->rx_resource_err = 0;
1148 pep->rx_used_desc_q = 0;
1149 pep->rx_curr_desc_q = 0;
1153 pep->rx_used_desc_q = 0;
1154 pep->rx_curr_desc_q = 0;
1157 napi_enable(&pep->napi);
1168 struct pxa168_eth_private *pep = netdev_priv(dev);
1172 wrl(pep, INT_MASK, 0);
1173 wrl(pep, INT_CAUSE, 0);
1175 wrl(pep, INT_W_CLEAR, 0);
1176 napi_disable(&pep->napi);
1177 del_timer_sync(&pep->timeout);
1189 struct pxa168_eth_private *pep = netdev_priv(dev);
1195 retval = set_port_config_ext(pep);
1216 static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
1220 tx_desc_curr = pep->tx_curr_desc_q;
1221 pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
1222 BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
1223 pep->tx_desc_count++;
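
eth_alloc_tx_desc_index() claims the next TX descriptor by advancing the producer index modulo the ring size; the BUG_ON fires if the producer catches the consumer, i.e. the caller submitted to a full ring. Reconstructed from the fragments above:

    static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
    {
            int tx_desc_curr = pep->tx_curr_desc_q;

            pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
            BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);  /* ring full */
            pep->tx_desc_count++;
            return tx_desc_curr;
    }
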
1230 struct pxa168_eth_private *pep =
1232 struct net_device *dev = pep->dev;
1235 if (unlikely(pep->work_todo & WORK_LINK)) {
1236 pep->work_todo &= ~(WORK_LINK);
1237 handle_link_event(pep);
1246 && pep->tx_ring_size - pep->tx_desc_count > 1) {
1252 wrl(pep, INT_MASK, ALL_INTS);
1260 struct pxa168_eth_private *pep = netdev_priv(dev);
1266 tx_index = eth_alloc_tx_desc_index(pep);
1267 desc = &pep->p_tx_desc_area[tx_index];
1269 pep->tx_skb[tx_index] = skb;
1276 wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
1281 if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
1289 static int smi_wait_ready(struct pxa168_eth_private *pep)
1294 for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
1305 struct pxa168_eth_private *pep = bus->priv;
1309 if (smi_wait_ready(pep)) {
1313 wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
1315 for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
1330 struct pxa168_eth_private *pep = bus->priv;
1332 if (smi_wait_ready(pep)) {
1337 wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
1340 if (smi_wait_ready(pep)) {
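
The SMI (MDIO) accessors follow one pattern: poll SMI_BUSY until the interface is free, write the opcode with the PHY and register numbers packed into the SMI register, and for reads poll again for SMI_R_VALID. A sketch of the read side; PHY_WAIT_ITERATIONS stands in for the driver's poll bound:

    #include <linux/delay.h>
    #include <linux/phy.h>

    static int smi_wait_ready(struct pxa168_eth_private *pep)
    {
            int i;

            for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
                    if (i == PHY_WAIT_ITERATIONS)
                            return -ETIMEDOUT;
                    msleep(10);
            }
            return 0;
    }

    static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
    {
            struct pxa168_eth_private *pep = bus->priv;
            u32 val;
            int i;

            if (smi_wait_ready(pep))
                    return -ETIMEDOUT;
            wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
            for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
                    if (i == PHY_WAIT_ITERATIONS)
                            return -ETIMEDOUT;
                    msleep(10);
            }
            return val & 0xffff;    /* low 16 bits carry the register value */
    }
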
1351 struct pxa168_eth_private *pep = netdev_priv(dev);
1352 if (pep->phy != NULL)
1353 return phy_mii_ioctl(pep->phy, ifr, cmd);
1358 static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
1360 struct mii_bus *bus = pep->smi_bus;
1368 start = ethernet_phy_get(pep);
1384 ethernet_phy_set_addr(pep, addr);
1391 static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
1393 struct phy_device *phy = pep->phy;
1394 ethernet_phy_reset(pep);
1396 phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
1415 struct pxa168_eth_private *pep = netdev_priv(dev);
1417 if (pep->pd->init)
1418 pep->pd->init();
1419 pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
1420 if (pep->phy != NULL)
1421 phy_init(pep, pep->pd->speed, pep->pd->duplex);
1422 update_hash_table_mac_address(pep, NULL, dev->dev_addr);
1429 struct pxa168_eth_private *pep = netdev_priv(dev);
1432 err = phy_read_status(pep->phy);
1434 err = phy_ethtool_gset(pep->phy, cmd);
1441 struct pxa168_eth_private *pep = netdev_priv(dev);
1443 return phy_ethtool_sset(pep->phy, cmd);
1481 struct pxa168_eth_private *pep = NULL;
1504 pep = netdev_priv(dev);
1505 pep->dev = dev;
1506 pep->clk = clk;
1512 pep->base = ioremap(res->start, res->end - res->start + 1);
1513 if (pep->base == NULL) {
1525 INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
1530 pep->pd = pdev->dev.platform_data;
1531 pep->rx_ring_size = NUM_RX_DESCS;
1532 if (pep->pd->rx_queue_size)
1533 pep->rx_ring_size = pep->pd->rx_queue_size;
1535 pep->tx_ring_size = NUM_TX_DESCS;
1536 if (pep->pd->tx_queue_size)
1537 pep->tx_ring_size = pep->pd->tx_queue_size;
1539 pep->port_num = pep->pd->port_number;
1541 BUG_ON(pep->port_num > 2);
1542 netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
1544 memset(&pep->timeout, 0, sizeof(struct timer_list));
1545 init_timer(&pep->timeout);
1546 pep->timeout.function = rxq_refill_timer_wrapper;
1547 pep->timeout.data = (unsigned long)pep;
1549 pep->smi_bus = mdiobus_alloc();
1550 if (pep->smi_bus == NULL) {
1554 pep->smi_bus->priv = pep;
1555 pep->smi_bus->name = "pxa168_eth smi";
1556 pep->smi_bus->read = pxa168_smi_read;
1557 pep->smi_bus->write = pxa168_smi_write;
1558 snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
1559 pep->smi_bus->parent = &pdev->dev;
1560 pep->smi_bus->phy_mask = 0xffffffff;
1561 err = mdiobus_register(pep->smi_bus);
1565 pxa168_init_hw(pep);
1576 mdiobus_unregister(pep->smi_bus);
1578 mdiobus_free(pep->smi_bus);
1580 iounmap(pep->base);
1592 struct pxa168_eth_private *pep = netdev_priv(dev);
1594 if (pep->htpr) {
1595 dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
1596 pep->htpr, pep->htpr_dma);
1597 pep->htpr = NULL;
1599 if (pep->clk) {
1600 clk_disable(pep->clk);
1601 clk_put(pep->clk);
1602 pep->clk = NULL;
1604 if (pep->phy != NULL)
1605 phy_detach(pep->phy);
1607 iounmap(pep->base);
1608 pep->base = NULL;
1609 mdiobus_unregister(pep->smi_bus);
1610 mdiobus_free(pep->smi_bus);
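
The remove path tears things down roughly in the reverse order probe set them up: free the DMA hash table, disable and drop the clock, detach the PHY, unmap the register window, then unregister and free the MDIO bus. A sketch of that ordering (the unregister_netdev()/free_netdev() steps are elided):

    static int pxa168_eth_remove(struct platform_device *pdev)
    {
            struct net_device *dev = platform_get_drvdata(pdev);
            struct pxa168_eth_private *pep = netdev_priv(dev);

            if (pep->htpr)
                    dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
                                      pep->htpr, pep->htpr_dma);
            if (pep->clk) {
                    clk_disable(pep->clk);
                    clk_put(pep->clk);
            }
            if (pep->phy)
                    phy_detach(pep->phy);
            iounmap(pep->base);
            mdiobus_unregister(pep->smi_bus);
            mdiobus_free(pep->smi_bus);
            return 0;
    }
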