Lines matching refs:bp in drivers/net/ethernet/broadcom/b44.c (each entry below is prefixed with its line number in that file)

164 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
166 return ssb_read32(bp->sdev, reg);
169 static inline void bw32(const struct b44 *bp,
172 ssb_write32(bp->sdev, reg, val);
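
These two helpers are the only register accessors in the driver; everything else goes through them. Reconstructed in full from the fragments above (the bw32() parameter list is assumed from the stock b44.c), they are thin wrappers over the SSB core's MMIO routines:

	static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
	{
		return ssb_read32(bp->sdev, reg);
	}

	static inline void bw32(const struct b44 *bp,
				unsigned long reg, unsigned long val)
	{
		ssb_write32(bp->sdev, reg, val);
	}
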
175 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
181 u32 val = br32(bp, reg);
191 netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
199 static inline void __b44_cam_write(struct b44 *bp,
208 bw32(bp, B44_CAM_DATA_LO, val);
212 bw32(bp, B44_CAM_DATA_HI, val);
213 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
215 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
218 static inline void __b44_disable_ints(struct b44 *bp)
220 bw32(bp, B44_IMASK, 0);
223 static void b44_disable_ints(struct b44 *bp)
225 __b44_disable_ints(bp);
228 br32(bp, B44_IMASK);
231 static void b44_enable_ints(struct b44 *bp)
233 bw32(bp, B44_IMASK, bp->imask);
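
Note the read-back of B44_IMASK after clearing it: writes on this bus are posted, so b44_disable_ints() issues a dummy read to force the mask write out to the chip before returning. A sketch of the pattern, grounded in the fragments above:

	static void b44_disable_ints(struct b44 *bp)
	{
		__b44_disable_ints(bp);

		/* Flush the posted bw32(): the read cannot complete
		 * until the preceding write has reached the device. */
		br32(bp, B44_IMASK);
	}
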
236 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
240 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
241 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
246 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
247 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
252 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
254 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
255 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
261 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
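
Both MDIO accessors follow one sequence: acknowledge any stale EMAC_INT_MII event, post a command word to B44_MDIO_DATA, then poll the status bit via b44_wait_bit(). A reconstruction of the read side, with the MDIO_DATA_* field shifts assumed from b44.h:

	static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
	{
		int err;

		bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);	/* ack stale MII event */
		bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
					 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
					 (phy_addr << MDIO_DATA_PMD_SHIFT) |
					 (reg << MDIO_DATA_RA_SHIFT) |
					 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
		err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
		*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

		return err;
	}
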
264 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
266 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
269 return __b44_readphy(bp, bp->phy_addr, reg, val);
272 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
274 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
277 return __b44_writephy(bp, bp->phy_addr, reg, val);
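
The branch elided from the listing simply short-circuits when an external PHY is attached, since those are driven through phylib rather than these helpers; presumably:

	static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
	{
		if (bp->flags & B44_FLAG_EXTERNAL_PHY)
			return 0;	/* handled by phylib, not these helpers */

		return __b44_readphy(bp, bp->phy_addr, reg, val);
	}
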
284 struct b44 *bp = netdev_priv(dev);
285 int rc = __b44_readphy(bp, phy_id, location, &val);
294 struct b44 *bp = netdev_priv(dev);
295 __b44_writephy(bp, phy_id, location, val);
301 struct b44 *bp = bus->priv;
302 int rc = __b44_readphy(bp, phy_id, location, &val);
311 struct b44 *bp = bus->priv;
312 return __b44_writephy(bp, phy_id, location, val);
315 static int b44_phy_reset(struct b44 *bp)
320 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
322 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
326 err = b44_readphy(bp, MII_BMCR, &val);
329 netdev_err(bp->dev, "PHY Reset would not complete\n");
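
b44_phy_reset() writes BMCR_RESET and then reads BMCR back once: the reset bit self-clears when the PHY finishes. A sketch of the core, with the 100 microsecond settle delay assumed from the stock driver:

	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (err == 0 && (val & BMCR_RESET)) {
		/* Still set after the delay: the PHY never came back. */
		netdev_err(bp->dev, "PHY Reset would not complete\n");
		err = -ENODEV;
	}
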
337 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
341 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
342 bp->flags |= pause_flags;
344 val = br32(bp, B44_RXCONFIG);
349 bw32(bp, B44_RXCONFIG, val);
351 val = br32(bp, B44_MAC_FLOW);
357 bw32(bp, B44_MAC_FLOW, val);
360 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
376 __b44_set_flow_ctrl(bp, pause_enab);
381 static void b44_wap54g10_workaround(struct b44 *bp)
395 err = __b44_readphy(bp, 0, MII_BMCR, &val);
401 err = __b44_writephy(bp, 0, MII_BMCR, val);
410 static inline void b44_wap54g10_workaround(struct b44 *bp)
415 static int b44_setup_phy(struct b44 *bp)
420 b44_wap54g10_workaround(bp);
422 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
424 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
426 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
429 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
431 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
435 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
438 if (bp->flags & B44_FLAG_ADV_10HALF)
440 if (bp->flags & B44_FLAG_ADV_10FULL)
442 if (bp->flags & B44_FLAG_ADV_100HALF)
444 if (bp->flags & B44_FLAG_ADV_100FULL)
447 if (bp->flags & B44_FLAG_PAUSE_AUTO)
450 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
452 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
458 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
461 if (bp->flags & B44_FLAG_100_BASE_T)
463 if (bp->flags & B44_FLAG_FULL_DUPLEX)
465 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
472 b44_set_flow_ctrl(bp, 0, 0);
479 static void b44_stats_update(struct b44 *bp)
484 val = &bp->hw_stats.tx_good_octets;
485 u64_stats_update_begin(&bp->hw_stats.syncp);
488 *val++ += br32(bp, reg);
492 *val++ += br32(bp, reg);
495 u64_stats_update_end(&bp->hw_stats.syncp);
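
The MIB counters are configured clear-on-read (MIB_CTRL_CLR_ON_READ, set in b44_init_hw()), so each br32() here drains one hardware counter into the next u64 field of bp->hw_stats, whose field order mirrors the register layout; the syncp sequence counter lets 32-bit readers snapshot the 64-bit totals consistently. A sketch of the accumulation loops, assuming the register range macros from b44.h:

	val = &bp->hw_stats.tx_good_octets;
	u64_stats_update_begin(&bp->hw_stats.syncp);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		*val++ += br32(bp, reg);	/* the read clears the HW counter */
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		*val++ += br32(bp, reg);
	u64_stats_update_end(&bp->hw_stats.syncp);
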
498 static void b44_link_report(struct b44 *bp)
500 if (!netif_carrier_ok(bp->dev)) {
501 netdev_info(bp->dev, "Link is down\n");
503 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
504 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
505 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
507 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
508 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
509 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
513 static void b44_check_phy(struct b44 *bp)
517 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
518 bp->flags |= B44_FLAG_100_BASE_T;
519 if (!netif_carrier_ok(bp->dev)) {
520 u32 val = br32(bp, B44_TX_CTRL);
521 if (bp->flags & B44_FLAG_FULL_DUPLEX)
525 bw32(bp, B44_TX_CTRL, val);
526 netif_carrier_on(bp->dev);
527 b44_link_report(bp);
532 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
533 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
536 bp->flags |= B44_FLAG_100_BASE_T;
538 bp->flags &= ~B44_FLAG_100_BASE_T;
540 bp->flags |= B44_FLAG_FULL_DUPLEX;
542 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
544 if (!netif_carrier_ok(bp->dev) &&
546 u32 val = br32(bp, B44_TX_CTRL);
549 if (bp->flags & B44_FLAG_FULL_DUPLEX)
553 bw32(bp, B44_TX_CTRL, val);
555 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
556 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
557 !b44_readphy(bp, MII_LPA, &remote_adv))
558 b44_set_flow_ctrl(bp, local_adv, remote_adv);
561 netif_carrier_on(bp->dev);
562 b44_link_report(bp);
563 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
565 netif_carrier_off(bp->dev);
566 b44_link_report(bp);
570 netdev_warn(bp->dev, "Remote fault detected in PHY\n");
572 netdev_warn(bp->dev, "Jabber detected in PHY\n");
578 struct b44 *bp = from_timer(bp, t, timer);
580 spin_lock_irq(&bp->lock);
582 b44_check_phy(bp);
584 b44_stats_update(bp);
586 spin_unlock_irq(&bp->lock);
588 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
591 static void b44_tx(struct b44 *bp)
596 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
600 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
601 struct ring_info *rp = &bp->tx_buffers[cons];
606 dma_unmap_single(bp->sdev->dma_dev,
618 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
619 bp->tx_cons = cons;
620 if (netif_queue_stopped(bp->dev) &&
621 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
622 netif_wake_queue(bp->dev);
624 bw32(bp, B44_GPTIMER, 0);
632 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
644 src_map = &bp->rx_buffers[src_idx];
646 map = &bp->rx_buffers[dest_idx];
647 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
651 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
657 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
660 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
661 dma_unmap_single(bp->sdev->dma_dev, mapping,
667 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
670 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
672 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
673 dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
677 bp->force_copybreak = 1;
695 dp = &bp->rx_ring[dest_idx];
697 dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
699 if (bp->flags & B44_FLAG_RX_RING_HACK)
700 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
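
The doubled mapping checks above are the driver's work-around for the chip's 30-bit DMA limit: if the first mapping lands above the 1 GB boundary, the skb is freed and reallocated from GFP_DMA memory, and the second mapping is verified the same way before giving up. Roughly, under those assumptions:

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Buffer sits above the 1 GB the DMA engine can address. */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (!skb)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		/* ... checked once more; a second failure is fatal */
	}
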
707 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
716 dest_desc = &bp->rx_ring[dest_idx];
717 dest_map = &bp->rx_buffers[dest_idx];
718 src_desc = &bp->rx_ring[src_idx];
719 src_map = &bp->rx_buffers[src_idx];
727 if (bp->flags & B44_FLAG_RX_RING_HACK)
728 b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
743 if (bp->flags & B44_FLAG_RX_RING_HACK)
744 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
748 dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
753 static int b44_rx(struct b44 *bp, int budget)
759 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
761 cons = bp->rx_cons;
764 struct ring_info *rp = &bp->rx_buffers[cons];
770 dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
778 b44_recycle_rx(bp, cons, bp->rx_prod);
780 bp->dev->stats.rx_dropped++;
799 if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
801 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
804 dma_unmap_single(bp->sdev->dma_dev, map,
812 b44_recycle_rx(bp, cons, bp->rx_prod);
813 copy_skb = napi_alloc_skb(&bp->napi, len);
824 skb->protocol = eth_type_trans(skb, bp->dev);
829 bp->rx_prod = (bp->rx_prod + 1) &
834 bp->rx_cons = cons;
835 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
842 struct b44 *bp = container_of(napi, struct b44, napi);
846 spin_lock_irqsave(&bp->lock, flags);
848 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
849 /* spin_lock(&bp->tx_lock); */
850 b44_tx(bp);
851 /* spin_unlock(&bp->tx_lock); */
853 if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
854 bp->istat &= ~ISTAT_RFO;
855 b44_disable_ints(bp);
856 ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
857 b44_init_rings(bp);
858 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
859 netif_wake_queue(bp->dev);
862 spin_unlock_irqrestore(&bp->lock, flags);
865 if (bp->istat & ISTAT_RX)
866 work_done += b44_rx(bp, budget);
868 if (bp->istat & ISTAT_ERRORS) {
869 spin_lock_irqsave(&bp->lock, flags);
870 b44_halt(bp);
871 b44_init_rings(bp);
872 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
873 netif_wake_queue(bp->dev);
874 spin_unlock_irqrestore(&bp->lock, flags);
880 b44_enable_ints(bp);
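
The tail of b44_poll() is elided by the match; assuming the stock driver follows the standard NAPI contract, interrupts stay masked until a poll consumes less than its budget:

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		b44_enable_ints(bp);	/* re-arm only once RX is drained */
	}

	return work_done;
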
889 struct b44 *bp = netdev_priv(dev);
893 spin_lock(&bp->lock);
895 istat = br32(bp, B44_ISTAT);
896 imask = br32(bp, B44_IMASK);
911 if (napi_schedule_prep(&bp->napi)) {
915 bp->istat = istat;
916 __b44_disable_ints(bp);
917 __napi_schedule(&bp->napi);
921 bw32(bp, B44_ISTAT, istat);
922 br32(bp, B44_ISTAT);
924 spin_unlock(&bp->lock);
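
The handler claims the (shared) IRQ only when istat and imask overlap, stashes the status in bp->istat for the poll routine, masks further interrupts, then acks by writing ISTAT back; the trailing br32() once again flushes the posted writes. The scheduling branch, grounded in the fragments above:

	if (napi_schedule_prep(&bp->napi)) {
		bp->istat = istat;	/* consumed by b44_poll() */
		__b44_disable_ints(bp);
		__napi_schedule(&bp->napi);
	}

	bw32(bp, B44_ISTAT, istat);	/* ack the sources we saw */
	br32(bp, B44_ISTAT);		/* flush posted writes */
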
930 struct b44 *bp = netdev_priv(dev);
934 spin_lock_irq(&bp->lock);
936 b44_halt(bp);
937 b44_init_rings(bp);
938 b44_init_hw(bp, B44_FULL_RESET);
940 spin_unlock_irq(&bp->lock);
942 b44_enable_ints(bp);
949 struct b44 *bp = netdev_priv(dev);
956 spin_lock_irqsave(&bp->lock, flags);
959 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
965 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
966 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
970 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
971 dma_unmap_single(bp->sdev->dma_dev, mapping, len,
978 mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
980 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
981 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
982 dma_unmap_single(bp->sdev->dma_dev, mapping,
993 entry = bp->tx_prod;
994 bp->tx_buffers[entry].skb = skb;
995 bp->tx_buffers[entry].mapping = mapping;
1002 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1003 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);
1005 if (bp->flags & B44_FLAG_TX_RING_HACK)
1006 b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1007 entry * sizeof(bp->tx_ring[0]),
1012 bp->tx_prod = entry;
1016 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1017 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1018 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1019 if (bp->flags & B44_FLAG_REORDER_BUG)
1020 br32(bp, B44_DMATX_PTR);
1024 if (TX_BUFFS_AVAIL(bp) < 1)
1028 spin_unlock_irqrestore(&bp->lock, flags);
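
Between mapping the buffer and filling the descriptor, the elided lines build the control word: the frame length plus start/end-of-frame and interrupt-on-completion flags, with end-of-table set on the last ring entry so the DMA engine wraps. Presumably along these lines:

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;	/* last slot: tell the engine to wrap */
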
1039 struct b44 *bp = netdev_priv(dev);
1049 spin_lock_irq(&bp->lock);
1050 b44_halt(bp);
1052 b44_init_rings(bp);
1053 b44_init_hw(bp, B44_FULL_RESET);
1054 spin_unlock_irq(&bp->lock);
1056 b44_enable_ints(bp);
1065 * end up in the driver. bp->lock is not held and we are not
1068 static void b44_free_rings(struct b44 *bp)
1074 rp = &bp->rx_buffers[i];
1078 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1086 rp = &bp->tx_buffers[i];
1090 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1103 static void b44_init_rings(struct b44 *bp)
1107 b44_free_rings(bp);
1109 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1110 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1112 if (bp->flags & B44_FLAG_RX_RING_HACK)
1113 dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1116 if (bp->flags & B44_FLAG_TX_RING_HACK)
1117 dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1120 for (i = 0; i < bp->rx_pending; i++) {
1121 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1130 static void b44_free_consistent(struct b44 *bp)
1132 kfree(bp->rx_buffers);
1133 bp->rx_buffers = NULL;
1134 kfree(bp->tx_buffers);
1135 bp->tx_buffers = NULL;
1136 if (bp->rx_ring) {
1137 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1138 dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1140 kfree(bp->rx_ring);
1142 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1143 bp->rx_ring, bp->rx_ring_dma);
1144 bp->rx_ring = NULL;
1145 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1147 if (bp->tx_ring) {
1148 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1149 dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1151 kfree(bp->tx_ring);
1153 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1154 bp->tx_ring, bp->tx_ring_dma);
1155 bp->tx_ring = NULL;
1156 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1164 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1169 bp->rx_buffers = kzalloc(size, gfp);
1170 if (!bp->rx_buffers)
1174 bp->tx_buffers = kzalloc(size, gfp);
1175 if (!bp->tx_buffers)
1179 bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1180 &bp->rx_ring_dma, gfp);
1181 if (!bp->rx_ring) {
1192 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1196 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1202 bp->rx_ring = rx_ring;
1203 bp->rx_ring_dma = rx_ring_dma;
1204 bp->flags |= B44_FLAG_RX_RING_HACK;
1207 bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1208 &bp->tx_ring_dma, gfp);
1209 if (!bp->tx_ring) {
1220 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1224 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1230 bp->tx_ring = tx_ring;
1231 bp->tx_ring_dma = tx_ring_dma;
1232 bp->flags |= B44_FLAG_TX_RING_HACK;
1238 b44_free_consistent(bp);
1242 /* bp->lock is held. */
1243 static void b44_clear_stats(struct b44 *bp)
1247 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1249 br32(bp, reg);
1251 br32(bp, reg);
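
With MIB_CTRL_CLR_ON_READ set, discarding one read of every counter register is enough to zero the whole block; the two elided loops sweep the TX and RX counter ranges. Reconstructed, assuming the range macros from b44.h:

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);	/* value discarded; the read clears it */
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
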
1254 /* bp->lock is held. */
1255 static void b44_chip_reset(struct b44 *bp, int reset_kind)
1257 struct ssb_device *sdev = bp->sdev;
1260 was_enabled = ssb_device_is_enabled(bp->sdev);
1262 ssb_device_enable(bp->sdev, 0);
1266 bw32(bp, B44_RCV_LAZY, 0);
1267 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1268 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1269 bw32(bp, B44_DMATX_CTRL, 0);
1270 bp->tx_prod = bp->tx_cons = 0;
1271 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1272 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1275 bw32(bp, B44_DMARX_CTRL, 0);
1276 bp->rx_prod = bp->rx_cons = 0;
1279 b44_clear_stats(bp);
1290 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1296 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1305 br32(bp, B44_MDIO_CTRL);
1307 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1308 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1309 br32(bp, B44_ENET_CTRL);
1310 bp->flags |= B44_FLAG_EXTERNAL_PHY;
1312 u32 val = br32(bp, B44_DEVCTRL);
1315 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1316 br32(bp, B44_DEVCTRL);
1319 bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
1323 /* bp->lock is held. */
1324 static void b44_halt(struct b44 *bp)
1326 b44_disable_ints(bp);
1328 b44_phy_reset(bp);
1330 netdev_info(bp->dev, "powering down PHY\n");
1331 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1334 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1335 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1337 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1340 /* bp->lock is held. */
1341 static void __b44_set_mac_addr(struct b44 *bp)
1343 bw32(bp, B44_CAM_CTRL, 0);
1344 if (!(bp->dev->flags & IFF_PROMISC)) {
1347 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1348 val = br32(bp, B44_CAM_CTRL);
1349 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1355 struct b44 *bp = netdev_priv(dev);
1367 spin_lock_irq(&bp->lock);
1369 val = br32(bp, B44_RXCONFIG);
1371 __b44_set_mac_addr(bp);
1373 spin_unlock_irq(&bp->lock);
1379 * packet processing. Invoked with bp->lock held.
1382 static void b44_init_hw(struct b44 *bp, int reset_kind)
1386 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1388 b44_phy_reset(bp);
1389 b44_setup_phy(bp);
1393 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1394 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1397 __b44_set_rx_mode(bp->dev);
1400 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1401 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1403 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1405 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1408 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1409 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1410 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1412 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1414 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1415 bp->rx_prod = bp->rx_pending;
1417 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1420 val = br32(bp, B44_ENET_CTRL);
1421 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1423 netdev_reset_queue(bp->dev);
1428 struct b44 *bp = netdev_priv(dev);
1431 err = b44_alloc_consistent(bp, GFP_KERNEL);
1435 napi_enable(&bp->napi);
1437 b44_init_rings(bp);
1438 b44_init_hw(bp, B44_FULL_RESET);
1440 b44_check_phy(bp);
1444 napi_disable(&bp->napi);
1445 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1446 b44_free_rings(bp);
1447 b44_free_consistent(bp);
1451 timer_setup(&bp->timer, b44_timer, 0);
1452 bp->timer.expires = jiffies + HZ;
1453 add_timer(&bp->timer);
1455 b44_enable_ints(bp);
1457 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1478 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1484 bw32(bp, B44_FILT_ADDR, table_offset + i);
1485 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1522 static void b44_setup_pseudo_magicp(struct b44 *bp)
1536 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1539 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1540 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1545 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1548 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1550 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1556 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1559 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1561 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1568 bw32(bp, B44_WKUP_LEN, val);
1571 val = br32(bp, B44_DEVCTRL);
1572 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1577 static void b44_setup_wol_pci(struct b44 *bp)
1581 if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1582 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1583 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1584 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1588 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1591 static void b44_setup_wol(struct b44 *bp)
1595 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1597 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1599 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1601 val = bp->dev->dev_addr[2] << 24 |
1602 bp->dev->dev_addr[3] << 16 |
1603 bp->dev->dev_addr[4] << 8 |
1604 bp->dev->dev_addr[5];
1605 bw32(bp, B44_ADDR_LO, val);
1607 val = bp->dev->dev_addr[0] << 8 |
1608 bp->dev->dev_addr[1];
1609 bw32(bp, B44_ADDR_HI, val);
1611 val = br32(bp, B44_DEVCTRL);
1612 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1615 b44_setup_pseudo_magicp(bp);
1617 b44_setup_wol_pci(bp);
1622 struct b44 *bp = netdev_priv(dev);
1626 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1629 napi_disable(&bp->napi);
1631 del_timer_sync(&bp->timer);
1633 spin_lock_irq(&bp->lock);
1635 b44_halt(bp);
1636 b44_free_rings(bp);
1639 spin_unlock_irq(&bp->lock);
1643 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1644 b44_init_hw(bp, B44_PARTIAL_RESET);
1645 b44_setup_wol(bp);
1648 b44_free_consistent(bp);
1656 struct b44 *bp = netdev_priv(dev);
1657 struct b44_hw_stats *hwstat = &bp->hw_stats;
1699 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1709 __b44_cam_write(bp, ha->addr, i++ + 1);
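
CAM slot 0 holds the unicast address (__b44_set_mac_addr() above writes index 0), so multicast entries start at index 1, hence the i++ + 1. A sketch of the loop, assuming the stock helper:

	netdev_for_each_mc_addr(ha, dev) {
		if (i == num_ents)
			break;		/* CAM is full */
		__b44_cam_write(bp, ha->addr, i++ + 1);
	}
	return i + 1;	/* next free CAM index */
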
1716 struct b44 *bp = netdev_priv(dev);
1719 val = br32(bp, B44_RXCONFIG);
1723 bw32(bp, B44_RXCONFIG, val);
1728 __b44_set_mac_addr(bp);
1734 i = __b44_load_mcast(bp, dev);
1737 __b44_cam_write(bp, zero, i);
1739 bw32(bp, B44_RXCONFIG, val);
1740 val = br32(bp, B44_CAM_CTRL);
1741 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1747 struct b44 *bp = netdev_priv(dev);
1749 spin_lock_irq(&bp->lock);
1751 spin_unlock_irq(&bp->lock);
1756 struct b44 *bp = netdev_priv(dev);
1757 return bp->msg_enable;
1762 struct b44 *bp = netdev_priv(dev);
1763 bp->msg_enable = value;
1768 struct b44 *bp = netdev_priv(dev);
1769 struct ssb_bus *bus = bp->sdev->bus;
1788 struct b44 *bp = netdev_priv(dev);
1792 spin_lock_irq(&bp->lock);
1793 b44_readphy(bp, MII_BMCR, &bmcr);
1794 b44_readphy(bp, MII_BMCR, &bmcr);
1797 r = b44_writephy(bp, MII_BMCR,
1799 spin_unlock_irq(&bp->lock);
1807 struct b44 *bp = netdev_priv(dev);
1810 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1825 if (bp->flags & B44_FLAG_ADV_10HALF)
1827 if (bp->flags & B44_FLAG_ADV_10FULL)
1829 if (bp->flags & B44_FLAG_ADV_100HALF)
1831 if (bp->flags & B44_FLAG_ADV_100FULL)
1834 cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1836 cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1839 cmd->base.phy_address = bp->phy_addr;
1840 cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1861 struct b44 *bp = netdev_priv(dev);
1866 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1868 spin_lock_irq(&bp->lock);
1870 b44_setup_phy(bp);
1874 spin_unlock_irq(&bp->lock);
1897 spin_lock_irq(&bp->lock);
1900 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1908 bp->flags |= (B44_FLAG_ADV_10HALF |
1914 bp->flags |= B44_FLAG_ADV_10HALF;
1916 bp->flags |= B44_FLAG_ADV_10FULL;
1918 bp->flags |= B44_FLAG_ADV_100HALF;
1920 bp->flags |= B44_FLAG_ADV_100FULL;
1923 bp->flags |= B44_FLAG_FORCE_LINK;
1924 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1926 bp->flags |= B44_FLAG_100_BASE_T;
1928 bp->flags |= B44_FLAG_FULL_DUPLEX;
1932 b44_setup_phy(bp);
1934 spin_unlock_irq(&bp->lock);
1944 struct b44 *bp = netdev_priv(dev);
1947 ering->rx_pending = bp->rx_pending;
1957 struct b44 *bp = netdev_priv(dev);
1965 spin_lock_irq(&bp->lock);
1967 bp->rx_pending = ering->rx_pending;
1968 bp->tx_pending = ering->tx_pending;
1970 b44_halt(bp);
1971 b44_init_rings(bp);
1972 b44_init_hw(bp, B44_FULL_RESET);
1973 netif_wake_queue(bp->dev);
1974 spin_unlock_irq(&bp->lock);
1976 b44_enable_ints(bp);
1984 struct b44 *bp = netdev_priv(dev);
1987 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1989 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1991 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1997 struct b44 *bp = netdev_priv(dev);
1999 spin_lock_irq(&bp->lock);
2001 bp->flags |= B44_FLAG_PAUSE_AUTO;
2003 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2005 bp->flags |= B44_FLAG_RX_PAUSE;
2007 bp->flags &= ~B44_FLAG_RX_PAUSE;
2009 bp->flags |= B44_FLAG_TX_PAUSE;
2011 bp->flags &= ~B44_FLAG_TX_PAUSE;
2013 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2014 b44_halt(bp);
2015 b44_init_rings(bp);
2016 b44_init_hw(bp, B44_FULL_RESET);
2018 __b44_set_flow_ctrl(bp, bp->flags);
2021 spin_unlock_irq(&bp->lock);
2023 b44_enable_ints(bp);
2050 struct b44 *bp = netdev_priv(dev);
2051 struct b44_hw_stats *hwstat = &bp->hw_stats;
2056 spin_lock_irq(&bp->lock);
2057 b44_stats_update(bp);
2058 spin_unlock_irq(&bp->lock);
2073 struct b44 *bp = netdev_priv(dev);
2076 if (bp->flags & B44_FLAG_WOL_ENABLE)
2085 struct b44 *bp = netdev_priv(dev);
2087 spin_lock_irq(&bp->lock);
2089 bp->flags |= B44_FLAG_WOL_ENABLE;
2091 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2092 spin_unlock_irq(&bp->lock);
2094 device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
2119 struct b44 *bp = netdev_priv(dev);
2125 spin_lock_irq(&bp->lock);
2126 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2130 err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
2132 spin_unlock_irq(&bp->lock);
2137 static int b44_get_invariants(struct b44 *bp)
2139 struct ssb_device *sdev = bp->sdev;
2143 bp->dma_offset = ssb_dma_translation(sdev);
2148 bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2151 bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2156 bp->phy_addr &= 0x1F;
2158 eth_hw_addr_set(bp->dev, addr);
2160 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2165 bp->imask = IMASK_DEF;
2168 bp->flags |= B44_FLAG_BUGGY_TXPTR;
2171 if (bp->sdev->id.revision >= 7)
2172 bp->flags |= B44_FLAG_B0_ANDLATER;
2195 struct b44 *bp = netdev_priv(dev);
2201 if (bp->old_link != phydev->link) {
2203 bp->old_link = phydev->link;
2209 (bp->flags & B44_FLAG_FULL_DUPLEX)) {
2211 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
2213 !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
2215 bp->flags |= B44_FLAG_FULL_DUPLEX;
2220 u32 val = br32(bp, B44_TX_CTRL);
2221 if (bp->flags & B44_FLAG_FULL_DUPLEX)
2225 bw32(bp, B44_TX_CTRL, val);
2230 static int b44_register_phy_one(struct b44 *bp)
2234 struct ssb_device *sdev = bp->sdev;
2247 mii_bus->priv = bp;
2252 mii_bus->phy_mask = ~(1 << bp->phy_addr);
2255 bp->mii_bus = mii_bus;
2263 if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
2268 bp->phy_addr);
2270 bp->phy_addr = 0;
2272 bp->phy_addr);
2275 bp->phy_addr);
2278 phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
2282 bp->phy_addr);
2295 bp->old_link = 0;
2296 bp->phy_addr = phydev->mdio.addr;
2312 static void b44_unregister_phy_one(struct b44 *bp)
2314 struct net_device *dev = bp->dev;
2315 struct mii_bus *mii_bus = bp->mii_bus;
2326 struct b44 *bp;
2331 dev = alloc_etherdev(sizeof(*bp));
2342 bp = netdev_priv(dev);
2343 bp->sdev = sdev;
2344 bp->dev = dev;
2345 bp->force_copybreak = 0;
2347 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2349 spin_lock_init(&bp->lock);
2350 u64_stats_init(&bp->hw_stats.syncp);
2352 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2353 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2356 netif_napi_add(dev, &bp->napi, b44_poll);
2377 err = b44_get_invariants(bp);
2384 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
2390 bp->mii_if.dev = dev;
2391 bp->mii_if.mdio_read = b44_mdio_read_mii;
2392 bp->mii_if.mdio_write = b44_mdio_write_mii;
2393 bp->mii_if.phy_id = bp->phy_addr;
2394 bp->mii_if.phy_id_mask = 0x1f;
2395 bp->mii_if.reg_num_mask = 0x1f;
2398 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2402 bp->flags |= B44_FLAG_PAUSE_AUTO;
2417 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2420 err = b44_phy_reset(bp);
2426 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2427 err = b44_register_phy_one(bp);
2445 netif_napi_del(&bp->napi);
2455 struct b44 *bp = netdev_priv(dev);
2458 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
2459 b44_unregister_phy_one(bp);
2462 netif_napi_del(&bp->napi);
2471 struct b44 *bp = netdev_priv(dev);
2476 del_timer_sync(&bp->timer);
2478 spin_lock_irq(&bp->lock);
2480 b44_halt(bp);
2481 netif_carrier_off(bp->dev);
2482 netif_device_detach(bp->dev);
2483 b44_free_rings(bp);
2485 spin_unlock_irq(&bp->lock);
2488 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2489 b44_init_hw(bp, B44_PARTIAL_RESET);
2490 b44_setup_wol(bp);
2500 struct b44 *bp = netdev_priv(dev);
2513 spin_lock_irq(&bp->lock);
2514 b44_init_rings(bp);
2515 b44_init_hw(bp, B44_FULL_RESET);
2516 spin_unlock_irq(&bp->lock);
2526 spin_lock_irq(&bp->lock);
2527 b44_halt(bp);
2528 b44_free_rings(bp);
2529 spin_unlock_irq(&bp->lock);
2533 netif_device_attach(bp->dev);
2535 b44_enable_ints(bp);
2538 mod_timer(&bp->timer, jiffies + 1);