Lines matching refs:sc — cross-reference hits for the softc pointer "sc" in the FreeBSD Cadence GEM Ethernet driver (if_cgem.c). Only matching source lines are listed, so multi-line statements appear with their non-matching lines elided.

194 #define RD4(sc, off)	(bus_read_4((sc)->mem_res, (off)))
195 #define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val)))
196 #define BARRIER(sc, off, len, flags) \
197 (bus_barrier((sc)->mem_res, (off), (len), (flags)))
199 #define CGEM_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
200 #define CGEM_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
201 #define CGEM_LOCK_INIT(sc) \
202 mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
204 #define CGEM_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx)
205 #define CGEM_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
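
A minimal usage sketch (not from the driver) of how these accessor and lock macros compose: register read-modify-write on the GEM is done under the per-softc mutex.

	/* Hypothetical helper: set a bit in CGEM_NET_CFG under the lock. */
	static void
	cgem_set_cfg_bit(struct cgem_softc *sc, uint32_t bit)
	{
		uint32_t net_cfg;

		CGEM_LOCK(sc);
		net_cfg = RD4(sc, CGEM_NET_CFG);	/* bus_read_4() on mem_res */
		WR4(sc, CGEM_NET_CFG, net_cfg | bit);	/* bus_write_4() */
		CGEM_UNLOCK(sc);
	}
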
221 cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
228 uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
229 uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
252 device_printf(sc->dev, "no mac address found, assigning "
259 WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
261 WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
264 WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
265 WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
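
The writes at lines 259-261 imply the MAC byte order: the first four bytes pack into SPEC_ADDR_LOW least-significant byte first, the last two into SPEC_ADDR_HI. A hedged reconstruction (the middle of the statement is elided by the listing, so the eaddr[0..2] terms here are assumptions):

	uint32_t low  = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) |
	    ((uint32_t)eaddr[3] << 24);
	uint32_t high = eaddr[4] | (eaddr[5] << 8);

	WR4(sc, CGEM_SPEC_ADDR_LOW(0), low);	/* lines 259-260 */
	WR4(sc, CGEM_SPEC_ADDR_HI(0), high);	/* line 261 */
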
298 cgem_rx_filter(struct cgem_softc *sc)
300 if_t ifp = sc->ifp;
310 net_cfg = RD4(sc, CGEM_NET_CFG);
329 device_printf(sc->dev,
350 WR4(sc, CGEM_HASH_TOP, hash_hi);
351 WR4(sc, CGEM_HASH_BOT, hash_lo);
352 WR4(sc, CGEM_NET_CFG, net_cfg);
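
cgem_rx_filter() programs the 64-bit multicast hash written at lines 350-351. The Cadence GEM indexes that filter with a 6-bit XOR fold of the 48-bit destination address; the helper below is a hypothetical sketch of that fold, not a quote from the driver.

	static int
	cgem_hash_mac_sketch(const uint8_t *eaddr)
	{
		uint64_t val = 0;
		int i, index = 0;

		for (i = 5; i >= 0; i--)
			val = (val << 8) | eaddr[i];
		for (i = 0; i < 8; i++) {
			index ^= val & 0x3f;	/* fold 48 bits into 6 */
			val >>= 6;
		}
		return (index);
	}

Indices 0-31 would set bits in the CGEM_HASH_BOT word (hash_lo), 32-63 in CGEM_HASH_TOP (hash_hi).
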
367 cgem_setup_descs(struct cgem_softc *sc)
371 sc->txring = NULL;
372 sc->rxring = NULL;
376 err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
385 &sc->sc_mtx,
386 &sc->desc_dma_tag);
391 err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
400 &sc->sc_mtx,
401 &sc->mbuf_dma_tag);
406 err = bus_dmamem_alloc(sc->desc_dma_tag,
407 (void **)&sc->rxring,
409 &sc->rxring_dma_map);
414 err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
415 (void *)sc->rxring,
417 cgem_getaddr, &sc->rxring_physaddr,
424 sc->rxring[i].addr = CGEM_RXDESC_OWN;
425 sc->rxring[i].ctl = 0;
426 sc->rxring_m[i] = NULL;
427 sc->rxring_m_dmamap[i] = NULL;
429 sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
431 sc->rxring_hd_ptr = 0;
432 sc->rxring_tl_ptr = 0;
433 sc->rxring_queued = 0;
436 err = bus_dmamem_alloc(sc->desc_dma_tag,
437 (void **)&sc->txring,
439 &sc->txring_dma_map);
444 err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
445 (void *)sc->txring,
447 cgem_getaddr, &sc->txring_physaddr,
454 sc->txring[i].addr = 0;
455 sc->txring[i].ctl = CGEM_TXDESC_USED;
456 sc->txring_m[i] = NULL;
457 sc->txring_m_dmamap[i] = NULL;
459 sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
461 sc->txring_hd_ptr = 0;
462 sc->txring_tl_ptr = 0;
463 sc->txring_queued = 0;
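
The cgem_getaddr callback named at lines 417 and 447 captures each ring's bus address from bus_dmamap_load(). A sketch of the stock single-segment callback idiom it almost certainly follows:

	static void
	cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
	{
		if (error != 0)
			return;
		/* Store the ring's physical address (rxring/txring_physaddr). */
		*(bus_addr_t *)arg = segs[0].ds_addr;
	}
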
470 cgem_fill_rqueue(struct cgem_softc *sc)
476 CGEM_ASSERT_LOCKED(sc);
478 while (sc->rxring_queued < sc->rxbufs) {
486 m->m_pkthdr.rcvif = sc->ifp;
489 if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
490 &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
491 sc->rxdmamapfails++;
495 if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
496 sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
498 sc->rxdmamapfails++;
499 bus_dmamap_destroy(sc->mbuf_dma_tag,
500 sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
501 sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
505 sc->rxring_m[sc->rxring_hd_ptr] = m;
508 bus_dmamap_sync(sc->mbuf_dma_tag,
509 sc->rxring_m_dmamap[sc->rxring_hd_ptr],
513 sc->rxring[sc->rxring_hd_ptr].ctl = 0;
514 if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
515 sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
517 sc->rxring_hd_ptr = 0;
519 sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;
521 sc->rxring_queued++;
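
The mbuf allocation elided between lines 478 and 486 is, by the usual FreeBSD idiom, a packet-header cluster sized to a full receive buffer; a hedged sketch (the exact allocator call is an assumption):

	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		break;			/* out of clusters; retry on next fill */
	m->m_len = MCLBYTES;
	m->m_pkthdr.len = MCLBYTES;
	m->m_pkthdr.rcvif = sc->ifp;	/* line 486 */
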
527 cgem_recv(struct cgem_softc *sc)
529 if_t ifp = sc->ifp;
533 CGEM_ASSERT_LOCKED(sc);
538 while (sc->rxring_queued > 0 &&
539 (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
541 ctl = sc->rxring[sc->rxring_tl_ptr].ctl;
544 m = sc->rxring_m[sc->rxring_tl_ptr];
545 sc->rxring_m[sc->rxring_tl_ptr] = NULL;
548 bus_dmamap_sync(sc->mbuf_dma_tag,
549 sc->rxring_m_dmamap[sc->rxring_tl_ptr],
553 bus_dmamap_unload(sc->mbuf_dma_tag,
554 sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
555 bus_dmamap_destroy(sc->mbuf_dma_tag,
556 sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
557 sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;
560 if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
561 sc->rxring_tl_ptr = 0;
562 sc->rxring_queued--;
611 cgem_fill_rqueue(sc);
614 CGEM_UNLOCK(sc);
622 CGEM_LOCK(sc);
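
The UNLOCK/LOCK pair at lines 614 and 622 brackets the hand-off of received packets to the stack: if_input() may re-enter the driver, so the softc lock is dropped first. A sketch of the likely shape (the chain head "m_hd" is an assumption):

	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_nextpkt;
		m->m_nextpkt = NULL;
		if_input(ifp, m);	/* may call back into the driver */
	}
	CGEM_LOCK(sc);
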
627 cgem_clean_tx(struct cgem_softc *sc)
632 CGEM_ASSERT_LOCKED(sc);
635 while (sc->txring_queued > 0 &&
636 ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
640 bus_dmamap_sync(sc->mbuf_dma_tag,
641 sc->txring_m_dmamap[sc->txring_tl_ptr],
645 bus_dmamap_unload(sc->mbuf_dma_tag,
646 sc->txring_m_dmamap[sc->txring_tl_ptr]);
647 bus_dmamap_destroy(sc->mbuf_dma_tag,
648 sc->txring_m_dmamap[sc->txring_tl_ptr]);
649 sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;
652 m = sc->txring_m[sc->txring_tl_ptr];
653 sc->txring_m[sc->txring_tl_ptr] = NULL;
659 device_printf(sc->dev, "cgem_clean_tx: Whoa! "
661 sc->txring[sc->txring_tl_ptr].addr);
664 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
666 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
674 sc->txring_tl_ptr = 0;
676 sc->txring_tl_ptr++;
677 sc->txring_queued--;
679 ctl = sc->txring[sc->txring_tl_ptr].ctl;
681 sc->txring[sc->txring_tl_ptr].ctl =
687 sc->txring_tl_ptr = 0;
689 sc->txring_tl_ptr++;
690 sc->txring_queued--;
692 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
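
Lines 679-690 hint at the multi-buffer reclaim: the controller sets CGEM_TXDESC_USED only in a frame's first descriptor, so software marks the rest of the chain used itself. A hedged sketch (CGEM_TXDESC_LAST_BUF follows the flag-naming convention above and is an assumption):

	/* Walk the remaining descriptors of a multi-segment frame. */
	while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
		if (++sc->txring_tl_ptr == CGEM_NUM_TX_DESCS)
			sc->txring_tl_ptr = 0;
		sc->txring_queued--;
		ctl = sc->txring[sc->txring_tl_ptr].ctl;
		sc->txring[sc->txring_tl_ptr].ctl = ctl | CGEM_TXDESC_USED;
	}
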
700 struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
706 CGEM_ASSERT_LOCKED(sc);
713 if (sc->txring_queued >=
717 cgem_clean_tx(sc);
720 if (sc->txring_queued >=
723 sc->txfull++;
734 if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
735 &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
737 sc->txdmamapfails++;
740 err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
741 sc->txring_m_dmamap[sc->txring_hd_ptr],
748 sc->txdefragfails++;
750 bus_dmamap_destroy(sc->mbuf_dma_tag,
751 sc->txring_m_dmamap[sc->txring_hd_ptr]);
752 sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
756 err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
757 sc->txring_m_dmamap[sc->txring_hd_ptr],
759 sc->txdefrags++;
764 bus_dmamap_destroy(sc->mbuf_dma_tag,
765 sc->txring_m_dmamap[sc->txring_hd_ptr]);
766 sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
767 sc->txdmamapfails++;
770 sc->txring_m[sc->txring_hd_ptr] = m;
773 bus_dmamap_sync(sc->mbuf_dma_tag,
774 sc->txring_m_dmamap[sc->txring_hd_ptr],
778 wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
786 sc->txring[sc->txring_hd_ptr + i].addr =
796 sc->txring[sc->txring_hd_ptr + i].ctl = ctl;
799 sc->txring_m[sc->txring_hd_ptr + i] = NULL;
803 sc->txring_hd_ptr = 0;
805 sc->txring_hd_ptr += nsegs;
806 sc->txring_queued += nsegs;
809 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
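
The per-segment descriptor fill around lines 786-799 is written back to front, so that the write that hands the chain to the controller (clearing USED in the frame's first descriptor) happens last. A hedged sketch of that loop (the exact flag handling is an assumption):

	for (i = nsegs - 1; i >= 0; i--) {
		sc->txring[sc->txring_hd_ptr + i].addr = segs[i].ds_addr;
		ctl = segs[i].ds_len;
		if (i == nsegs - 1) {
			ctl |= CGEM_TXDESC_LAST_BUF;	/* end of frame */
			if (wrap)
				ctl |= CGEM_TXDESC_WRAP;	/* end of ring */
		}
		sc->txring[sc->txring_hd_ptr + i].ctl = ctl;
		if (i != 0)
			sc->txring_m[sc->txring_hd_ptr + i] = NULL;
	}

The WR4() at line 809 then kicks the DMA engine, presumably by ORing a start-transmit bit into the shadowed CGEM_NET_CTRL value.
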
820 struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
822 CGEM_LOCK(sc);
824 CGEM_UNLOCK(sc);
828 cgem_poll_hw_stats(struct cgem_softc *sc)
832 CGEM_ASSERT_LOCKED(sc);
834 sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
835 sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;
837 sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
838 sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
839 sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
840 sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
841 sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
842 sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
843 sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
844 sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
845 sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
846 sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
847 sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);
849 n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
850 sc->stats.tx_single_collisn += n;
851 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
852 n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
853 sc->stats.tx_multi_collisn += n;
854 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
855 n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
856 sc->stats.tx_excsv_collisn += n;
857 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
858 n = RD4(sc, CGEM_LATE_COLL);
859 sc->stats.tx_late_collisn += n;
860 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
862 sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
863 sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);
865 sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
866 sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;
868 sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
869 sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
870 sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
871 sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
872 sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
873 sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
874 sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
875 sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
876 sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
877 sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
878 sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
879 sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
880 sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
881 sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
882 sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
883 sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
884 sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
885 sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
886 sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
887 sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
888 sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
889 sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
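
The statistics registers read above are 32 bits wide and apparently clear on read, which is why every line accumulates with += into 64-bit software totals instead of overwriting them. A sketch of a combined byte-counter read, assuming low-word-first order:

	uint64_t octets;

	octets = RD4(sc, CGEM_OCTETS_TX_BOT);			/* low 32 bits */
	octets |= (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;	/* high bits */
	sc->stats.tx_bytes += octets;
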
895 struct cgem_softc *sc = (struct cgem_softc *)arg;
898 CGEM_ASSERT_LOCKED(sc);
901 if (sc->miibus != NULL) {
902 mii = device_get_softc(sc->miibus);
907 cgem_poll_hw_stats(sc);
910 if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
916 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
919 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
921 sc->rx_frames_prev = sc->stats.rx_frames;
924 callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
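
Lines 910-921 are the receive-hang workaround: if the rxhangwar knob is set and no frames arrived since the previous tick, the receiver is pulsed off and back on. A sketch assembled from the quoted fragments:

	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/* Pulse RX enable low, then restore the shadow value. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow & ~CGEM_NET_CTRL_RX_EN);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;
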
931 struct cgem_softc *sc = (struct cgem_softc *)arg;
932 if_t ifp = sc->ifp;
935 CGEM_LOCK(sc);
938 CGEM_UNLOCK(sc);
943 istatus = RD4(sc, CGEM_INTR_STAT);
944 WR4(sc, CGEM_INTR_STAT, istatus);
948 cgem_recv(sc);
951 cgem_clean_tx(sc);
955 device_printf(sc->dev, "cgem_intr: hresp not okay! "
956 "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT));
957 WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
963 WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
964 sc->rxoverruns++;
969 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
971 cgem_fill_rqueue(sc);
972 sc->rxnobufs++;
979 CGEM_UNLOCK(sc);
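
Lines 943-944 show the interrupt acknowledge idiom: CGEM_INTR_STAT is write-one-to-clear, so writing back the freshly read status acks exactly the events this pass will service. A hedged sketch of the dispatch (the CGEM_INTR_RX_COMPLETE name follows the register-naming convention above and is an assumption):

	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);		/* W1C ack */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);				/* line 948 */
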
984 cgem_reset(struct cgem_softc *sc)
987 CGEM_ASSERT_LOCKED(sc);
989 WR4(sc, CGEM_NET_CTRL, 0);
990 WR4(sc, CGEM_NET_CFG, 0);
991 WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
992 WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
993 WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
994 WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
995 WR4(sc, CGEM_HASH_BOT, 0);
996 WR4(sc, CGEM_HASH_TOP, 0);
997 WR4(sc, CGEM_TX_QBAR, 0); /* manual says do this. */
998 WR4(sc, CGEM_RX_QBAR, 0);
1001 WR4(sc, CGEM_NET_CFG,
1005 sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
1006 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
1011 cgem_config(struct cgem_softc *sc)
1013 if_t ifp = sc->ifp;
1018 CGEM_ASSERT_LOCKED(sc);
1034 WR4(sc, CGEM_NET_CFG, net_cfg);
1047 WR4(sc, CGEM_DMA_CFG, dma_cfg);
1050 WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
1051 WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);
1054 sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
1055 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
1058 WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
1060 WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
1063 WR4(sc, CGEM_INTR_EN,
1071 cgem_init_locked(struct cgem_softc *sc)
1075 CGEM_ASSERT_LOCKED(sc);
1077 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
1080 cgem_config(sc);
1081 cgem_fill_rqueue(sc);
1083 if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
1085 mii = device_get_softc(sc->miibus);
1088 callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
1094 struct cgem_softc *sc = (struct cgem_softc *)arg;
1096 CGEM_LOCK(sc);
1097 cgem_init_locked(sc);
1098 CGEM_UNLOCK(sc);
1103 cgem_stop(struct cgem_softc *sc)
1107 CGEM_ASSERT_LOCKED(sc);
1109 callout_stop(&sc->tick_ch);
1112 cgem_reset(sc);
1116 sc->txring[i].ctl = CGEM_TXDESC_USED;
1117 sc->txring[i].addr = 0;
1118 if (sc->txring_m[i]) {
1120 bus_dmamap_unload(sc->mbuf_dma_tag,
1121 sc->txring_m_dmamap[i]);
1122 bus_dmamap_destroy(sc->mbuf_dma_tag,
1123 sc->txring_m_dmamap[i]);
1124 sc->txring_m_dmamap[i] = NULL;
1125 m_freem(sc->txring_m[i]);
1126 sc->txring_m[i] = NULL;
1129 sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
1131 sc->txring_hd_ptr = 0;
1132 sc->txring_tl_ptr = 0;
1133 sc->txring_queued = 0;
1137 sc->rxring[i].addr = CGEM_RXDESC_OWN;
1138 sc->rxring[i].ctl = 0;
1139 if (sc->rxring_m[i]) {
1141 bus_dmamap_unload(sc->mbuf_dma_tag,
1142 sc->rxring_m_dmamap[i]);
1143 bus_dmamap_destroy(sc->mbuf_dma_tag,
1144 sc->rxring_m_dmamap[i]);
1145 sc->rxring_m_dmamap[i] = NULL;
1147 m_freem(sc->rxring_m[i]);
1148 sc->rxring_m[i] = NULL;
1151 sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
1153 sc->rxring_hd_ptr = 0;
1154 sc->rxring_tl_ptr = 0;
1155 sc->rxring_queued = 0;
1158 sc->mii_media_active = 0;
1165 struct cgem_softc *sc = if_getsoftc(ifp);
1172 CGEM_LOCK(sc);
1175 if (((if_getflags(ifp) ^ sc->if_old_flags) &
1177 cgem_rx_filter(sc);
1180 cgem_init_locked(sc);
1184 cgem_stop(sc);
1186 sc->if_old_flags = if_getflags(ifp);
1187 CGEM_UNLOCK(sc);
1194 CGEM_LOCK(sc);
1195 cgem_rx_filter(sc);
1196 CGEM_UNLOCK(sc);
1202 mii = device_get_softc(sc->miibus);
1207 CGEM_LOCK(sc);
1217 WR4(sc, CGEM_DMA_CFG,
1218 RD4(sc, CGEM_DMA_CFG) |
1226 WR4(sc, CGEM_DMA_CFG,
1227 RD4(sc, CGEM_DMA_CFG) &
1236 WR4(sc, CGEM_NET_CFG,
1237 RD4(sc, CGEM_NET_CFG) |
1243 WR4(sc, CGEM_NET_CFG,
1244 RD4(sc, CGEM_NET_CFG) &
1254 CGEM_UNLOCK(sc);
1269 struct cgem_softc *sc = device_get_softc(dev);
1271 if (child == sc->miibus)
1272 sc->miibus = NULL;
1278 struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
1283 mii = device_get_softc(sc->miibus);
1284 CGEM_LOCK(sc);
1290 CGEM_UNLOCK(sc);
1298 struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
1301 mii = device_get_softc(sc->miibus);
1302 CGEM_LOCK(sc);
1306 CGEM_UNLOCK(sc);
1312 struct cgem_softc *sc = device_get_softc(dev);
1315 WR4(sc, CGEM_PHY_MAINT,
1323 while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
1331 val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;
1346 struct cgem_softc *sc = device_get_softc(dev);
1349 WR4(sc, CGEM_PHY_MAINT,
1358 while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
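
Lines 1315-1331 and 1349-1358 give the MDIO transaction shape: write a frame to CGEM_PHY_MAINT, spin until CGEM_NET_STAT_PHY_MGMT_IDLE comes back, then (for reads) take the low 16 bits. A hedged sketch of the read side; the CGEM_PHY_MAINT_* field names beyond DATA_MASK are assumptions:

	static int
	cgem_miibus_readreg_sketch(device_t dev, int phy, int reg)
	{
		struct cgem_softc *sc = device_get_softc(dev);

		WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
		    CGEM_PHY_MAINT_OP_READ | (phy << CGEM_PHY_MAINT_PHY_SHIFT) |
		    (reg << CGEM_PHY_MAINT_REG_SHIFT));
		while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0)
			DELAY(5);	/* poll interval is an assumption */
		return (RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK);
	}
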
1372 struct cgem_softc *sc = device_get_softc(dev);
1373 struct mii_data *mii = device_get_softc(sc->miibus);
1375 CGEM_ASSERT_LOCKED(sc);
1379 sc->mii_media_active != mii->mii_media_active)
1380 cgem_mediachange(sc, mii);
1386 struct cgem_softc *sc = device_get_softc(dev);
1387 struct mii_data *mii = device_get_softc(sc->miibus);
1389 CGEM_ASSERT_LOCKED(sc);
1393 sc->mii_media_active != mii->mii_media_active)
1394 cgem_mediachange(sc, mii);
1411 cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
1416 CGEM_ASSERT_LOCKED(sc);
1419 net_cfg = RD4(sc, CGEM_NET_CFG);
1440 WR4(sc, CGEM_NET_CFG, net_cfg);
1443 if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
1444 device_printf(sc->dev, "cgem_mediachange: "
1446 sc->ref_clk_num, ref_clk_freq);
1448 sc->mii_media_active = mii->mii_media_active;
1454 struct cgem_softc *sc = device_get_softc(dev);
1463 &sc->rxbufs, 0,
1467 &sc->rxhangwar, 0,
1471 &sc->rxoverruns, 0,
1475 &sc->rxnobufs, 0,
1479 &sc->rxdmamapfails, 0,
1483 &sc->txfull, 0,
1487 &sc->txdmamapfails, 0,
1491 &sc->txdefrags, 0,
1495 &sc->txdefragfails, 0,
1503 &sc->stats.tx_bytes, "Total bytes transmitted");
1506 &sc->stats.tx_frames, 0, "Total frames transmitted");
1508 &sc->stats.tx_frames_bcast, 0,
1511 &sc->stats.tx_frames_multi, 0,
1514 CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
1517 &sc->stats.tx_frames_64b, 0,
1520 &sc->stats.tx_frames_65to127b, 0,
1523 CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
1526 CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
1529 CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
1532 CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
1535 CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
1538 CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
1541 CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
1544 CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
1547 CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
1550 CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
1553 CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
1557 &sc->stats.rx_bytes, "Total bytes received");
1560 &sc->stats.rx_frames, 0, "Total frames received");
1562 CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
1565 CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
1568 CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
1571 CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
1574 CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
1577 CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
1580 CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
1583 CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
1586 CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
1589 CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
1592 CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
1595 CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
1598 CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
1601 CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
1604 CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
1607 CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
1610 CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
1613 CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
1617 CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
1621 CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
1624 CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
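
The sysctl block above lists only the &sc->... argument of each registration; by the argument shapes (integer counters take a trailing 0, 64-bit byte counts do not), the surrounding calls are the SYSCTL_ADD_UINT/_INT and SYSCTL_ADD_UQUAD families. A hedged sketch of two representative registrations; the node names and the rxbufs description are assumptions, while "Total bytes transmitted" is quoted at line 1503:

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *child =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
	    &sc->rxbufs, 0, "Number of receive buffers to provide");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &sc->stats.tx_bytes, "Total bytes transmitted");
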
1646 struct cgem_softc *sc = device_get_softc(dev);
1653 sc->dev = dev;
1654 CGEM_LOCK_INIT(sc);
1658 sc->ref_clk_num = 0;
1660 sc->ref_clk_num = fdt32_to_cpu(cell);
1664 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1666 if (sc->mem_res == NULL) {
1673 sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1675 if (sc->irq_res == NULL) {
1682 ifp = sc->ifp = if_alloc(IFT_ETHER);
1688 if_setsoftc(ifp, sc);
1704 sc->if_old_flags = if_getflags(ifp);
1705 sc->rxbufs = DEFAULT_NUM_RX_BUFS;
1706 sc->rxhangwar = 1;
1709 CGEM_LOCK(sc);
1710 cgem_reset(sc);
1711 CGEM_UNLOCK(sc);
1714 err = mii_attach(dev, &sc->miibus, ifp,
1724 err = cgem_setup_descs(sc);
1732 cgem_get_mac(sc, eaddr);
1735 callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1739 err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
1740 INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
1756 struct cgem_softc *sc = device_get_softc(dev);
1759 if (sc == NULL)
1763 CGEM_LOCK(sc);
1764 cgem_stop(sc);
1765 CGEM_UNLOCK(sc);
1766 callout_drain(&sc->tick_ch);
1767 if_setflagbits(sc->ifp, 0, IFF_UP);
1768 ether_ifdetach(sc->ifp);
1771 if (sc->miibus != NULL) {
1772 device_delete_child(dev, sc->miibus);
1773 sc->miibus = NULL;
1777 if (sc->mem_res != NULL) {
1779 rman_get_rid(sc->mem_res), sc->mem_res);
1780 sc->mem_res = NULL;
1782 if (sc->irq_res != NULL) {
1783 if (sc->intrhand)
1784 bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
1786 rman_get_rid(sc->irq_res), sc->irq_res);
1787 sc->irq_res = NULL;
1791 if (sc->rxring != NULL) {
1792 if (sc->rxring_physaddr != 0) {
1793 bus_dmamap_unload(sc->desc_dma_tag,
1794 sc->rxring_dma_map);
1795 sc->rxring_physaddr = 0;
1797 bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
1798 sc->rxring_dma_map);
1799 sc->rxring = NULL;
1801 if (sc->rxring_m_dmamap[i] != NULL) {
1802 bus_dmamap_destroy(sc->mbuf_dma_tag,
1803 sc->rxring_m_dmamap[i]);
1804 sc->rxring_m_dmamap[i] = NULL;
1807 if (sc->txring != NULL) {
1808 if (sc->txring_physaddr != 0) {
1809 bus_dmamap_unload(sc->desc_dma_tag,
1810 sc->txring_dma_map);
1811 sc->txring_physaddr = 0;
1813 bus_dmamem_free(sc->desc_dma_tag, sc->txring,
1814 sc->txring_dma_map);
1815 sc->txring = NULL;
1817 if (sc->txring_m_dmamap[i] != NULL) {
1818 bus_dmamap_destroy(sc->mbuf_dma_tag,
1819 sc->txring_m_dmamap[i]);
1820 sc->txring_m_dmamap[i] = NULL;
1823 if (sc->desc_dma_tag != NULL) {
1824 bus_dma_tag_destroy(sc->desc_dma_tag);
1825 sc->desc_dma_tag = NULL;
1827 if (sc->mbuf_dma_tag != NULL) {
1828 bus_dma_tag_destroy(sc->mbuf_dma_tag);
1829 sc->mbuf_dma_tag = NULL;
1834 CGEM_LOCK_DESTROY(sc);