/*-
 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
--- 20 unchanged lines hidden ---
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sge/if_sge.c 207628 2010-05-04 19:04:51Z yongari $");

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
--- 705 unchanged lines hidden ---
	return (0);
}

static int
sge_dma_alloc(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_list_data *ld;
	struct sge_rxdesc *rxd;
	struct sge_txdesc *txd;
	int error, i;

	cd = &sc->sge_cdata;
	ld = &sc->sge_ldata;
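	/*
	 * Create the parent DMA tag. The descriptors carry only the low
	 * 32 bits of a bus address (see SGE_ADDR_LO() below), so all DMA
	 * memory is restricted to the first 4GB of address space.
	 */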
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
--- 97 unchanged lines hidden ---
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Rx mbuf DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < SGE_TX_RING_CNT; i++) {
		txd = &cd->sge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		txd->tx_ndesc = 0;
		error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sge_dev,
			    "could not create Tx DMA map.\n");
			goto fail;
		}
	}
	/* Create spare DMA map for Rx buffer. */
	error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create spare Rx DMA map.\n");
		goto fail;
	}
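	/*
	 * The spare map backs the buffer-replacement scheme in
	 * sge_newbuf(): a new mbuf is loaded into the spare map first and
	 * the maps are swapped only on success, so a ring slot is never
	 * left without a loaded DMA map.
	 */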
	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sge_dev,
			    "could not create Rx DMA map.\n");
			goto fail;
		}
	}
fail:
	return (error);
}

static void
sge_dma_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_list_data *ld;
	struct sge_rxdesc *rxd;
	struct sge_txdesc *txd;
	int i;

	cd = &sc->sge_cdata;
	ld = &sc->sge_ldata;
	/* Rx ring. */
	if (cd->sge_rx_tag != NULL) {
		if (cd->sge_rx_dmamap != NULL)
			bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap);
--- 15 unchanged lines hidden ---
		ld->sge_tx_ring = NULL;
		cd->sge_tx_dmamap = NULL;
		bus_dma_tag_destroy(cd->sge_tx_tag);
		cd->sge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (cd->sge_rxmbuf_tag != NULL) {
		for (i = 0; i < SGE_RX_RING_CNT; i++) {
			rxd = &cd->sge_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(cd->sge_rxmbuf_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (cd->sge_rx_spare_map != NULL) {
			bus_dmamap_destroy(cd->sge_rxmbuf_tag,
			    cd->sge_rx_spare_map);
			cd->sge_rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(cd->sge_rxmbuf_tag);
		cd->sge_rxmbuf_tag = NULL;
	}
	/* Tx buffers. */
	if (cd->sge_txmbuf_tag != NULL) {
		for (i = 0; i < SGE_TX_RING_CNT; i++) {
			txd = &cd->sge_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(cd->sge_txmbuf_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(cd->sge_txmbuf_tag);
		cd->sge_txmbuf_tag = NULL;
	}
	if (cd->sge_tag != NULL)
		bus_dma_tag_destroy(cd->sge_tag);
	cd->sge_tag = NULL;
--- 20 unchanged lines hidden ---
	cd->sge_tx_cnt = 0;
	return (0);
}

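/*
 * Release any mbufs still attached to Tx descriptors, syncing and
 * unloading their DMA maps before freeing.
 */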
static int
sge_list_tx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_txdesc *txd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_TX_RING_CNT; i++) {
		txd = &cd->sge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
			m_free(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
--- 18 unchanged lines hidden ---
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

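/*
 * Free the mbufs attached to the Rx ring; each DMA map is synced for
 * the CPU and unloaded before its buffer is released.
 */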
static int
sge_list_rx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(cd->sge_rxmbuf_tag,
			    rxd->rx_dmamap);
			m_free(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	return (0);
}

/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
static int
sge_newbuf(struct sge_softc *sc, int prod)
{
	struct mbuf *m;
	struct sge_desc *desc;
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	SGE_LOCK_ASSERT(sc);

	cd = &sc->sge_cdata;
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, SGE_RX_BUF_ALIGN);
	error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
	    cd->sge_rx_spare_map, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
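	/*
	 * The replacement mbuf was loaded into the spare map above, so
	 * the old buffer can now be unloaded and the two maps swapped;
	 * had the load failed earlier, the ring slot would still hold
	 * its previous buffer and mapping.
	 */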
	rxd = &cd->sge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = cd->sge_rx_spare_map;
	cd->sge_rx_spare_map = map;
	bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	desc = &sc->sge_ldata.sge_rx_ring[prod];
	desc->sge_sts_size = 0;
	desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
	desc->sge_flags = htole32(segs[0].ds_len);
	if (prod == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR | RDC_IP_CSUM |
--- 69 unchanged lines hidden ---
#ifdef SGE_SHOW_ERRORS
			device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat,
			    RX_ERR_BITS);
#endif
			sge_discard_rxbuf(sc, cons);
			ifp->if_ierrors++;
			continue;
		}
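		/*
		 * Save the received mbuf before refilling the slot; if
		 * sge_newbuf() cannot allocate a replacement, the old
		 * buffer is recycled in place and the frame is dropped.
		 */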
		m = cd->sge_rxdesc[cons].rx_m;
		if (sge_newbuf(sc, cons) != 0) {
			sge_discard_rxbuf(sc, cons);
			ifp->if_iqdrops++;
			continue;
		}
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((rxinfo & RDC_IP_CSUM) != 0 &&
			    (rxinfo & RDC_IP_CSUM_OK) != 0)
--- 50 unchanged lines hidden ---
 * the list buffers.
 */
static void
sge_txeof(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct sge_list_data *ld;
	struct sge_chain_data *cd;
	struct sge_txdesc *txd;
	uint32_t txstat;
	int cons, nsegs, prod;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	ld = &sc->sge_ldata;
	cd = &sc->sge_cdata;

	if (cd->sge_tx_cnt == 0)
		return;
	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = cd->sge_tx_cons;
	prod = cd->sge_tx_prod;
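	/*
	 * Walk the ring from the consumer toward the producer index,
	 * reclaiming every frame whose first descriptor the controller
	 * has handed back.
	 */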
	for (; cons != prod;) {
		txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;
		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated by the controller, so the driver
		 * has to skip the entire chain of buffers for the
		 * transmitted frame.  In other words, the TDC_OWN bit is
		 * valid only in the first descriptor of a multi-descriptor
		 * transmission.
		 */
		if (SGE_TX_ERROR(txstat) != 0) {
#ifdef SGE_SHOW_ERRORS
			device_printf(sc->sge_dev, "Tx error : 0x%b\n",
			    txstat, TX_ERR_BITS);
#endif
			ifp->if_oerrors++;
		} else {
#ifdef notyet
			ifp->if_collisions += (txstat & 0xFFFF) - 1;
#endif
			ifp->if_opackets++;
		}
		txd = &cd->sge_txdesc[cons];
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			ld->sge_tx_ring[cons].sge_cmdsts = 0;
			SGE_INC(cons, SGE_TX_RING_CNT);
		}
		/* Reclaim transmitted mbuf. */
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf\n", __func__));
		bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		cd->sge_tx_cnt -= txd->tx_ndesc;
		KASSERT(cd->sge_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	cd->sge_tx_cons = cons;
	if (cd->sge_tx_cnt == 0)
		sc->sge_timer = 0;
}

static void
sge_tick(void *arg)
--- 85 unchanged lines hidden ---
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sge_encap(struct sge_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	struct sge_desc *desc;
	struct sge_txdesc *txd;
	bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
	uint32_t cflags;
	int error, i, nsegs, prod, si;

	SGE_LOCK_ASSERT(sc);

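	/*
	 * Map the frame for DMA.  If it is spread over more than
	 * SGE_MAXTXSEGS fragments, the load fails with EFBIG and the
	 * mbuf chain is compacted with m_collapse() before one retry.
	 */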
	si = prod = sc->sge_cdata.sge_tx_prod;
	txd = &sc->sge_cdata.sge_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, SGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	KASSERT(nsegs != 0, ("zero segment returned"));
	/* Check descriptor overrun. */
	if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
		bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

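	/*
	 * Gather the checksum-offload requests for this frame; the
	 * TDC_*_CSUM bits are written into the first descriptor below
	 * and tell the controller which checksums to insert.
	 */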
	m = *m_head;
	cflags = 0;
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= TDC_IP_CSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= TDC_TCP_CSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= TDC_UDP_CSUM;
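	/*
	 * Build the descriptor chain.  TDC_OWN is set right away on every
	 * descriptor except the first; the first descriptor is handed to
	 * the controller last (below), so the chip never sees a partially
	 * constructed chain.
	 */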
	for (i = 0; i < nsegs; i++) {
		desc = &sc->sge_ldata.sge_tx_ring[prod];
		if (i == 0) {
			desc->sge_sts_size = htole32(m->m_pkthdr.len);
			desc->sge_cmdsts = 0;
		} else {
			desc->sge_sts_size = 0;
			desc->sge_cmdsts = htole32(TDC_OWN);
		}
		desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr));
		desc->sge_flags = htole32(txsegs[i].ds_len);
		if (prod == SGE_TX_RING_CNT - 1)
			desc->sge_flags |= htole32(RING_END);
		sc->sge_cdata.sge_tx_cnt++;
		SGE_INC(prod, SGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->sge_cdata.sge_tx_prod = prod;

	desc = &sc->sge_ldata.sge_tx_ring[si];
	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cflags |= m->m_pkthdr.ether_vtag;
		desc->sge_sts_size |= htole32(TDS_INS_VLAN);
	}
	desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
#if 1
	if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
		desc->sge_cmdsts |= htole32(TDC_BST);
#else
	if ((sc->sge_flags & SGE_FLAG_FDX) == 0) {
		desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF);
		if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
			desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST);
	}
#endif
	/* Request interrupt and give ownership to controller. */
	desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
	txd->tx_m = m;
	txd->tx_ndesc = nsegs;
	return (0);
}

static void
sge_start(struct ifnet *ifp)
{
	struct sge_softc *sc;

--- 14 unchanged lines hidden ---
	SGE_LOCK_ASSERT(sc);

	if ((sc->sge_flags & SGE_FLAG_LINK) == 0 ||
	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
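		/*
		 * Keep at least SGE_MAXTXSEGS descriptors in reserve so a
		 * maximally fragmented frame can always be encapsulated.
		 */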
		if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT -
		    SGE_MAXTXSEGS)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		if (sge_encap(sc, &m_head)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
--- 307 unchanged lines hidden ---