oce_if.c: r246799 → r247880 (unified view: lines marked '-' were deleted in r247880, lines marked '+' were added)
/*-
 * Copyright (C) 2012 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,

--- 22 unchanged lines hidden ---

 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

-/* $FreeBSD: head/sys/dev/oce/oce_if.c 246799 2013-02-14 17:34:17Z jpaetzel $ */
+/* $FreeBSD: head/sys/dev/oce/oce_if.c 247880 2013-03-06 09:53:38Z delphij $ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"


/* Driver entry points prototypes */
static int oce_probe(device_t dev);

--- 40 unchanged lines hidden ---

static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
	struct oce_async_cqe_link_state *acqe);
+static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
+static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m,
+	boolean_t *complete);

/* IP specific */
#if defined(INET6) || defined(INET)
static int oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {

--- 154 unchanged lines hidden ---

		goto vlan_free;

	oce_add_sysctls(sc);

	callout_init(&sc->timer, CALLOUT_MPSAFE);
	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
	if (rc)
		goto stats_free;
-#ifdef DEV_NETMAP
-#endif /* DEV_NETMAP */

	return 0;

stats_free:
	callout_drain(&sc->timer);
	oce_stats_free(sc);
vlan_free:
	if (sc->vlan_attach)

--- 201 unchanged lines hidden ---


static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct oce_wq *wq = NULL;
	int queue_index = 0;
	int status = 0;

	if ((m->m_flags & M_FLOWID) != 0)
		queue_index = m->m_pkthdr.flowid % sc->nwqs;

	wq = sc->wq[queue_index];

	if (TRY_LOCK(&wq->tx_lock)) {
		status = oce_multiq_transmit(ifp, m, wq);
		UNLOCK(&wq->tx_lock);

--- 66 unchanged lines hidden ---

	/* Arm all cqs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
+
	return;
}


static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;

--- 49 unchanged lines hidden ---


	if (ii->eq == NULL)
		return FILTER_STRAY;

	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

	taskqueue_enqueue_fast(ii->tq, &ii->task);

+	ii->eq->intr++;
+
	return FILTER_HANDLED;
}


static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
	POCE_INTR_INFO ii = &sc->intrs[vector];

--- 131 unchanged lines hidden ---

	struct mbuf *m, *m_temp;
	struct oce_wq *wq = sc->wq[wq_index];
	struct oce_packet_desc *pd;
	uint32_t out;
	struct oce_nic_hdr_wqe *nichdr;
	struct oce_nic_frag_wqe *nicfrag;
	int num_wqes;
	uint32_t reg_value;
+	boolean_t complete = TRUE;

	m = *mpp;
	if (!m)
		return EINVAL;

	if (!(m->m_flags & M_PKTHDR)) {
		rc = ENXIO;
		goto free_ret;
	}

+	if (oce_tx_asic_stall_verify(sc, m)) {
+		m = oce_insert_vlan_tag(sc, m, &complete);
+		if (!m) {
+			device_printf(sc->dev, "Insertion unsuccessful\n");
+			return 0;
+		}
+	}
+
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
		m = oce_tso_setup(sc, mpp);
#else
		m = NULL;
#endif
		if (m == NULL) {

--- 31 unchanged lines hidden ---

	nichdr =
		RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
	nichdr->u0.dw[0] = 0;
	nichdr->u0.dw[1] = 0;
	nichdr->u0.dw[2] = 0;
	nichdr->u0.dw[3] = 0;

-	nichdr->u0.s.complete = 1;
+	nichdr->u0.s.complete = complete;
	nichdr->u0.s.event = 1;
	nichdr->u0.s.crc = 1;
	nichdr->u0.s.forward = 0;
	nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
	nichdr->u0.s.udpcs =
		(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
	nichdr->u0.s.tcpcs =
		(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
	nichdr->u0.s.num_wqe = num_wqes;
	nichdr->u0.s.total_length = m->m_pkthdr.len;
	if (m->m_flags & M_VLANTAG) {
		nichdr->u0.s.vlan = 1; /* VLAN present */
		nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
	}
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (m->m_pkthdr.tso_segsz) {

--- 33 unchanged lines hidden ---

		pd->nsegs++;
	}

	sc->ifp->if_opackets++;
	wq->tx_stats.tx_reqs++;
	wq->tx_stats.tx_wrbs += num_wqes;
	wq->tx_stats.tx_bytes += m->m_pkthdr.len;
	wq->tx_stats.tx_pkts++;

	bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	reg_value = (num_wqes << 16) | wq->wq_id;
	OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);

	} else if (rc == EFBIG) {
		if (retry_cnt == 0) {
			m_temp = m_defrag(m, M_NOWAIT);

--- 169 unchanged lines hidden ---

	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default tx queue is 0 */

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
+
+	if (!sc->link_status)
+		return;

	do {
		IF_DEQUEUE(&sc->ifp->if_snd, m);
		if (m == NULL)
			break;

		LOCK(&sc->wq[def_q]->tx_lock);
		rc = oce_tx(sc, &m, def_q);

--- 206 unchanged lines hidden ---

			m->m_flags |= M_VLANTAG;
		}
	}

	sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
	/* Try to queue to LRO */
	if (IF_LRO_ENABLED(sc) &&
-	    !(m->m_flags & M_VLANTAG) &&
	    (cqe->u0.s.ip_cksum_pass) &&
	    (cqe->u0.s.l4_cksum_pass) &&
	    (!cqe->u0.s.ip_ver) &&
	    (rq->lro.lro_cnt != 0)) {

		if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
			rq->lro_pkts_queued++;
			goto post_done;

--- 23 unchanged lines hidden ---

static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out, i = 0;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int num_frags = cqe->u0.s.num_fragments;

-	if (IS_XE201(sc) && cqe->u0.s.error) {
-		/* Lancer A0 workaround:
-		 * num_frags will be 1 more than actual in case of error
-		 */
-		if (num_frags)
-			num_frags -= 1;
-	}
	for (i = 0; i < num_frags; i++) {
		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				"RQ transmit descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;

--- 92 unchanged lines hidden ---

	int i = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
-#endif /* INET6 || INET */
+#endif

int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, in, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;

+	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
	for (i = 0; i < count; i++) {
		in = rq->packets_in + 1;
		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
			in = 0;
		if (in == rq->packets_out)
			break;	/* no more room */

		pd = &rq->pckts[rq->packets_in];

--- 24 unchanged lines hidden ---

		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
	if (added != 0) {
		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
-			DELAY(1);
			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
			rxdb_reg.bits.qid = rq->rq_id;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			added -= OCE_MAX_RQ_POSTS;
		}
		if (added > 0) {
-			DELAY(1);
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
		}
	}

	return 0;
}
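The posting loop above batches the RXULP doorbell writes: one write per full batch of OCE_MAX_RQ_POSTS buffers, then a final write for the remainder (r247880 also zeroes rxdb_reg once up front and drops the DELAY(1) before each write). A minimal standalone sketch of that batching arithmetic, assuming a batch limit of 255 purely for illustration (the real OCE_MAX_RQ_POSTS value lives in the driver headers):

#include <stdio.h>

/* Assumed batch limit for illustration only; the real constant is
 * OCE_MAX_RQ_POSTS from the oce headers. */
#define MAX_RQ_POSTS 255

/* Mirrors the loop above: ring the doorbell once per full batch,
 * then once more for any remainder. */
static void
post_rx_buffers(int added)
{
	int i;

	for (i = added / MAX_RQ_POSTS; i > 0; i--) {
		printf("doorbell: num_posted=%d\n", MAX_RQ_POSTS);
		added -= MAX_RQ_POSTS;
	}
	if (added > 0)
		printf("doorbell: num_posted=%d\n", added);
}

int
main(void)
{
	post_rx_buffers(600);	/* prints 255, 255, then 90 */
	return 0;
}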

--- 18 unchanged lines hidden ---

		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		RING_GET(rq->ring, 1);
		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			sc->ifp->if_ierrors++;
-			if (IS_XE201(sc))
-				/* Lancer A0 no buffer workaround */
-				oce_discard_rx_comp(rq, cqe);
-			else
-				/* Post L3/L4 errors to stack. */
-				oce_rx(rq, cqe->u0.s.frag_index, cqe);
+			/* Post L3/L4 errors to stack. */
+			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;

#if defined(INET6) || defined(INET)
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}

--- 180 unchanged lines hidden ---

	struct ifreq *ifr = (struct ifreq *)data;
	int rc = ENXIO;
	char cookie[32] = {0};
	void *priv_data = (void *)ifr->ifr_data;
	void *ioctl_ptr;
	uint32_t req_size;
	struct mbx_hdr req;
	OCE_DMA_MEM dma_mem;
+	struct mbx_common_get_cntl_attr *fw_cmd;

	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
		return EFAULT;

	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
		return EINVAL;

	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
		return EFAULT;

	req_size = le32toh(req.u0.req.request_length);
	if (req_size > 65536)
		return EINVAL;

	req_size += sizeof(struct mbx_hdr);
	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
	if (rc)
		return ENOMEM;

--- 7 unchanged lines hidden ---

	if (rc) {
		rc = EIO;
		goto dma_free;
	}

	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
		rc = EFAULT;

+	/*
+	 * The firmware fills in all the attributes for this ioctl except
+	 * the driver version, so fill that in here.
+	 */
+	if (req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
+		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
+		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
+			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
+	}
+
dma_free:
	oce_dma_free(sc, &dma_mem);
	return rc;

}

+static void
+oce_eqd_set_periodic(POCE_SOFTC sc)
+{
+	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
+	struct oce_aic_obj *aic;
+	struct oce_eq *eqo;
+	uint64_t now = 0, delta;
+	int eqd, i, num = 0;
+	uint32_t ips = 0;
+	int tps;
+
+	for (i = 0 ; i < sc->neqs; i++) {
+		eqo = sc->eq[i];
+		aic = &sc->aic_obj[i];
+		/* When setting the static eq delay from the user space */
+		if (!aic->enable) {
+			eqd = aic->et_eqd;
+			goto modify_eqd;
+		}
+
+		now = ticks;
+
+		/* Overflow check */
+		if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
+			goto done;
+
+		delta = now - aic->ticks;
+		tps = delta/hz;
+
+		/* Interrupt rate based on elapsed ticks */
+		if (tps)
+			ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
+
+		if (ips > INTR_RATE_HWM)
+			eqd = aic->cur_eqd + 20;
+		else if (ips < INTR_RATE_LWM)
+			eqd = aic->cur_eqd / 2;
+		else
+			goto done;
+
+		if (eqd < 10)
+			eqd = 0;
+
+		/* Make sure that the eq delay is in the known range */
+		eqd = min(eqd, aic->max_eqd);
+		eqd = max(eqd, aic->min_eqd);
+
+modify_eqd:
+		if (eqd != aic->cur_eqd) {
+			set_eqd[num].delay_multiplier = (eqd * 65)/100;
+			set_eqd[num].eq_id = eqo->eq_id;
+			aic->cur_eqd = eqd;
+			num++;
+		}
+done:
+		aic->intr_prev = eqo->intr;
+		aic->ticks = now;
+	}
+
+	/* Is there at least one eq that needs to be modified? */
+	if (num)
+		oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
+
+}
+
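The new oce_eqd_set_periodic() adapts the EQ delay to the measured interrupt rate: above a high-water mark the delay grows by 20, below a low-water mark it is halved, values under 10 snap to 0, and the result is clamped to [min_eqd, max_eqd] before being scaled by 65/100 into the hardware delay multiplier. A minimal sketch of that update rule, with assumed threshold values for illustration (the real INTR_RATE_HWM/INTR_RATE_LWM come from the driver headers):

#include <stdio.h>

/* Assumed thresholds, for illustration only. */
#define INTR_RATE_HWM 35000
#define INTR_RATE_LWM 10000

/* Mirrors the adaptation rule above: raise the delay when the measured
 * interrupts-per-second exceed the high-water mark, halve it below the
 * low-water mark, snap small values to zero, then clamp to the range. */
static int
next_eqd(int cur_eqd, unsigned int ips, int min_eqd, int max_eqd)
{
	int eqd;

	if (ips > INTR_RATE_HWM)
		eqd = cur_eqd + 20;
	else if (ips < INTR_RATE_LWM)
		eqd = cur_eqd / 2;
	else
		return cur_eqd;	/* in band: leave unchanged */

	if (eqd < 10)
		eqd = 0;
	if (eqd > max_eqd)
		eqd = max_eqd;
	if (eqd < min_eqd)
		eqd = min_eqd;
	return eqd;
}

int
main(void)
{
	/* 50k intr/s pushes the delay up; the value written to the
	 * hardware is then (eqd * 65) / 100, as in the code above. */
	int eqd = next_eqd(40, 50000, 0, 120);
	printf("eqd=%d delay_multiplier=%d\n", eqd, (eqd * 65) / 100);
	return 0;
}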
static void
oce_local_timer(void *arg)
{
	POCE_SOFTC sc = arg;
	int i = 0;

	oce_refresh_nic_stats(sc);
	oce_refresh_queue_stats(sc);
	oce_mac_addr_set(sc);

	/* TX watchdog */
	for (i = 0; i < sc->nwqs; i++)
		oce_tx_restart(sc, sc->wq[i]);

+	/* calculate and set the eq delay for optimal interrupt rate */
+	if (IS_BE(sc))
+		oce_eqd_set_periodic(sc);
+
	callout_reset(&sc->timer, hz, oce_local_timer, sc);
}


/* NOTE : This should only be called holding
 *	  DEVICE_LOCK.
 */
static void

--- 20 unchanged lines hidden ---

		mtime += 1;
		if (!wait_req)
			break;
	}

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	/* Since taskqueue_drain takes a Giant lock, we should not acquire
	   any other lock. So unlock the device lock and reacquire it after
	   completing taskqueue_drain.
	 */
	UNLOCK(&sc->dev_lock);
	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}
	LOCK(&sc->dev_lock);

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries */
	for_all_evnt_queues(sc, eq, i)
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* But we still need to get MCC async events,
	   so enable intrs and also arm the first EQ.
	 */
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}


static void

--- 56 unchanged lines hidden ---

{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
+	struct oce_async_event_qnq *dbgcqe;


	bus_dmamap_sync(cq->ring->dma.tag,
		cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));

--- 10 unchanged lines hidden ---

			gcqe =
			(struct oce_async_event_grp5_pvid_state *)cqe;
			if (gcqe->enabled)
				sc->pvid = gcqe->tag & VLAN_VID_MASK;
			else
				sc->pvid = 0;

		}
+		else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
+			 optype == ASYNC_EVENT_DEBUG_QNQ) {
+			dbgcqe =
+			(struct oce_async_event_qnq *)cqe;
+			if (dbgcqe->valid)
+				sc->qnqid = dbgcqe->vlan_tag;
+			sc->qnq_debug_event = TRUE;
+		}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

--- 43 unchanged lines hidden ---

		sc->nrqs = sc->intr_count + 1;
		sc->nwqs = sc->intr_count;
	} else {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}
}

+static int
+oce_check_ipv6_ext_hdr(struct mbuf *m)
+{
+	struct ether_header *eh = mtod(m, struct ether_header *);
+	caddr_t m_datatemp = m->m_data;
+
+	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
+		m->m_data += sizeof(struct ether_header);
+		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+
+		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
+		    (ip6->ip6_nxt != IPPROTO_UDP)) {
+			struct ip6_ext *ip6e = NULL;
+			m->m_data += sizeof(struct ip6_hdr);
+
+			ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
+			if (ip6e->ip6e_len == 0xff) {
+				m->m_data = m_datatemp;
+				return TRUE;
+			}
+		}
+		m->m_data = m_datatemp;
+	}
+	return FALSE;
+}
+
+static int
+is_be3_a1(POCE_SOFTC sc)
+{
+	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
+		return TRUE;
+	}
+	return FALSE;
+}
+
+static struct mbuf *
+oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
+{
+	uint16_t vlan_tag = 0;
+
+	if (!M_WRITABLE(m))
+		return NULL;
+
+	/* Embed vlan tag in the packet if it is not part of it */
+	if (m->m_flags & M_VLANTAG) {
+		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
+		m->m_flags &= ~M_VLANTAG;
+	}
+
+	/* if UMC, ignore vlan tag insertion and instead insert pvid */
+	if (sc->pvid) {
+		if (!vlan_tag)
+			vlan_tag = sc->pvid;
+		*complete = FALSE;
+	}
+
+	if (vlan_tag) {
+		m = ether_vlanencap(m, vlan_tag);
+	}
+
+	if (sc->qnqid) {
+		m = ether_vlanencap(m, sc->qnqid);
+		*complete = FALSE;
+	}
+	return m;
+}
+
+static int
+oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
+{
+	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
+	    oce_check_ipv6_ext_hdr(m)) {
+		return TRUE;
+	}
+	return FALSE;
+}
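Taken together, these new functions implement a TX workaround for BE3 A1 silicon in QnQ/UMC configurations: oce_tx_asic_stall_verify() flags IPv6 packets that carry extension headers, and oce_insert_vlan_tag() then software-encapsulates the VLAN/PVID and QnQ tags via ether_vlanencap() while clearing the completion bit, as wired into oce_tx() earlier in this diff. A standalone sketch of that decision flow, using hypothetical stand-in types in place of the real mbuf and softc structures:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's softc and mbuf state. */
struct fake_sc { bool be3_a1, qnq_or_umc; unsigned pvid, qnqid; };
struct fake_pkt {
	bool has_vlan_tag, ipv6_ext_hdr;
	unsigned tags[2];	/* inner tag first, QnQ tag outermost */
	int ntags;
	bool complete;		/* the WQE completion bit */
};

/* Matches oce_tx_asic_stall_verify(): only BE3-A1 in QnQ/UMC mode with
 * an IPv6 extension header takes the software-encapsulation path. */
static bool
needs_workaround(const struct fake_sc *sc, const struct fake_pkt *p)
{
	return sc->be3_a1 && sc->qnq_or_umc && p->ipv6_ext_hdr;
}

/* Matches oce_insert_vlan_tag(): encapsulate the pvid and, outermost,
 * the QnQ id, clearing the completion bit whenever a tag is forced in. */
static void
insert_tags(const struct fake_sc *sc, struct fake_pkt *p)
{
	unsigned tag = 0;

	if (p->has_vlan_tag) { tag = 100; p->has_vlan_tag = false; } /* sample VID */
	if (sc->pvid) { if (!tag) tag = sc->pvid; p->complete = false; }
	if (tag) p->tags[p->ntags++] = tag;
	if (sc->qnqid) { p->tags[p->ntags++] = sc->qnqid; p->complete = false; }
}

int
main(void)
{
	struct fake_sc sc = { true, true, 5, 7 };
	struct fake_pkt p = { false, true, {0}, 0, true };

	if (needs_workaround(&sc, &p))
		insert_tags(&sc, &p);
	printf("tags=%d complete=%d\n", p.ntags, p.complete); /* tags=2 complete=0 */
	return 0;
}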