Deleted Added
sdiff udiff text old ( 231511 ) new ( 231879 )
full compact
1/*-
2 * Copyright (C) 2012 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,

--- 22 unchanged lines hidden (view full) ---

31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
39
40/* $FreeBSD: head/sys/dev/oce/oce_if.c 231511 2012-02-11 08:33:52Z bz $ */
41
42#include "opt_inet6.h"
43#include "opt_inet.h"
44
45#include "oce_if.h"
46
47
48/* Driver entry points prototypes */
49static int oce_probe(device_t dev);

--- 16 unchanged lines hidden (view full) ---

66static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
67static int oce_media_change(struct ifnet *ifp);
68
69/* Transmit routines prototypes */
70static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
71static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
72static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
73 uint32_t status);
74#if defined(INET6) || defined(INET)
75static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp,
76 uint16_t *mss);
77#endif
78static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
79 struct oce_wq *wq);
80
81/* Receive routines prototypes */
82static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
83static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
84static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
85#if defined(INET6) || defined(INET)
86static void oce_rx_flush_lro(struct oce_rq *rq);
87#endif
88static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
89 struct oce_nic_rx_cqe *cqe);
90
91/* Helper function prototypes in this file */
92static int oce_attach_ifp(POCE_SOFTC sc);
93static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
94static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
95static int oce_vid_config(POCE_SOFTC sc);
96static void oce_mac_addr_set(POCE_SOFTC sc);
97static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
98static void oce_local_timer(void *arg);
99#if defined(INET6) || defined(INET)
100static int oce_init_lro(POCE_SOFTC sc);
101#endif
102static void oce_if_deactivate(POCE_SOFTC sc);
103static void oce_if_activate(POCE_SOFTC sc);
104static void setup_max_queues_want(POCE_SOFTC sc);
105static void update_queues_got(POCE_SOFTC sc);
106
107static device_method_t oce_dispatch[] = {
108 DEVMETHOD(device_probe, oce_probe),
109 DEVMETHOD(device_attach, oce_attach),
110 DEVMETHOD(device_detach, oce_detach),
111 DEVMETHOD(device_shutdown, oce_shutdown),
112 {0, 0}
113};
114

--- 37 unchanged lines hidden (view full) ---

152
153/*****************************************************************************
154 * Driver entry points functions *
155 *****************************************************************************/
156
157static int
158oce_probe(device_t dev)
159{
160 uint16_t vendor;
161 uint16_t device;
162 int i;
163 char str[80];
164 POCE_SOFTC sc;
165
166 sc = device_get_softc(dev);
167 bzero(sc, sizeof(OCE_SOFTC));
168 sc->dev = dev;
169
170 vendor = pci_get_vendor(dev);
171 device = pci_get_device(dev);
172
173 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint16_t)); i++) {
174 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
175 if (device == (supportedDevices[i] & 0xffff)) {
176 sprintf(str, "%s:%s",
177 "Emulex CNA NIC function",
178 component_revision);
179 device_set_desc_copy(dev, str);
180
181 switch (device) {
182 case PCI_PRODUCT_BE2:
183 sc->flags |= OCE_FLAGS_BE2;
184 break;
185 case PCI_PRODUCT_BE3:

--- 37 unchanged lines hidden (view full) ---

223 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
224 LOCK_CREATE(&sc->dev_lock, "Device_lock");
225
226 /* initialise the hardware */
227 rc = oce_hw_init(sc);
228 if (rc)
229 goto pci_res_free;
230
231
232 setup_max_queues_want(sc);
233
234
235 rc = oce_setup_intr(sc);
236 if (rc)
237 goto mbox_free;
238
239
240 rc = oce_queue_init_all(sc);
241 if (rc)
242 goto intr_free;
243
244
245 rc = oce_attach_ifp(sc);
246 if (rc)
247 goto queues_free;
248
249
250#if defined(INET6) || defined(INET)
251 rc = oce_init_lro(sc);
252 if (rc)
253 goto ifp_free;
254#endif
255
256
257 rc = oce_hw_start(sc);
258 if (rc)
259 goto lro_free;;
260
261
262 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
263 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
264 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
265 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
266
267 rc = oce_stats_init(sc);
268 if (rc)
269 goto vlan_free;
270
271 oce_add_sysctls(sc);
272
273
274 callout_init(&sc->timer, CALLOUT_MPSAFE);
275 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
276 if (rc)
277 goto stats_free;
278
279 return 0;
280
281stats_free:
282 callout_drain(&sc->timer);
283 oce_stats_free(sc);
284vlan_free:
285 if (sc->vlan_attach)

--- 24 unchanged lines hidden (view full) ---

310
311
312static int
313oce_detach(device_t dev)
314{
315 POCE_SOFTC sc = device_get_softc(dev);
316
317 LOCK(&sc->dev_lock);
318
319 oce_if_deactivate(sc);
320
321 UNLOCK(&sc->dev_lock);
322
323 callout_drain(&sc->timer);
324
325 if (sc->vlan_attach != NULL)
326 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
327 if (sc->vlan_detach != NULL)
328 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

--- 25 unchanged lines hidden (view full) ---

354oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
355{
356 struct ifreq *ifr = (struct ifreq *)data;
357 POCE_SOFTC sc = ifp->if_softc;
358 int rc = 0;
359 uint32_t u;
360
361 switch (command) {
362 case SIOCGIFPSRCADDR_IN6:
363 rc = ether_ioctl(ifp, command, data);
364 break;
365
366 case SIOCGIFPSRCADDR:
367 rc = ether_ioctl(ifp, command, data);
368 break;
369
370 case SIOCGIFSTATUS:
371 rc = ether_ioctl(ifp, command, data);
372 break;
373
374 case SIOCGIFMEDIA:
375 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
376 break;
377
378 case SIOCSIFMEDIA:
379 rc = ether_ioctl(ifp, command, data);
380 break;
381
382 case SIOCGIFGENERIC:
383 rc = ether_ioctl(ifp, command, data);
384 break;
385
386 case SIOCGETMIFCNT_IN6:
387 rc = ether_ioctl(ifp, command, data);
388 break;
389
390 case SIOCSIFMTU:
391 if (ifr->ifr_mtu > OCE_MAX_MTU)
392 rc = EINVAL;
393 else
394 ifp->if_mtu = ifr->ifr_mtu;
395 break;
396
397 case SIOCSIFFLAGS:

--- 71 unchanged lines hidden (view full) ---

469
470 if (u & IFCAP_VLAN_HWTAGGING)
471 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
472
473 if (u & IFCAP_VLAN_HWFILTER) {
474 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
475 oce_vid_config(sc);
476 }
477
478#if defined(INET6) || defined(INET)
479 if (u & IFCAP_LRO)
480 ifp->if_capenable ^= IFCAP_LRO;
481#endif
482
483 break;
484
485 case SIOCGPRIVATE_0:

--- 322 unchanged lines hidden (view full) ---

808 struct mbuf *m, *m_temp;
809 struct oce_wq *wq = sc->wq[wq_index];
810 struct oce_packet_desc *pd;
811 uint32_t out;
812 struct oce_nic_hdr_wqe *nichdr;
813 struct oce_nic_frag_wqe *nicfrag;
814 int num_wqes;
815 uint32_t reg_value;
816#if defined(INET6) || defined(INET)
817 uint16_t mss = 0;
818#endif
819
820 m = *mpp;
821 if (!m)
822 return EINVAL;
823
824 if (!(m->m_flags & M_PKTHDR)) {
825 rc = ENXIO;
826 goto free_ret;
827 }
828
829 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
830#if defined(INET6) || defined(INET)
831 /* consolidate packet buffers for TSO/LSO segment offload */
832 m = oce_tso_setup(sc, mpp, &mss);
833#else
834 m = NULL;
835#endif
836 if (m == NULL) {
837 rc = ENXIO;
838 goto free_ret;
839 }
840 }

--- 166 unchanged lines hidden (view full) ---

1007 if (!drbr_empty(sc->ifp, wq->br))
1008#else
1009 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1010#endif
1011 taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
1012
1013}
1014
1015#if defined(INET6) || defined(INET)
1016static struct mbuf *
1017oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp, uint16_t *mss)
1018{
1019 struct mbuf *m;
1020#ifdef INET
1021 struct ip *ip;
1022#endif
1023#ifdef INET6
1024 struct ip6_hdr *ip6;
1025#endif
1026 struct ether_vlan_header *eh;
1027 struct tcphdr *th;
1028 int total_len = 0;
1029 uint16_t etype;
1030 int ehdrlen = 0;
1031
1032 m = *mpp;
1033 *mss = m->m_pkthdr.tso_segsz;
1034
1035 if (M_WRITABLE(m) == 0) {
1036 m = m_dup(*mpp, M_DONTWAIT);
1037 if (!m)
1038 return NULL;
1039 m_freem(*mpp);
1040 *mpp = m;
1041 }
1042
1043 eh = mtod(m, struct ether_vlan_header *);
1044 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1045 etype = ntohs(eh->evl_proto);
1046 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1047 } else {
1048 etype = ntohs(eh->evl_encap_proto);
1049 ehdrlen = ETHER_HDR_LEN;
1050 }
1051
1052
1053 switch (etype) {
1054#ifdef INET
1055 case ETHERTYPE_IP:
1056 ip = (struct ip *)(m->m_data + ehdrlen);
1057 if (ip->ip_p != IPPROTO_TCP)
1058 return NULL;
1059 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1060

--- 18 unchanged lines hidden (view full) ---

1079 if (!m)
1080 return NULL;
1081 *mpp = m;
1082 return m;
1083
1084}
1085#endif /* INET6 || INET */
1086
1087
1088void
1089oce_tx_task(void *arg, int npending)
1090{
1091 struct oce_wq *wq = arg;
1092 POCE_SOFTC sc = wq->parent;
1093 struct ifnet *ifp = sc->ifp;
1094 int rc = 0;
1095

--- 14 unchanged lines hidden (view full) ---

1110
1111
1112void
1113oce_start(struct ifnet *ifp)
1114{
1115 POCE_SOFTC sc = ifp->if_softc;
1116 struct mbuf *m;
1117 int rc = 0;
1118
1119 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1120 IFF_DRV_RUNNING)
1121 return;
1122
1123 do {
1124 IF_DEQUEUE(&sc->ifp->if_snd, m);
1125 if (m == NULL)
1126 break;
1127 /* oce_start always uses default TX queue 0 */
1128 LOCK(&sc->wq[0]->tx_lock);
1129 rc = oce_tx(sc, &m, 0);
1130 UNLOCK(&sc->wq[0]->tx_lock);
1131 if (rc) {
1132 if (m != NULL) {
1133 sc->wq[0]->tx_stats.tx_stops ++;
1134 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1135 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1136 m = NULL;
1137 }
1138 break;
1139 }
1140 if (m != NULL)
1141 ETHER_BPF_MTAP(ifp, m);
1142
1143 } while (1);
1144
1145 return;
1146}
1147
1148
1149/* Handle the Completion Queue for transmit */
1150uint16_t
1151oce_wq_handler(void *arg)

--- 91 unchanged lines hidden (view full) ---

1243 uint32_t out;
1244 struct oce_packet_desc *pd;
1245 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1246 int i, len, frag_len;
1247 struct mbuf *m = NULL, *tail = NULL;
1248 uint16_t vtag;
1249
1250 len = cqe->u0.s.pkt_size;
1251 vtag = cqe->u0.s.vlan_tag;
1252 if (!len) {
1253 /*partial DMA workaround for Lancer*/
1254 oce_discard_rx_comp(rq, cqe);
1255 goto exit;
1256 }
1257
1258 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1259
1260 if (rq->packets_out == rq->packets_in) {
1261 device_printf(sc->dev,
1262 "RQ transmit descriptor missing\n");
1263 }
1264 out = rq->packets_out + 1;
1265 if (out == OCE_RQ_PACKET_ARRAY_SIZE)

--- 19 unchanged lines hidden (view full) ---

1285 pd->mbuf->m_pkthdr.csum_flags = 0;
1286 if (IF_CSUM_ENABLED(sc)) {
1287 if (cqe->u0.s.l4_cksum_pass) {
1288 pd->mbuf->m_pkthdr.csum_flags |=
1289 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1290 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1291 }
1292 if (cqe->u0.s.ip_cksum_pass) {
1293 if (!cqe->u0.s.ip_ver) { //IPV4
1294 pd->mbuf->m_pkthdr.csum_flags |=
1295 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1296 }
1297 }
1298 }
1299 m = tail = pd->mbuf;
1300 }
1301 pd->mbuf = NULL;

--- 6 unchanged lines hidden (view full) ---

1308 goto exit;
1309 }
1310
1311 m->m_pkthdr.rcvif = sc->ifp;
1312#if __FreeBSD_version >= 800000
1313 m->m_pkthdr.flowid = rq->queue_index;
1314 m->m_flags |= M_FLOWID;
1315#endif
 1316 //This determines if vlan tag is present
1317 if (oce_cqe_vtp_valid(sc, cqe)) {
1318 if (sc->function_mode & FNM_FLEX10_MODE) {
1319 /* FLEX10 */
1320 if (cqe->u0.s.qnq) {
1321 /* If QnQ is not set, neglect VLAN */
1322 if (IS_BE(sc))
1323 m->m_pkthdr.ether_vtag =
1324 BSWAP_16(vtag);
1325 else
1326 m->m_pkthdr.ether_vtag = vtag;
1327 m->m_flags |= M_VLANTAG;
1328 }
1329 } else {
1330 if (IS_BE(sc))
1331 m->m_pkthdr.ether_vtag = BSWAP_16(vtag);
1332 else
1333 m->m_pkthdr.ether_vtag = vtag;
1334 m->m_flags |= M_VLANTAG;
1335 }
1336 }
1337
1338 sc->ifp->if_ipackets++;
1339#if defined(INET6) || defined(INET)
1340 /* Try to queue to LRO */
1341 if (IF_LRO_ENABLED(sc) &&

--- 68 unchanged lines hidden (view full) ---

/*
 * Report whether this RX completion entry carries a VLAN tag.
 * In BE3 native mode the completion uses the v1 CQE layout, so the
 * entry is recast to struct oce_nic_rx_cqe_v1 before reading the
 * vlan_tag_present bit; otherwise the bit is read from the base layout.
 * Returns non-zero when a VLAN tag is present.
 */
1410oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
 1411{
 1412 struct oce_nic_rx_cqe_v1 *cqe_v1;
 1413 int vtp = 0;
 1414
 1415 if (sc->be3_native) {
 1416 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
 1417 vtp = cqe_v1->u0.s.vlan_tag_present;
 1418 } else {
 1419 vtp = cqe->u0.s.vlan_tag_present;
 1420 }
 1421
 1422 return vtp;
 1423
 1424}
1425
1426
1427static int
1428oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)

--- 8 unchanged lines hidden (view full) ---

1437 return 0;
1438 } else
1439 ;/* For BE3 legacy and Lancer this is dummy */
1440
1441 return 1;
1442
1443}
1444
1445
1446#if defined(INET6) || defined(INET)
1447static void
1448oce_rx_flush_lro(struct oce_rq *rq)
1449{
1450 struct lro_ctrl *lro = &rq->lro;
1451 struct lro_entry *queued;
1452 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1453

--- 23 unchanged lines hidden (view full) ---

1477 device_printf(sc->dev, "LRO init failed\n");
1478 return rc;
1479 }
1480 lro->ifp = sc->ifp;
1481 }
1482
1483 return rc;
1484}
1485#endif /* INET6 || INET */
1486
1487void
1488oce_free_lro(POCE_SOFTC sc)
1489{
1490#if defined(INET6) || defined(INET)
1491 struct lro_ctrl *lro = NULL;
1492 int i = 0;
1493
1494 for (i = 0; i < sc->nrqs; i++) {
1495 lro = &sc->rq[i]->lro;
1496 if (lro)
1497 tcp_lro_free(lro);
1498 }
1499#endif
1500}
1501
1502
1503int
1504oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1505{
1506 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1507 int i, in, rc;
1508 struct oce_packet_desc *pd;
1509 bus_dma_segment_t segs[6];
1510 int nsegs, added = 0;

--- 104 unchanged lines hidden (view full) ---

1615 bus_dmamap_sync(cq->ring->dma.tag,
1616 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1617 cqe =
1618 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1619 num_cqes++;
1620 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1621 break;
1622 }
1623#if defined(INET6) || defined(INET)
1624 if (IF_LRO_ENABLED(sc))
1625 oce_rx_flush_lro(rq);
1626#endif
1627
1628 if (num_cqes) {
1629 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1630 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;

--- 46 unchanged lines hidden (view full) ---

1677
1678 sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1679 sc->ifp->if_hwassist |= CSUM_TSO;
1680 sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1681
1682 sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1683 sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1684 sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1685#if defined(INET6) || defined(INET)
1686 sc->ifp->if_capabilities |= IFCAP_TSO;
1687 sc->ifp->if_capabilities |= IFCAP_LRO;
1688#endif
1689
1690 sc->ifp->if_capenable = sc->ifp->if_capabilities;
1691 sc->ifp->if_baudrate = IF_Gbps(10UL);
1692
1693 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1694
1695 return 0;

--- 243 unchanged lines hidden (view full) ---

1939
1940 for_all_evnt_queues(sc, eq, i)
1941 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
1942
1943 oce_hw_intr_enable(sc);
1944
1945}
1946
1947/* Handle the Completion Queue for the Mailbox/Async notifications */
1948uint16_t
1949oce_mq_handler(void *arg)
1950{
1951 struct oce_mq *mq = (struct oce_mq *)arg;
1952 POCE_SOFTC sc = mq->parent;
1953 struct oce_cq *cq = mq->cq;
1954 int num_cqes = 0;
1955 struct oce_mq_cqe *cqe;
1956 struct oce_async_cqe_link_state *acqe;
1957
1958 bus_dmamap_sync(cq->ring->dma.tag,
1959 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1960 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
1961 while (cqe->u0.dw[3]) {
1962 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
1963 if (cqe->u0.s.async_event) {
1964 acqe = (struct oce_async_cqe_link_state *)cqe;
1965 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1966 ASYNC_EVENT_LINK_UP) {
1967 sc->link_status = ASYNC_EVENT_LINK_UP;
1968 if_link_state_change(sc->ifp, LINK_STATE_UP);
1969 } else {
1970 sc->link_status = ASYNC_EVENT_LINK_DOWN;
1971 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
1972 }
1973
1974 if (acqe->u0.s.event_code ==
1975 ASYNC_EVENT_CODE_LINK_STATE) {
1976 sc->link_speed = acqe->u0.s.speed;
1977 sc->qos_link_speed =
1978 (uint32_t )acqe->u0.s.qos_link_speed * 10;
1979 }
1980 }
1981 cqe->u0.dw[3] = 0;
1982 RING_GET(cq->ring, 1);
1983 RING_GET(mq->ring, 1);
1984 bus_dmamap_sync(cq->ring->dma.tag,
1985 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1986 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
1987 num_cqes++;
1988 }
1989
1990 if (num_cqes)
1991 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

--- 4 unchanged lines hidden (view full) ---

1996
1997static void
1998setup_max_queues_want(POCE_SOFTC sc)
1999{
2000 int max_rss = 0;
2001
2002 /* Check if it is FLEX machine. Is so dont use RSS */
2003 if ((sc->function_mode & FNM_FLEX10_MODE) ||
2004 (!sc->rss_enable) ||
2005 (sc->flags & OCE_FLAGS_BE2)) {
2006 sc->nrqs = 1;
2007 sc->nwqs = 1;
2008 sc->rss_enable = 0;
2009 } else {
2010 /* For multiq, our deisgn is to have TX rings equal to
2011 RSS rings. So that we can pair up one RSS ring and TX
2012 to a single intr, which improves CPU cache efficiency.
2013 */
2014 if (IS_BE(sc) && (!sc->be3_native))
2015 max_rss = OCE_LEGACY_MODE_RSS;
2016 else
2017 max_rss = OCE_MAX_RSS;
2018
2019 sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for def RX */
2020 sc->nwqs = MIN(OCE_NCPUS, max_rss);
2021
2022 /*Hardware issue. Turn off multi TX for be2 */
2023 if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
2024 sc->nwqs = 1;
2025
2026 }
2027
2028}
2029
2030
2031static void
2032update_queues_got(POCE_SOFTC sc)
2033{
2034 if (sc->rss_enable) {
2035 sc->nrqs = sc->intr_count + 1;
2036 sc->nwqs = sc->intr_count;
2037 if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
2038 sc->nwqs = 1;
2039 } else {
2040 sc->nrqs = 1;
2041 sc->nwqs = 1;
2042 }
2043}
2044