Deleted Added
sdiff udiff text old ( 231511 ) new ( 231879 )
full compact
1/*-
2 * Copyright (C) 2012 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,

--- 22 unchanged lines hidden (view full) ---

31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
39/* $FreeBSD: head/sys/dev/oce/oce_if.c 231879 2012-02-17 13:55:17Z luigi $ */
40
41#include "opt_inet6.h"
42#include "opt_inet.h"
43
44#include "oce_if.h"
45
46
47/* Driver entry points prototypes */
48static int oce_probe(device_t dev);

--- 16 unchanged lines hidden (view full) ---

65static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
66static int oce_media_change(struct ifnet *ifp);
67
68/* Transmit routines prototypes */
69static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
70static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
71static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
72 uint32_t status);
73static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
74 struct oce_wq *wq);
75
76/* Receive routines prototypes */
77static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
78static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
79static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
80static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
81 struct oce_nic_rx_cqe *cqe);
82
83/* Helper function prototypes in this file */
84static int oce_attach_ifp(POCE_SOFTC sc);
85static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
86static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
87static int oce_vid_config(POCE_SOFTC sc);
88static void oce_mac_addr_set(POCE_SOFTC sc);
89static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
90static void oce_local_timer(void *arg);
91static void oce_if_deactivate(POCE_SOFTC sc);
92static void oce_if_activate(POCE_SOFTC sc);
93static void setup_max_queues_want(POCE_SOFTC sc);
94static void update_queues_got(POCE_SOFTC sc);
95static void process_link_state(POCE_SOFTC sc,
96 struct oce_async_cqe_link_state *acqe);
97
98
99/* IP specific */
100#if defined(INET6) || defined(INET)
101static int oce_init_lro(POCE_SOFTC sc);
102static void oce_rx_flush_lro(struct oce_rq *rq);
103static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
104#endif
105
106static device_method_t oce_dispatch[] = {
107 DEVMETHOD(device_probe, oce_probe),
108 DEVMETHOD(device_attach, oce_attach),
109 DEVMETHOD(device_detach, oce_detach),
110 DEVMETHOD(device_shutdown, oce_shutdown),
111 {0, 0}
112};
113

--- 37 unchanged lines hidden (view full) ---

151
152/*****************************************************************************
153 * Driver entry points functions *
154 *****************************************************************************/
155
156static int
157oce_probe(device_t dev)
158{
159 uint16_t vendor = 0;
160 uint16_t device = 0;
161 int i = 0;
162 char str[256] = {0};
163 POCE_SOFTC sc;
164
165 sc = device_get_softc(dev);
166 bzero(sc, sizeof(OCE_SOFTC));
167 sc->dev = dev;
168
169 vendor = pci_get_vendor(dev);
170 device = pci_get_device(dev);
171
172 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
173 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
174 if (device == (supportedDevices[i] & 0xffff)) {
175 sprintf(str, "%s:%s", "Emulex CNA NIC function",
176 component_revision);
177 device_set_desc_copy(dev, str);
178
179 switch (device) {
180 case PCI_PRODUCT_BE2:
181 sc->flags |= OCE_FLAGS_BE2;
182 break;
183 case PCI_PRODUCT_BE3:

--- 37 unchanged lines hidden (view full) ---

221 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
222 LOCK_CREATE(&sc->dev_lock, "Device_lock");
223
224 /* initialise the hardware */
225 rc = oce_hw_init(sc);
226 if (rc)
227 goto pci_res_free;
228
229 setup_max_queues_want(sc);
230
231 rc = oce_setup_intr(sc);
232 if (rc)
233 goto mbox_free;
234
235 rc = oce_queue_init_all(sc);
236 if (rc)
237 goto intr_free;
238
239 rc = oce_attach_ifp(sc);
240 if (rc)
241 goto queues_free;
242
243#if defined(INET6) || defined(INET)
244 rc = oce_init_lro(sc);
245 if (rc)
246 goto ifp_free;
247#endif
248
249 rc = oce_hw_start(sc);
250 if (rc)
251 goto lro_free;;
252
253 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
254 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
255 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
256 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
257
258 rc = oce_stats_init(sc);
259 if (rc)
260 goto vlan_free;
261
262 oce_add_sysctls(sc);
263
264 callout_init(&sc->timer, CALLOUT_MPSAFE);
265 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
266 if (rc)
267 goto stats_free;
268#ifdef DEV_NETMAP
269#endif /* DEV_NETMAP */
270
271 return 0;
272
273stats_free:
274 callout_drain(&sc->timer);
275 oce_stats_free(sc);
276vlan_free:
277 if (sc->vlan_attach)

--- 24 unchanged lines hidden (view full) ---

302
303
304static int
305oce_detach(device_t dev)
306{
307 POCE_SOFTC sc = device_get_softc(dev);
308
309 LOCK(&sc->dev_lock);
310 oce_if_deactivate(sc);
311 UNLOCK(&sc->dev_lock);
312
313 callout_drain(&sc->timer);
314
315 if (sc->vlan_attach != NULL)
316 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
317 if (sc->vlan_detach != NULL)
318 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

--- 25 unchanged lines hidden (view full) ---

344oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
345{
346 struct ifreq *ifr = (struct ifreq *)data;
347 POCE_SOFTC sc = ifp->if_softc;
348 int rc = 0;
349 uint32_t u;
350
351 switch (command) {
352
353 case SIOCGIFMEDIA:
354 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
355 break;
356
357 case SIOCSIFMTU:
358 if (ifr->ifr_mtu > OCE_MAX_MTU)
359 rc = EINVAL;
360 else
361 ifp->if_mtu = ifr->ifr_mtu;
362 break;
363
364 case SIOCSIFFLAGS:

--- 71 unchanged lines hidden (view full) ---

436
437 if (u & IFCAP_VLAN_HWTAGGING)
438 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
439
440 if (u & IFCAP_VLAN_HWFILTER) {
441 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
442 oce_vid_config(sc);
443 }
444#if defined(INET6) || defined(INET)
445 if (u & IFCAP_LRO)
446 ifp->if_capenable ^= IFCAP_LRO;
447#endif
448
449 break;
450
451 case SIOCGPRIVATE_0:

--- 322 unchanged lines hidden (view full) ---

774 struct mbuf *m, *m_temp;
775 struct oce_wq *wq = sc->wq[wq_index];
776 struct oce_packet_desc *pd;
777 uint32_t out;
778 struct oce_nic_hdr_wqe *nichdr;
779 struct oce_nic_frag_wqe *nicfrag;
780 int num_wqes;
781 uint32_t reg_value;
782
783 m = *mpp;
784 if (!m)
785 return EINVAL;
786
787 if (!(m->m_flags & M_PKTHDR)) {
788 rc = ENXIO;
789 goto free_ret;
790 }
791
792 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
793 /* consolidate packet buffers for TSO/LSO segment offload */
794#if defined(INET6) || defined(INET)
795 m = oce_tso_setup(sc, mpp);
796#else
797 m = NULL;
798#endif
799 if (m == NULL) {
800 rc = ENXIO;
801 goto free_ret;
802 }
803 }

--- 166 unchanged lines hidden (view full) ---

970 if (!drbr_empty(sc->ifp, wq->br))
971#else
972 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
973#endif
974 taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
975
976}
977
978
979#if defined(INET6) || defined(INET)
980static struct mbuf *
981oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
982{
983 struct mbuf *m;
984#ifdef INET
985 struct ip *ip;
986#endif
987#ifdef INET6
988 struct ip6_hdr *ip6;
989#endif
990 struct ether_vlan_header *eh;
991 struct tcphdr *th;
992 uint16_t etype;
993 int total_len = 0, ehdrlen = 0;
994
995 m = *mpp;
996
997 if (M_WRITABLE(m) == 0) {
998 m = m_dup(*mpp, M_DONTWAIT);
999 if (!m)
1000 return NULL;
1001 m_freem(*mpp);
1002 *mpp = m;
1003 }
1004
1005 eh = mtod(m, struct ether_vlan_header *);
1006 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1007 etype = ntohs(eh->evl_proto);
1008 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1009 } else {
1010 etype = ntohs(eh->evl_encap_proto);
1011 ehdrlen = ETHER_HDR_LEN;
1012 }
1013
1014 switch (etype) {
1015#ifdef INET
1016 case ETHERTYPE_IP:
1017 ip = (struct ip *)(m->m_data + ehdrlen);
1018 if (ip->ip_p != IPPROTO_TCP)
1019 return NULL;
1020 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1021

--- 18 unchanged lines hidden (view full) ---

1040 if (!m)
1041 return NULL;
1042 *mpp = m;
1043 return m;
1044
1045}
1046#endif /* INET6 || INET */
1047
1048void
1049oce_tx_task(void *arg, int npending)
1050{
1051 struct oce_wq *wq = arg;
1052 POCE_SOFTC sc = wq->parent;
1053 struct ifnet *ifp = sc->ifp;
1054 int rc = 0;
1055

--- 14 unchanged lines hidden (view full) ---

1070
1071
1072void
1073oce_start(struct ifnet *ifp)
1074{
1075 POCE_SOFTC sc = ifp->if_softc;
1076 struct mbuf *m;
1077 int rc = 0;
1078 int def_q = 0; /* Defualt tx queue is 0*/
1079
1080 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1081 IFF_DRV_RUNNING)
1082 return;
1083
1084 do {
1085 IF_DEQUEUE(&sc->ifp->if_snd, m);
1086 if (m == NULL)
1087 break;
1088
1089 LOCK(&sc->wq[def_q]->tx_lock);
1090 rc = oce_tx(sc, &m, def_q);
1091 UNLOCK(&sc->wq[def_q]->tx_lock);
1092 if (rc) {
1093 if (m != NULL) {
1094 sc->wq[def_q]->tx_stats.tx_stops ++;
1095 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1096 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1097 m = NULL;
1098 }
1099 break;
1100 }
1101 if (m != NULL)
1102 ETHER_BPF_MTAP(ifp, m);
1103
1104 } while (TRUE);
1105
1106 return;
1107}
1108
1109
1110/* Handle the Completion Queue for transmit */
1111uint16_t
1112oce_wq_handler(void *arg)

--- 91 unchanged lines hidden (view full) ---

1204 uint32_t out;
1205 struct oce_packet_desc *pd;
1206 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1207 int i, len, frag_len;
1208 struct mbuf *m = NULL, *tail = NULL;
1209 uint16_t vtag;
1210
1211 len = cqe->u0.s.pkt_size;
1212 if (!len) {
1213 /*partial DMA workaround for Lancer*/
1214 oce_discard_rx_comp(rq, cqe);
1215 goto exit;
1216 }
1217
1218 /* Get vlan_tag value */
1219 if(IS_BE(sc))
1220 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1221 else
1222 vtag = cqe->u0.s.vlan_tag;
1223
1224
1225 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1226
1227 if (rq->packets_out == rq->packets_in) {
1228 device_printf(sc->dev,
1229 "RQ transmit descriptor missing\n");
1230 }
1231 out = rq->packets_out + 1;
1232 if (out == OCE_RQ_PACKET_ARRAY_SIZE)

--- 19 unchanged lines hidden (view full) ---

1252 pd->mbuf->m_pkthdr.csum_flags = 0;
1253 if (IF_CSUM_ENABLED(sc)) {
1254 if (cqe->u0.s.l4_cksum_pass) {
1255 pd->mbuf->m_pkthdr.csum_flags |=
1256 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1257 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1258 }
1259 if (cqe->u0.s.ip_cksum_pass) {
1260 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1261 pd->mbuf->m_pkthdr.csum_flags |=
1262 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1263 }
1264 }
1265 }
1266 m = tail = pd->mbuf;
1267 }
1268 pd->mbuf = NULL;

--- 6 unchanged lines hidden (view full) ---

1275 goto exit;
1276 }
1277
1278 m->m_pkthdr.rcvif = sc->ifp;
1279#if __FreeBSD_version >= 800000
1280 m->m_pkthdr.flowid = rq->queue_index;
1281 m->m_flags |= M_FLOWID;
1282#endif
	/* This determines whether the VLAN tag is valid */
1284 if (oce_cqe_vtp_valid(sc, cqe)) {
1285 if (sc->function_mode & FNM_FLEX10_MODE) {
1286 /* FLEX10. If QnQ is not set, neglect VLAN */
1287 if (cqe->u0.s.qnq) {
1288 m->m_pkthdr.ether_vtag = vtag;
1289 m->m_flags |= M_VLANTAG;
1290 }
1291 } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
1292 /* In UMC mode generally pvid will be striped by
1293 hw. But in some cases we have seen it comes
1294 with pvid. So if pvid == vlan, neglect vlan.
1295 */
1296 m->m_pkthdr.ether_vtag = vtag;
1297 m->m_flags |= M_VLANTAG;
1298 }
1299 }
1300
1301 sc->ifp->if_ipackets++;
1302#if defined(INET6) || defined(INET)
1303 /* Try to queue to LRO */
1304 if (IF_LRO_ENABLED(sc) &&

--- 68 unchanged lines hidden (view full) ---

1373oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1374{
1375 struct oce_nic_rx_cqe_v1 *cqe_v1;
1376 int vtp = 0;
1377
1378 if (sc->be3_native) {
1379 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1380 vtp = cqe_v1->u0.s.vlan_tag_present;
1381 } else
1382 vtp = cqe->u0.s.vlan_tag_present;
1383
1384 return vtp;
1385
1386}
1387
1388
1389static int
1390oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)

--- 8 unchanged lines hidden (view full) ---

1399 return 0;
1400 } else
1401 ;/* For BE3 legacy and Lancer this is dummy */
1402
1403 return 1;
1404
1405}
1406
1407#if defined(INET6) || defined(INET)
1408static void
1409oce_rx_flush_lro(struct oce_rq *rq)
1410{
1411 struct lro_ctrl *lro = &rq->lro;
1412 struct lro_entry *queued;
1413 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1414

--- 23 unchanged lines hidden (view full) ---

1438 device_printf(sc->dev, "LRO init failed\n");
1439 return rc;
1440 }
1441 lro->ifp = sc->ifp;
1442 }
1443
1444 return rc;
1445}
1446
1447
1448void
1449oce_free_lro(POCE_SOFTC sc)
1450{
1451 struct lro_ctrl *lro = NULL;
1452 int i = 0;
1453
1454 for (i = 0; i < sc->nrqs; i++) {
1455 lro = &sc->rq[i]->lro;
1456 if (lro)
1457 tcp_lro_free(lro);
1458 }
1459}
1460#endif /* INET6 || INET */
1461
1462int
1463oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1464{
1465 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1466 int i, in, rc;
1467 struct oce_packet_desc *pd;
1468 bus_dma_segment_t segs[6];
1469 int nsegs, added = 0;

--- 104 unchanged lines hidden (view full) ---

1574 bus_dmamap_sync(cq->ring->dma.tag,
1575 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1576 cqe =
1577 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1578 num_cqes++;
1579 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1580 break;
1581 }
1582
1583#if defined(INET6) || defined(INET)
1584 if (IF_LRO_ENABLED(sc))
1585 oce_rx_flush_lro(rq);
1586#endif
1587
1588 if (num_cqes) {
1589 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1590 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;

--- 46 unchanged lines hidden (view full) ---

1637
1638 sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1639 sc->ifp->if_hwassist |= CSUM_TSO;
1640 sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1641
1642 sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1643 sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1644 sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1645
1646#if defined(INET6) || defined(INET)
1647 sc->ifp->if_capabilities |= IFCAP_TSO;
1648 sc->ifp->if_capabilities |= IFCAP_LRO;
1649 sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1650#endif
1651
1652 sc->ifp->if_capenable = sc->ifp->if_capabilities;
1653 sc->ifp->if_baudrate = IF_Gbps(10UL);
1654
1655 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1656
1657 return 0;

--- 243 unchanged lines hidden (view full) ---

1901
1902 for_all_evnt_queues(sc, eq, i)
1903 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
1904
1905 oce_hw_intr_enable(sc);
1906
1907}
1908
1909static void
1910process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
1911{
1912 /* Update Link status */
1913 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1914 ASYNC_EVENT_LINK_UP) {
1915 sc->link_status = ASYNC_EVENT_LINK_UP;
1916 if_link_state_change(sc->ifp, LINK_STATE_UP);
1917 } else {
1918 sc->link_status = ASYNC_EVENT_LINK_DOWN;
1919 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
1920 }
1921
1922 /* Update speed */
1923 sc->link_speed = acqe->u0.s.speed;
1924 sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
1925
1926}
1927
1928
1929/* Handle the Completion Queue for the Mailbox/Async notifications */
1930uint16_t
1931oce_mq_handler(void *arg)
1932{
1933 struct oce_mq *mq = (struct oce_mq *)arg;
1934 POCE_SOFTC sc = mq->parent;
1935 struct oce_cq *cq = mq->cq;
1936 int num_cqes = 0, evt_type = 0, optype = 0;
1937 struct oce_mq_cqe *cqe;
1938 struct oce_async_cqe_link_state *acqe;
1939 struct oce_async_event_grp5_pvid_state *gcqe;
1940
1941
1942 bus_dmamap_sync(cq->ring->dma.tag,
1943 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1944 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
1945
1946 while (cqe->u0.dw[3]) {
1947 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
1948 if (cqe->u0.s.async_event) {
1949 evt_type = cqe->u0.s.event_type;
1950 optype = cqe->u0.s.async_type;
1951 if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
1952 /* Link status evt */
1953 acqe = (struct oce_async_cqe_link_state *)cqe;
1954 process_link_state(sc, acqe);
1955 } else if ((evt_type == ASYNC_EVENT_GRP5) &&
1956 (optype == ASYNC_EVENT_PVID_STATE)) {
1957 /* GRP5 PVID */
1958 gcqe =
1959 (struct oce_async_event_grp5_pvid_state *)cqe;
1960 if (gcqe->enabled)
1961 sc->pvid = gcqe->tag & VLAN_VID_MASK;
1962 else
1963 sc->pvid = 0;
1964
1965 }
1966 }
1967 cqe->u0.dw[3] = 0;
1968 RING_GET(cq->ring, 1);
1969 bus_dmamap_sync(cq->ring->dma.tag,
1970 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1971 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
1972 num_cqes++;
1973 }
1974
1975 if (num_cqes)
1976 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

--- 4 unchanged lines hidden (view full) ---

1981
1982static void
1983setup_max_queues_want(POCE_SOFTC sc)
1984{
1985 int max_rss = 0;
1986
1987 /* Check if it is FLEX machine. Is so dont use RSS */
1988 if ((sc->function_mode & FNM_FLEX10_MODE) ||
1989 (sc->function_mode & FNM_UMC_MODE) ||
1990 (sc->function_mode & FNM_VNIC_MODE) ||
1991 (!sc->rss_enable) ||
1992 (sc->flags & OCE_FLAGS_BE2)) {
1993 sc->nrqs = 1;
1994 sc->nwqs = 1;
1995 sc->rss_enable = 0;
1996 } else {
1997 /* For multiq, our deisgn is to have TX rings equal to
1998 RSS rings. So that we can pair up one RSS ring and TX
1999 to a single intr, which improves CPU cache efficiency.
2000 */
2001 if (IS_BE(sc) && (!sc->be3_native))
2002 max_rss = OCE_LEGACY_MODE_RSS;
2003 else
2004 max_rss = OCE_MAX_RSS;
2005
2006 sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for def RX */
2007 sc->nwqs = MIN(OCE_NCPUS, max_rss);
2008 }
2009
2010}
2011
2012
2013static void
2014update_queues_got(POCE_SOFTC sc)
2015{
2016 if (sc->rss_enable) {
2017 sc->nrqs = sc->intr_count + 1;
2018 sc->nwqs = sc->intr_count;
2019 } else {
2020 sc->nrqs = 1;
2021 sc->nwqs = 1;
2022 }
2023}
2024