oce_if.c: r231511 (deleted lines) vs. r231879 (added lines)
1/*-
2 * Copyright (C) 2012 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
1/*-
2 * Copyright (C) 2012 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
39
40/* $FreeBSD: head/sys/dev/oce/oce_if.c 231511 2012-02-11 08:33:52Z bz $ */
41
39/* $FreeBSD: head/sys/dev/oce/oce_if.c 231879 2012-02-17 13:55:17Z luigi $ */
40
42#include "opt_inet6.h"
43#include "opt_inet.h"
44
45#include "oce_if.h"
46
47
48/* Driver entry points prototypes */
49static int oce_probe(device_t dev);
50static int oce_attach(device_t dev);
51static int oce_detach(device_t dev);
52static int oce_shutdown(device_t dev);
53static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
54static void oce_init(void *xsc);
55static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
56static void oce_multiq_flush(struct ifnet *ifp);
57
58/* Driver interrupt routines prototypes */
59static void oce_intr(void *arg, int pending);
60static int oce_setup_intr(POCE_SOFTC sc);
61static int oce_fast_isr(void *arg);
62static int oce_alloc_intr(POCE_SOFTC sc, int vector,
63 void (*isr) (void *arg, int pending));
64
65/* Media callbacks prototypes */
66static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
67static int oce_media_change(struct ifnet *ifp);
68
69/* Transmit routines prototypes */
70static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
71static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
72static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
73 uint32_t status);
41#include "opt_inet6.h"
42#include "opt_inet.h"
43
44#include "oce_if.h"
45
46
47/* Driver entry points prototypes */
48static int oce_probe(device_t dev);
49static int oce_attach(device_t dev);
50static int oce_detach(device_t dev);
51static int oce_shutdown(device_t dev);
52static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
53static void oce_init(void *xsc);
54static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
55static void oce_multiq_flush(struct ifnet *ifp);
56
57/* Driver interrupt routines prototypes */
58static void oce_intr(void *arg, int pending);
59static int oce_setup_intr(POCE_SOFTC sc);
60static int oce_fast_isr(void *arg);
61static int oce_alloc_intr(POCE_SOFTC sc, int vector,
62 void (*isr) (void *arg, int pending));
63
64/* Media callbacks prototypes */
65static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
66static int oce_media_change(struct ifnet *ifp);
67
68/* Transmit routines prototypes */
69static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
70static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
71static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
72 uint32_t status);
74#if defined(INET6) || defined(INET)
75static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp,
76 uint16_t *mss);
77#endif
78static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
79 struct oce_wq *wq);
80
81/* Receive routines prototypes */
82static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
83static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
84static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
73static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
74 struct oce_wq *wq);
75
76/* Receive routines prototypes */
77static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
78static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
79static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
85#if defined(INET6) || defined(INET)
86static void oce_rx_flush_lro(struct oce_rq *rq);
87#endif
88static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
89 struct oce_nic_rx_cqe *cqe);
90
91/* Helper function prototypes in this file */
92static int oce_attach_ifp(POCE_SOFTC sc);
93static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
94static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
95static int oce_vid_config(POCE_SOFTC sc);
96static void oce_mac_addr_set(POCE_SOFTC sc);
97static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
98static void oce_local_timer(void *arg);
80static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
81 struct oce_nic_rx_cqe *cqe);
82
83/* Helper function prototypes in this file */
84static int oce_attach_ifp(POCE_SOFTC sc);
85static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
86static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
87static int oce_vid_config(POCE_SOFTC sc);
88static void oce_mac_addr_set(POCE_SOFTC sc);
89static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
90static void oce_local_timer(void *arg);
99#if defined(INET6) || defined(INET)
100static int oce_init_lro(POCE_SOFTC sc);
101#endif
102static void oce_if_deactivate(POCE_SOFTC sc);
103static void oce_if_activate(POCE_SOFTC sc);
104static void setup_max_queues_want(POCE_SOFTC sc);
105static void update_queues_got(POCE_SOFTC sc);
91static void oce_if_deactivate(POCE_SOFTC sc);
92static void oce_if_activate(POCE_SOFTC sc);
93static void setup_max_queues_want(POCE_SOFTC sc);
94static void update_queues_got(POCE_SOFTC sc);
95static void process_link_state(POCE_SOFTC sc,
96 struct oce_async_cqe_link_state *acqe);
106
97
98
99/* IP specific */
100#if defined(INET6) || defined(INET)
101static int oce_init_lro(POCE_SOFTC sc);
102static void oce_rx_flush_lro(struct oce_rq *rq);
103static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
104#endif
105
107static device_method_t oce_dispatch[] = {
108 DEVMETHOD(device_probe, oce_probe),
109 DEVMETHOD(device_attach, oce_attach),
110 DEVMETHOD(device_detach, oce_detach),
111 DEVMETHOD(device_shutdown, oce_shutdown),
112 {0, 0}
113};
114
115static driver_t oce_driver = {
116 "oce",
117 oce_dispatch,
118 sizeof(OCE_SOFTC)
119};
120static devclass_t oce_devclass;
121
122
123DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
124MODULE_DEPEND(oce, pci, 1, 1, 1);
125MODULE_DEPEND(oce, ether, 1, 1, 1);
126MODULE_VERSION(oce, 1);
127
128
129/* global vars */
130const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
131
132/* Module capabilities and parameters */
133uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
134uint32_t oce_enable_rss = OCE_MODCAP_RSS;
135
136
137TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
138TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
139
140
141/* Supported devices table */
142static uint32_t supportedDevices[] = {
143 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
144 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
145 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
146 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
147 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
148};
149
150
151
152
153/*****************************************************************************
154 * Driver entry points functions *
155 *****************************************************************************/
156
157static int
158oce_probe(device_t dev)
159{
106static device_method_t oce_dispatch[] = {
107 DEVMETHOD(device_probe, oce_probe),
108 DEVMETHOD(device_attach, oce_attach),
109 DEVMETHOD(device_detach, oce_detach),
110 DEVMETHOD(device_shutdown, oce_shutdown),
111 {0, 0}
112};
113
114static driver_t oce_driver = {
115 "oce",
116 oce_dispatch,
117 sizeof(OCE_SOFTC)
118};
119static devclass_t oce_devclass;
120
121
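/* Newbus/module glue: register the driver on the pci bus and declare
 * load-order dependencies on the pci and ether modules. */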
122DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
123MODULE_DEPEND(oce, pci, 1, 1, 1);
124MODULE_DEPEND(oce, ether, 1, 1, 1);
125MODULE_VERSION(oce, 1);
126
127
128/* global vars */
129const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
130
131/* Module capabilities and parameters */
132uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
133uint32_t oce_enable_rss = OCE_MODCAP_RSS;
134
135
136TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
137TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
138
139
140/* Supported devices table */
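/* Each entry packs the PCI vendor id in the upper 16 bits and the
 * device id in the lower 16 bits; oce_probe() unpacks them to match
 * the probing device. */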
141static uint32_t supportedDevices[] = {
142 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
143 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
144 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
145 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
146 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
147};
148
149
150
151
152/*****************************************************************************
153 * Driver entry points functions *
154 *****************************************************************************/
155
156static int
157oce_probe(device_t dev)
158{
160 uint16_t vendor;
161 uint16_t device;
162 int i;
163 char str[80];
159 uint16_t vendor = 0;
160 uint16_t device = 0;
161 int i = 0;
162 char str[256] = {0};
164 POCE_SOFTC sc;
165
166 sc = device_get_softc(dev);
167 bzero(sc, sizeof(OCE_SOFTC));
168 sc->dev = dev;
169
170 vendor = pci_get_vendor(dev);
171 device = pci_get_device(dev);
172
163 POCE_SOFTC sc;
164
165 sc = device_get_softc(dev);
166 bzero(sc, sizeof(OCE_SOFTC));
167 sc->dev = dev;
168
169 vendor = pci_get_vendor(dev);
170 device = pci_get_device(dev);
171
173 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint16_t)); i++) {
172 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
174 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
175 if (device == (supportedDevices[i] & 0xffff)) {
173 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
174 if (device == (supportedDevices[i] & 0xffff)) {
176 sprintf(str, "%s:%s",
177 "Emulex CNA NIC function",
175 sprintf(str, "%s:%s", "Emulex CNA NIC function",
178 component_revision);
179 device_set_desc_copy(dev, str);
180
181 switch (device) {
182 case PCI_PRODUCT_BE2:
183 sc->flags |= OCE_FLAGS_BE2;
184 break;
185 case PCI_PRODUCT_BE3:
186 sc->flags |= OCE_FLAGS_BE3;
187 break;
188 case PCI_PRODUCT_XE201:
189 case PCI_PRODUCT_XE201_VF:
190 sc->flags |= OCE_FLAGS_XE201;
191 break;
192 default:
193 return ENXIO;
194 }
195 return BUS_PROBE_DEFAULT;
196 }
197 }
198 }
199
200 return ENXIO;
201}
202
203
204static int
205oce_attach(device_t dev)
206{
207 POCE_SOFTC sc;
208 int rc = 0;
209
210 sc = device_get_softc(dev);
211
212 rc = oce_hw_pci_alloc(sc);
213 if (rc)
214 return rc;
215
216 sc->rss_enable = oce_enable_rss;
217 sc->tx_ring_size = OCE_TX_RING_SIZE;
218 sc->rx_ring_size = OCE_RX_RING_SIZE;
219 sc->rq_frag_size = OCE_RQ_BUF_SIZE;
220 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
221 sc->promisc = OCE_DEFAULT_PROMISCUOUS;
222
223 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
224 LOCK_CREATE(&sc->dev_lock, "Device_lock");
225
226 /* initialise the hardware */
227 rc = oce_hw_init(sc);
228 if (rc)
229 goto pci_res_free;
230
176 component_revision);
177 device_set_desc_copy(dev, str);
178
179 switch (device) {
180 case PCI_PRODUCT_BE2:
181 sc->flags |= OCE_FLAGS_BE2;
182 break;
183 case PCI_PRODUCT_BE3:
184 sc->flags |= OCE_FLAGS_BE3;
185 break;
186 case PCI_PRODUCT_XE201:
187 case PCI_PRODUCT_XE201_VF:
188 sc->flags |= OCE_FLAGS_XE201;
189 break;
190 default:
191 return ENXIO;
192 }
193 return BUS_PROBE_DEFAULT;
194 }
195 }
196 }
197
198 return ENXIO;
199}
200
201
202static int
203oce_attach(device_t dev)
204{
205 POCE_SOFTC sc;
206 int rc = 0;
207
208 sc = device_get_softc(dev);
209
210 rc = oce_hw_pci_alloc(sc);
211 if (rc)
212 return rc;
213
214 sc->rss_enable = oce_enable_rss;
215 sc->tx_ring_size = OCE_TX_RING_SIZE;
216 sc->rx_ring_size = OCE_RX_RING_SIZE;
217 sc->rq_frag_size = OCE_RQ_BUF_SIZE;
218 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
219 sc->promisc = OCE_DEFAULT_PROMISCUOUS;
220
221 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
222 LOCK_CREATE(&sc->dev_lock, "Device_lock");
223
224 /* initialise the hardware */
225 rc = oce_hw_init(sc);
226 if (rc)
227 goto pci_res_free;
228
231
232 setup_max_queues_want(sc);
233
229 setup_max_queues_want(sc);
230
234
235 rc = oce_setup_intr(sc);
236 if (rc)
237 goto mbox_free;
238
231 rc = oce_setup_intr(sc);
232 if (rc)
233 goto mbox_free;
234
239
240 rc = oce_queue_init_all(sc);
241 if (rc)
242 goto intr_free;
243
235 rc = oce_queue_init_all(sc);
236 if (rc)
237 goto intr_free;
238
244
245 rc = oce_attach_ifp(sc);
246 if (rc)
247 goto queues_free;
248
239 rc = oce_attach_ifp(sc);
240 if (rc)
241 goto queues_free;
242
249
250#if defined(INET6) || defined(INET)
251 rc = oce_init_lro(sc);
252 if (rc)
243#if defined(INET6) || defined(INET)
244 rc = oce_init_lro(sc);
245 if (rc)
253 goto ifp_free;
246 goto ifp_free;
254#endif
255
247#endif
248
256
257 rc = oce_hw_start(sc);
258 if (rc)
259 goto lro_free;
260
249 rc = oce_hw_start(sc);
250 if (rc)
251 goto lro_free;
252
261
262 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
263 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
264 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
265 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
266
267 rc = oce_stats_init(sc);
268 if (rc)
269 goto vlan_free;
270
271 oce_add_sysctls(sc);
272
253 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
254 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
255 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
256 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
257
258 rc = oce_stats_init(sc);
259 if (rc)
260 goto vlan_free;
261
262 oce_add_sysctls(sc);
263
273
274 callout_init(&sc->timer, CALLOUT_MPSAFE);
275 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
276 if (rc)
277 goto stats_free;
264 callout_init(&sc->timer, CALLOUT_MPSAFE);
265 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
266 if (rc)
267 goto stats_free;
268#ifdef DEV_NETMAP
269#endif /* DEV_NETMAP */
278
279 return 0;
280
281stats_free:
282 callout_drain(&sc->timer);
283 oce_stats_free(sc);
284vlan_free:
285 if (sc->vlan_attach)
286 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
287 if (sc->vlan_detach)
288 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
289 oce_hw_intr_disable(sc);
290lro_free:
291#if defined(INET6) || defined(INET)
292 oce_free_lro(sc);
293ifp_free:
294#endif
295 ether_ifdetach(sc->ifp);
296 if_free(sc->ifp);
297queues_free:
298 oce_queue_release_all(sc);
299intr_free:
300 oce_intr_free(sc);
301mbox_free:
302 oce_dma_free(sc, &sc->bsmbx);
303pci_res_free:
304 oce_hw_pci_free(sc);
305 LOCK_DESTROY(&sc->dev_lock);
306 LOCK_DESTROY(&sc->bmbx_lock);
307 return rc;
308
309}
310
311
312static int
313oce_detach(device_t dev)
314{
315 POCE_SOFTC sc = device_get_softc(dev);
316
317 LOCK(&sc->dev_lock);
270
271 return 0;
272
273stats_free:
274 callout_drain(&sc->timer);
275 oce_stats_free(sc);
276vlan_free:
277 if (sc->vlan_attach)
278 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
279 if (sc->vlan_detach)
280 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
281 oce_hw_intr_disable(sc);
282lro_free:
283#if defined(INET6) || defined(INET)
284 oce_free_lro(sc);
285ifp_free:
286#endif
287 ether_ifdetach(sc->ifp);
288 if_free(sc->ifp);
289queues_free:
290 oce_queue_release_all(sc);
291intr_free:
292 oce_intr_free(sc);
293mbox_free:
294 oce_dma_free(sc, &sc->bsmbx);
295pci_res_free:
296 oce_hw_pci_free(sc);
297 LOCK_DESTROY(&sc->dev_lock);
298 LOCK_DESTROY(&sc->bmbx_lock);
299 return rc;
300
301}
302
303
304static int
305oce_detach(device_t dev)
306{
307 POCE_SOFTC sc = device_get_softc(dev);
308
309 LOCK(&sc->dev_lock);
318
319 oce_if_deactivate(sc);
310 oce_if_deactivate(sc);
320
321 UNLOCK(&sc->dev_lock);
322
323 callout_drain(&sc->timer);
324
325 if (sc->vlan_attach != NULL)
326 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
327 if (sc->vlan_detach != NULL)
328 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
329
330 ether_ifdetach(sc->ifp);
331
332 if_free(sc->ifp);
333
334 oce_hw_shutdown(sc);
335
336 bus_generic_detach(dev);
337
338 return 0;
339}
340
341
342static int
343oce_shutdown(device_t dev)
344{
345 int rc;
346
347 rc = oce_detach(dev);
348
349 return rc;
350}
351
352
353static int
354oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
355{
356 struct ifreq *ifr = (struct ifreq *)data;
357 POCE_SOFTC sc = ifp->if_softc;
358 int rc = 0;
359 uint32_t u;
360
361 switch (command) {
311 UNLOCK(&sc->dev_lock);
312
313 callout_drain(&sc->timer);
314
315 if (sc->vlan_attach != NULL)
316 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
317 if (sc->vlan_detach != NULL)
318 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
319
320 ether_ifdetach(sc->ifp);
321
322 if_free(sc->ifp);
323
324 oce_hw_shutdown(sc);
325
326 bus_generic_detach(dev);
327
328 return 0;
329}
330
331
332static int
333oce_shutdown(device_t dev)
334{
335 int rc;
336
337 rc = oce_detach(dev);
338
339 return rc;
340}
341
342
343static int
344oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
345{
346 struct ifreq *ifr = (struct ifreq *)data;
347 POCE_SOFTC sc = ifp->if_softc;
348 int rc = 0;
349 uint32_t u;
350
351 switch (command) {
362 case SIOCGIFPSRCADDR_IN6:
363 rc = ether_ioctl(ifp, command, data);
364 break;
365
352
366 case SIOCGIFPSRCADDR:
367 rc = ether_ioctl(ifp, command, data);
368 break;
369
370 case SIOCGIFSTATUS:
371 rc = ether_ioctl(ifp, command, data);
372 break;
373
374 case SIOCGIFMEDIA:
375 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
376 break;
377
353 case SIOCGIFMEDIA:
354 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
355 break;
356
378 case SIOCSIFMEDIA:
379 rc = ether_ioctl(ifp, command, data);
380 break;
381
382 case SIOCGIFGENERIC:
383 rc = ether_ioctl(ifp, command, data);
384 break;
385
386 case SIOCGETMIFCNT_IN6:
387 rc = ether_ioctl(ifp, command, data);
388 break;
389
390 case SIOCSIFMTU:
391 if (ifr->ifr_mtu > OCE_MAX_MTU)
392 rc = EINVAL;
393 else
394 ifp->if_mtu = ifr->ifr_mtu;
395 break;
396
397 case SIOCSIFFLAGS:
398 if (ifp->if_flags & IFF_UP) {
399 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
400 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
401 oce_init(sc);
402 }
403 device_printf(sc->dev, "Interface Up\n");
404 } else {
405 LOCK(&sc->dev_lock);
406
407 sc->ifp->if_drv_flags &=
408 ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
409 oce_if_deactivate(sc);
410
411 UNLOCK(&sc->dev_lock);
412
413 device_printf(sc->dev, "Interface Down\n");
414 }
415
416 if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
417 sc->promisc = TRUE;
418 oce_rxf_set_promiscuous(sc, sc->promisc);
419 } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
420 sc->promisc = FALSE;
421 oce_rxf_set_promiscuous(sc, sc->promisc);
422 }
423
424 break;
425
426 case SIOCADDMULTI:
427 case SIOCDELMULTI:
428 rc = oce_hw_update_multicast(sc);
429 if (rc)
430 device_printf(sc->dev,
431 "Update multicast address failed\n");
432 break;
433
434 case SIOCSIFCAP:
435 u = ifr->ifr_reqcap ^ ifp->if_capenable;
436
437 if (u & IFCAP_TXCSUM) {
438 ifp->if_capenable ^= IFCAP_TXCSUM;
439 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
440
441 if (IFCAP_TSO & ifp->if_capenable &&
442 !(IFCAP_TXCSUM & ifp->if_capenable)) {
443 ifp->if_capenable &= ~IFCAP_TSO;
444 ifp->if_hwassist &= ~CSUM_TSO;
445 if_printf(ifp,
446 "TSO disabled due to -txcsum.\n");
447 }
448 }
449
450 if (u & IFCAP_RXCSUM)
451 ifp->if_capenable ^= IFCAP_RXCSUM;
452
453 if (u & IFCAP_TSO4) {
454 ifp->if_capenable ^= IFCAP_TSO4;
455
456 if (IFCAP_TSO & ifp->if_capenable) {
457 if (IFCAP_TXCSUM & ifp->if_capenable)
458 ifp->if_hwassist |= CSUM_TSO;
459 else {
460 ifp->if_capenable &= ~IFCAP_TSO;
461 ifp->if_hwassist &= ~CSUM_TSO;
462 if_printf(ifp,
463 "Enable txcsum first.\n");
464 rc = EAGAIN;
465 }
466 } else
467 ifp->if_hwassist &= ~CSUM_TSO;
468 }
469
470 if (u & IFCAP_VLAN_HWTAGGING)
471 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
472
473 if (u & IFCAP_VLAN_HWFILTER) {
474 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
475 oce_vid_config(sc);
476 }
357 case SIOCSIFMTU:
358 if (ifr->ifr_mtu > OCE_MAX_MTU)
359 rc = EINVAL;
360 else
361 ifp->if_mtu = ifr->ifr_mtu;
362 break;
363
364 case SIOCSIFFLAGS:
365 if (ifp->if_flags & IFF_UP) {
366 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
367 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
368 oce_init(sc);
369 }
370 device_printf(sc->dev, "Interface Up\n");
371 } else {
372 LOCK(&sc->dev_lock);
373
374 sc->ifp->if_drv_flags &=
375 ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
376 oce_if_deactivate(sc);
377
378 UNLOCK(&sc->dev_lock);
379
380 device_printf(sc->dev, "Interface Down\n");
381 }
382
383 if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
384 sc->promisc = TRUE;
385 oce_rxf_set_promiscuous(sc, sc->promisc);
386 } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
387 sc->promisc = FALSE;
388 oce_rxf_set_promiscuous(sc, sc->promisc);
389 }
390
391 break;
392
393 case SIOCADDMULTI:
394 case SIOCDELMULTI:
395 rc = oce_hw_update_multicast(sc);
396 if (rc)
397 device_printf(sc->dev,
398 "Update multicast address failed\n");
399 break;
400
401 case SIOCSIFCAP:
402 u = ifr->ifr_reqcap ^ ifp->if_capenable;
403
404 if (u & IFCAP_TXCSUM) {
405 ifp->if_capenable ^= IFCAP_TXCSUM;
406 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
407
408 if (IFCAP_TSO & ifp->if_capenable &&
409 !(IFCAP_TXCSUM & ifp->if_capenable)) {
410 ifp->if_capenable &= ~IFCAP_TSO;
411 ifp->if_hwassist &= ~CSUM_TSO;
412 if_printf(ifp,
413 "TSO disabled due to -txcsum.\n");
414 }
415 }
416
417 if (u & IFCAP_RXCSUM)
418 ifp->if_capenable ^= IFCAP_RXCSUM;
419
420 if (u & IFCAP_TSO4) {
421 ifp->if_capenable ^= IFCAP_TSO4;
422
423 if (IFCAP_TSO & ifp->if_capenable) {
424 if (IFCAP_TXCSUM & ifp->if_capenable)
425 ifp->if_hwassist |= CSUM_TSO;
426 else {
427 ifp->if_capenable &= ~IFCAP_TSO;
428 ifp->if_hwassist &= ~CSUM_TSO;
429 if_printf(ifp,
430 "Enable txcsum first.\n");
431 rc = EAGAIN;
432 }
433 } else
434 ifp->if_hwassist &= ~CSUM_TSO;
435 }
436
437 if (u & IFCAP_VLAN_HWTAGGING)
438 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
439
440 if (u & IFCAP_VLAN_HWFILTER) {
441 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
442 oce_vid_config(sc);
443 }
477
478#if defined(INET6) || defined(INET)
479 if (u & IFCAP_LRO)
480 ifp->if_capenable ^= IFCAP_LRO;
481#endif
482
483 break;
484
485 case SIOCGPRIVATE_0:
486 rc = oce_handle_passthrough(ifp, data);
487 break;
488 default:
489 rc = ether_ioctl(ifp, command, data);
490 break;
491 }
492
493 return rc;
494}
495
496
497static void
498oce_init(void *arg)
499{
500 POCE_SOFTC sc = arg;
501
502 LOCK(&sc->dev_lock);
503
504 if (sc->ifp->if_flags & IFF_UP) {
505 oce_if_deactivate(sc);
506 oce_if_activate(sc);
507 }
508
509 UNLOCK(&sc->dev_lock);
510
511}
512
513
514static int
515oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
516{
517 POCE_SOFTC sc = ifp->if_softc;
518 struct oce_wq *wq = NULL;
519 int queue_index = 0;
520 int status = 0;
521
522 if ((m->m_flags & M_FLOWID) != 0)
523 queue_index = m->m_pkthdr.flowid % sc->nwqs;
524
525 wq = sc->wq[queue_index];
526
527 if (TRY_LOCK(&wq->tx_lock)) {
528 status = oce_multiq_transmit(ifp, m, wq);
529 UNLOCK(&wq->tx_lock);
530 } else {
531 status = drbr_enqueue(ifp, wq->br, m);
532 }
533 return status;
534
535}
536
537
538static void
539oce_multiq_flush(struct ifnet *ifp)
540{
541 POCE_SOFTC sc = ifp->if_softc;
542 struct mbuf *m;
543 int i = 0;
544
545 for (i = 0; i < sc->nwqs; i++) {
546 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
547 m_freem(m);
548 }
549 if_qflush(ifp);
550}
551
552
553
554/*****************************************************************************
555 * Driver interrupt routines functions *
556 *****************************************************************************/
557
558static void
559oce_intr(void *arg, int pending)
560{
561
562 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
563 POCE_SOFTC sc = ii->sc;
564 struct oce_eq *eq = ii->eq;
565 struct oce_eqe *eqe;
566 struct oce_cq *cq = NULL;
567 int i, num_eqes = 0;
568
569
570 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
571 BUS_DMASYNC_POSTWRITE);
572 do {
573 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
574 if (eqe->evnt == 0)
575 break;
576 eqe->evnt = 0;
577 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
578 BUS_DMASYNC_POSTWRITE);
579 RING_GET(eq->ring, 1);
580 num_eqes++;
581
582 } while (TRUE);
583
584 if (!num_eqes)
585 goto eq_arm; /* Spurious */
586
587 /* Clear EQ entries, but don't arm */
588 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
589
590 /* Process TX, RX and MCC, but don't arm the CQ */
591 for (i = 0; i < eq->cq_valid; i++) {
592 cq = eq->cq[i];
593 (*cq->cq_handler)(cq->cb_arg);
594 }
595
596 /* Arm all cqs connected to this EQ */
597 for (i = 0; i < eq->cq_valid; i++) {
598 cq = eq->cq[i];
599 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
600 }
601
602eq_arm:
603 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
604 return;
605}
606
607
608static int
609oce_setup_intr(POCE_SOFTC sc)
610{
611 int rc = 0, use_intx = 0;
612 int vector = 0, req_vectors = 0;
613
614 if (sc->rss_enable)
615 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
616 else
617 req_vectors = 1;
618
619 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
620 sc->intr_count = req_vectors;
621 rc = pci_alloc_msix(sc->dev, &sc->intr_count);
622 if (rc != 0) {
623 use_intx = 1;
624 pci_release_msi(sc->dev);
625 } else
626 sc->flags |= OCE_FLAGS_USING_MSIX;
627 } else
628 use_intx = 1;
629
630 if (use_intx)
631 sc->intr_count = 1;
632
633 /* Scale number of queues based on intr we got */
634 update_queues_got(sc);
635
636 if (use_intx) {
637 device_printf(sc->dev, "Using legacy interrupt\n");
638 rc = oce_alloc_intr(sc, vector, oce_intr);
639 if (rc)
640 goto error;
641 } else {
642 for (; vector < sc->intr_count; vector++) {
643 rc = oce_alloc_intr(sc, vector, oce_intr);
644 if (rc)
645 goto error;
646 }
647 }
648
649 return 0;
650error:
651 oce_intr_free(sc);
652 return rc;
653}
654
655
656static int
657oce_fast_isr(void *arg)
658{
659 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
660 POCE_SOFTC sc = ii->sc;
661
662 if (ii->eq == NULL)
663 return FILTER_STRAY;
664
665 oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
666
667 taskqueue_enqueue_fast(ii->tq, &ii->task);
668
669 return FILTER_HANDLED;
670}
671
672
673static int
674oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
675{
676 POCE_INTR_INFO ii = &sc->intrs[vector];
677 int rc = 0, rr;
678
679 if (vector >= OCE_MAX_EQ)
680 return (EINVAL);
681
682 /* Set the resource id for the interrupt:
683  * vector + 1 for MSI-X,
684  * 0 for INTx.
685  */
686 if (sc->flags & OCE_FLAGS_USING_MSIX)
687 rr = vector + 1;
688 else
689 rr = 0;
690 ii->intr_res = bus_alloc_resource_any(sc->dev,
691 SYS_RES_IRQ,
692 &rr, RF_ACTIVE|RF_SHAREABLE);
693 ii->irq_rr = rr;
694 if (ii->intr_res == NULL) {
695 device_printf(sc->dev,
696 "Could not allocate interrupt\n");
697 rc = ENXIO;
698 return rc;
699 }
700
701 TASK_INIT(&ii->task, 0, isr, ii);
702 ii->vector = vector;
703 sprintf(ii->task_name, "oce_task[%d]", ii->vector);
704 ii->tq = taskqueue_create_fast(ii->task_name,
705 M_NOWAIT,
706 taskqueue_thread_enqueue,
707 &ii->tq);
708 taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
709 device_get_nameunit(sc->dev));
710
711 ii->sc = sc;
712 rc = bus_setup_intr(sc->dev,
713 ii->intr_res,
714 INTR_TYPE_NET,
715 oce_fast_isr, NULL, ii, &ii->tag);
716 return rc;
717
718}
719
720
721void
722oce_intr_free(POCE_SOFTC sc)
723{
724 int i = 0;
725
726 for (i = 0; i < sc->intr_count; i++) {
727
728 if (sc->intrs[i].tag != NULL)
729 bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
730 sc->intrs[i].tag);
731 if (sc->intrs[i].tq != NULL)
732 taskqueue_free(sc->intrs[i].tq);
733
734 if (sc->intrs[i].intr_res != NULL)
735 bus_release_resource(sc->dev, SYS_RES_IRQ,
736 sc->intrs[i].irq_rr,
737 sc->intrs[i].intr_res);
738 sc->intrs[i].tag = NULL;
739 sc->intrs[i].intr_res = NULL;
740 }
741
742 if (sc->flags & OCE_FLAGS_USING_MSIX)
743 pci_release_msi(sc->dev);
744
745}
746
747
748
749/******************************************************************************
750* Media callbacks functions *
751******************************************************************************/
752
753static void
754oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
755{
756 POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
757
758
759 req->ifm_status = IFM_AVALID;
760 req->ifm_active = IFM_ETHER;
761
762 if (sc->link_status == 1)
763 req->ifm_status |= IFM_ACTIVE;
764 else
765 return;
766
767 switch (sc->link_speed) {
768 case 1: /* 10 Mbps */
769 req->ifm_active |= IFM_10_T | IFM_FDX;
770 sc->speed = 10;
771 break;
772 case 2: /* 100 Mbps */
773 req->ifm_active |= IFM_100_TX | IFM_FDX;
774 sc->speed = 100;
775 break;
776 case 3: /* 1 Gbps */
777 req->ifm_active |= IFM_1000_T | IFM_FDX;
778 sc->speed = 1000;
779 break;
780 case 4: /* 10 Gbps */
781 req->ifm_active |= IFM_10G_SR | IFM_FDX;
782 sc->speed = 10000;
783 break;
784 }
785
786 return;
787}
788
789
790int
791oce_media_change(struct ifnet *ifp)
792{
793 return 0;
794}
795
796
797
798
799/*****************************************************************************
800 * Transmit routines functions *
801 *****************************************************************************/
802
803static int
804oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
805{
806 int rc = 0, i, retry_cnt = 0;
807 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
808 struct mbuf *m, *m_temp;
809 struct oce_wq *wq = sc->wq[wq_index];
810 struct oce_packet_desc *pd;
811 uint32_t out;
812 struct oce_nic_hdr_wqe *nichdr;
813 struct oce_nic_frag_wqe *nicfrag;
814 int num_wqes;
815 uint32_t reg_value;
444#if defined(INET6) || defined(INET)
445 if (u & IFCAP_LRO)
446 ifp->if_capenable ^= IFCAP_LRO;
447#endif
448
449 break;
450
451 case SIOCGPRIVATE_0:
452 rc = oce_handle_passthrough(ifp, data);
453 break;
454 default:
455 rc = ether_ioctl(ifp, command, data);
456 break;
457 }
458
459 return rc;
460}
461
462
463static void
464oce_init(void *arg)
465{
466 POCE_SOFTC sc = arg;
467
468 LOCK(&sc->dev_lock);
469
470 if (sc->ifp->if_flags & IFF_UP) {
471 oce_if_deactivate(sc);
472 oce_if_activate(sc);
473 }
474
475 UNLOCK(&sc->dev_lock);
476
477}
478
479
480static int
481oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
482{
483 POCE_SOFTC sc = ifp->if_softc;
484 struct oce_wq *wq = NULL;
485 int queue_index = 0;
486 int status = 0;
487
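	/* A packet carrying a flow id is mapped to a work queue by
	 * flowid % nwqs, so a given flow always uses the same TX queue. */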
488 if ((m->m_flags & M_FLOWID) != 0)
489 queue_index = m->m_pkthdr.flowid % sc->nwqs;
490
491 wq = sc->wq[queue_index];
492
493 if (TRY_LOCK(&wq->tx_lock)) {
494 status = oce_multiq_transmit(ifp, m, wq);
495 UNLOCK(&wq->tx_lock);
496 } else {
497 status = drbr_enqueue(ifp, wq->br, m);
498 }
499 return status;
500
501}
502
503
504static void
505oce_multiq_flush(struct ifnet *ifp)
506{
507 POCE_SOFTC sc = ifp->if_softc;
508 struct mbuf *m;
509 int i = 0;
510
511 for (i = 0; i < sc->nwqs; i++) {
512 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
513 m_freem(m);
514 }
515 if_qflush(ifp);
516}
517
518
519
520/*****************************************************************************
521 * Driver interrupt routines functions *
522 *****************************************************************************/
523
524static void
525oce_intr(void *arg, int pending)
526{
527
528 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
529 POCE_SOFTC sc = ii->sc;
530 struct oce_eq *eq = ii->eq;
531 struct oce_eqe *eqe;
532 struct oce_cq *cq = NULL;
533 int i, num_eqes = 0;
534
535
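	/* Walk the event queue up to the first unconsumed entry; each EQE
	 * is zeroed as it is read so it shows up empty on the next pass. */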
536 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
537 BUS_DMASYNC_POSTWRITE);
538 do {
539 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
540 if (eqe->evnt == 0)
541 break;
542 eqe->evnt = 0;
543 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
544 BUS_DMASYNC_POSTWRITE);
545 RING_GET(eq->ring, 1);
546 num_eqes++;
547
548 } while (TRUE);
549
550 if (!num_eqes)
551 goto eq_arm; /* Spurious */
552
553 /* Clear EQ entries, but don't arm */
554 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
555
556 /* Process TX, RX and MCC, but don't arm the CQ */
557 for (i = 0; i < eq->cq_valid; i++) {
558 cq = eq->cq[i];
559 (*cq->cq_handler)(cq->cb_arg);
560 }
561
562 /* Arm all cqs connected to this EQ */
563 for (i = 0; i < eq->cq_valid; i++) {
564 cq = eq->cq[i];
565 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
566 }
567
568eq_arm:
569 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
570 return;
571}
572
573
574static int
575oce_setup_intr(POCE_SOFTC sc)
576{
577 int rc = 0, use_intx = 0;
578 int vector = 0, req_vectors = 0;
579
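	/* With RSS, request one vector per receive or work queue, whichever
	 * is larger (nrqs - 1 presumably excludes the default, non-RSS RQ);
	 * pci_alloc_msix() may grant fewer, and any failure falls back to a
	 * single INTx vector below. */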
580 if (sc->rss_enable)
581 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
582 else
583 req_vectors = 1;
584
585 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
586 sc->intr_count = req_vectors;
587 rc = pci_alloc_msix(sc->dev, &sc->intr_count);
588 if (rc != 0) {
589 use_intx = 1;
590 pci_release_msi(sc->dev);
591 } else
592 sc->flags |= OCE_FLAGS_USING_MSIX;
593 } else
594 use_intx = 1;
595
596 if (use_intx)
597 sc->intr_count = 1;
598
599 /* Scale number of queues based on intr we got */
600 update_queues_got(sc);
601
602 if (use_intx) {
603 device_printf(sc->dev, "Using legacy interrupt\n");
604 rc = oce_alloc_intr(sc, vector, oce_intr);
605 if (rc)
606 goto error;
607 } else {
608 for (; vector < sc->intr_count; vector++) {
609 rc = oce_alloc_intr(sc, vector, oce_intr);
610 if (rc)
611 goto error;
612 }
613 }
614
615 return 0;
616error:
617 oce_intr_free(sc);
618 return rc;
619}
620
621
622static int
623oce_fast_isr(void *arg)
624{
625 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
626 POCE_SOFTC sc = ii->sc;
627
628 if (ii->eq == NULL)
629 return FILTER_STRAY;
630
631 oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
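	/* Filter (fast) interrupt context: the EQ is left unarmed above and
	 * all completion processing is deferred to the per-vector taskqueue,
	 * which runs oce_intr(). */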
632
633 taskqueue_enqueue_fast(ii->tq, &ii->task);
634
635 return FILTER_HANDLED;
636}
637
638
639static int
640oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
641{
642 POCE_INTR_INFO ii = &sc->intrs[vector];
643 int rc = 0, rr;
644
645 if (vector >= OCE_MAX_EQ)
646 return (EINVAL);
647
648 /* Set the resource id for the interrupt:
649  * vector + 1 for MSI-X,
650  * 0 for INTx.
651  */
652 if (sc->flags & OCE_FLAGS_USING_MSIX)
653 rr = vector + 1;
654 else
655 rr = 0;
656 ii->intr_res = bus_alloc_resource_any(sc->dev,
657 SYS_RES_IRQ,
658 &rr, RF_ACTIVE|RF_SHAREABLE);
659 ii->irq_rr = rr;
660 if (ii->intr_res == NULL) {
661 device_printf(sc->dev,
662 "Could not allocate interrupt\n");
663 rc = ENXIO;
664 return rc;
665 }
666
667 TASK_INIT(&ii->task, 0, isr, ii);
668 ii->vector = vector;
669 sprintf(ii->task_name, "oce_task[%d]", ii->vector);
670 ii->tq = taskqueue_create_fast(ii->task_name,
671 M_NOWAIT,
672 taskqueue_thread_enqueue,
673 &ii->tq);
674 taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
675 device_get_nameunit(sc->dev));
676
677 ii->sc = sc;
678 rc = bus_setup_intr(sc->dev,
679 ii->intr_res,
680 INTR_TYPE_NET,
681 oce_fast_isr, NULL, ii, &ii->tag);
682 return rc;
683
684}
685
686
687void
688oce_intr_free(POCE_SOFTC sc)
689{
690 int i = 0;
691
692 for (i = 0; i < sc->intr_count; i++) {
693
694 if (sc->intrs[i].tag != NULL)
695 bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
696 sc->intrs[i].tag);
697 if (sc->intrs[i].tq != NULL)
698 taskqueue_free(sc->intrs[i].tq);
699
700 if (sc->intrs[i].intr_res != NULL)
701 bus_release_resource(sc->dev, SYS_RES_IRQ,
702 sc->intrs[i].irq_rr,
703 sc->intrs[i].intr_res);
704 sc->intrs[i].tag = NULL;
705 sc->intrs[i].intr_res = NULL;
706 }
707
708 if (sc->flags & OCE_FLAGS_USING_MSIX)
709 pci_release_msi(sc->dev);
710
711}
712
713
714
715/******************************************************************************
716* Media callbacks functions *
717******************************************************************************/
718
719static void
720oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
721{
722 POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
723
724
725 req->ifm_status = IFM_AVALID;
726 req->ifm_active = IFM_ETHER;
727
728 if (sc->link_status == 1)
729 req->ifm_status |= IFM_ACTIVE;
730 else
731 return;
732
733 switch (sc->link_speed) {
734 case 1: /* 10 Mbps */
735 req->ifm_active |= IFM_10_T | IFM_FDX;
736 sc->speed = 10;
737 break;
738 case 2: /* 100 Mbps */
739 req->ifm_active |= IFM_100_TX | IFM_FDX;
740 sc->speed = 100;
741 break;
742 case 3: /* 1 Gbps */
743 req->ifm_active |= IFM_1000_T | IFM_FDX;
744 sc->speed = 1000;
745 break;
746 case 4: /* 10 Gbps */
747 req->ifm_active |= IFM_10G_SR | IFM_FDX;
748 sc->speed = 10000;
749 break;
750 }
751
752 return;
753}
754
755
756int
757oce_media_change(struct ifnet *ifp)
758{
759 return 0;
760}
761
762
763
764
765/*****************************************************************************
766 * Transmit routines functions *
767 *****************************************************************************/
768
769static int
770oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
771{
772 int rc = 0, i, retry_cnt = 0;
773 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
774 struct mbuf *m, *m_temp;
775 struct oce_wq *wq = sc->wq[wq_index];
776 struct oce_packet_desc *pd;
777 uint32_t out;
778 struct oce_nic_hdr_wqe *nichdr;
779 struct oce_nic_frag_wqe *nicfrag;
780 int num_wqes;
781 uint32_t reg_value;
816#if defined(INET6) || defined(INET)
817 uint16_t mss = 0;
818#endif
819
820 m = *mpp;
821 if (!m)
822 return EINVAL;
823
824 if (!(m->m_flags & M_PKTHDR)) {
825 rc = ENXIO;
826 goto free_ret;
827 }
828
829 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
782
783 m = *mpp;
784 if (!m)
785 return EINVAL;
786
787 if (!(m->m_flags & M_PKTHDR)) {
788 rc = ENXIO;
789 goto free_ret;
790 }
791
792 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
830#if defined(INET6) || defined(INET)
831 /* consolidate packet buffers for TSO/LSO segment offload */
793 /* consolidate packet buffers for TSO/LSO segment offload */
832 m = oce_tso_setup(sc, mpp, &mss);
794#if defined(INET6) || defined(INET)
795 m = oce_tso_setup(sc, mpp);
833#else
834 m = NULL;
835#endif
836 if (m == NULL) {
837 rc = ENXIO;
838 goto free_ret;
839 }
840 }
841
842 out = wq->packets_out + 1;
843 if (out == OCE_WQ_PACKET_ARRAY_SIZE)
844 out = 0;
845 if (out == wq->packets_in)
846 return EBUSY;
847
848 pd = &wq->pckts[wq->packets_out];
849retry:
850 rc = bus_dmamap_load_mbuf_sg(wq->tag,
851 pd->map,
852 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
853 if (rc == 0) {
854 num_wqes = pd->nsegs + 1;
855 if (IS_BE(sc)) {
856 /* BE3 requires an even number of WQEs; pad with a dummy. */
857 if (num_wqes & 1)
858 num_wqes++;
859 }
860 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
861 bus_dmamap_unload(wq->tag, pd->map);
862 return EBUSY;
863 }
864
865 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
866 pd->mbuf = m;
867 wq->packets_out = out;
868
869 nichdr =
870 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
871 nichdr->u0.dw[0] = 0;
872 nichdr->u0.dw[1] = 0;
873 nichdr->u0.dw[2] = 0;
874 nichdr->u0.dw[3] = 0;
875
876 nichdr->u0.s.complete = 1;
877 nichdr->u0.s.event = 1;
878 nichdr->u0.s.crc = 1;
879 nichdr->u0.s.forward = 0;
880 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
881 nichdr->u0.s.udpcs =
882 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
883 nichdr->u0.s.tcpcs =
884 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
885 nichdr->u0.s.num_wqe = num_wqes;
886 nichdr->u0.s.total_length = m->m_pkthdr.len;
887 if (m->m_flags & M_VLANTAG) {
888 nichdr->u0.s.vlan = 1; /* VLAN tag present */
889 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
890 }
891 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
892 if (m->m_pkthdr.tso_segsz) {
893 nichdr->u0.s.lso = 1;
894 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
895 }
896 if (!IS_BE(sc))
897 nichdr->u0.s.ipcs = 1;
898 }
899
900 RING_PUT(wq->ring, 1);
901 wq->ring->num_used++;
902
903 for (i = 0; i < pd->nsegs; i++) {
904 nicfrag =
905 RING_GET_PRODUCER_ITEM_VA(wq->ring,
906 struct oce_nic_frag_wqe);
907 nicfrag->u0.s.rsvd0 = 0;
908 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
909 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
910 nicfrag->u0.s.frag_len = segs[i].ds_len;
911 pd->wqe_idx = wq->ring->pidx;
912 RING_PUT(wq->ring, 1);
913 wq->ring->num_used++;
914 }
915 if (num_wqes > (pd->nsegs + 1)) {
916 nicfrag =
917 RING_GET_PRODUCER_ITEM_VA(wq->ring,
918 struct oce_nic_frag_wqe);
919 nicfrag->u0.dw[0] = 0;
920 nicfrag->u0.dw[1] = 0;
921 nicfrag->u0.dw[2] = 0;
922 nicfrag->u0.dw[3] = 0;
923 pd->wqe_idx = wq->ring->pidx;
924 RING_PUT(wq->ring, 1);
925 wq->ring->num_used++;
926 pd->nsegs++;
927 }
928
929 sc->ifp->if_opackets++;
930 wq->tx_stats.tx_reqs++;
931 wq->tx_stats.tx_wrbs += num_wqes;
932 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
933 wq->tx_stats.tx_pkts++;
934
935 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
936 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
937 reg_value = (num_wqes << 16) | wq->wq_id;
938 OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);
939
940 } else if (rc == EFBIG) {
941 if (retry_cnt == 0) {
942 m_temp = m_defrag(m, M_DONTWAIT);
943 if (m_temp == NULL)
944 goto free_ret;
945 m = m_temp;
946 *mpp = m_temp;
947 retry_cnt = retry_cnt + 1;
948 goto retry;
949 } else
950 goto free_ret;
951 } else if (rc == ENOMEM)
952 return rc;
953 else
954 goto free_ret;
955
956 return 0;
957
958free_ret:
959 m_freem(*mpp);
960 *mpp = NULL;
961 return rc;
962}
963
964
965static void
966oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
967{
968 uint32_t in;
969 struct oce_packet_desc *pd;
970 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
971 struct mbuf *m;
972
973 if (wq->packets_out == wq->packets_in)
974 device_printf(sc->dev, "WQ transmit descriptor missing\n");
975
976 in = wq->packets_in + 1;
977 if (in == OCE_WQ_PACKET_ARRAY_SIZE)
978 in = 0;
979
980 pd = &wq->pckts[wq->packets_in];
981 wq->packets_in = in;
982 wq->ring->num_used -= (pd->nsegs + 1);
983 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
984 bus_dmamap_unload(wq->tag, pd->map);
985
986 m = pd->mbuf;
987 m_freem(m);
988 pd->mbuf = NULL;
989
990 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
991 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
992 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
993 oce_tx_restart(sc, wq);
994 }
995 }
996}
997
998
999static void
1000oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1001{
1002
1003 if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1004 return;
1005
1006#if __FreeBSD_version >= 800000
1007 if (!drbr_empty(sc->ifp, wq->br))
1008#else
1009 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1010#endif
1011 taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
1012
1013}
1014
796#else
797 m = NULL;
798#endif
799 if (m == NULL) {
800 rc = ENXIO;
801 goto free_ret;
802 }
803 }
804
805 out = wq->packets_out + 1;
806 if (out == OCE_WQ_PACKET_ARRAY_SIZE)
807 out = 0;
808 if (out == wq->packets_in)
809 return EBUSY;
810
811 pd = &wq->pckts[wq->packets_out];
812retry:
813 rc = bus_dmamap_load_mbuf_sg(wq->tag,
814 pd->map,
815 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
816 if (rc == 0) {
817 num_wqes = pd->nsegs + 1;
818 if (IS_BE(sc)) {
819 /* BE3 requires an even number of WQEs; pad with a dummy. */
820 if (num_wqes & 1)
821 num_wqes++;
822 }
823 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
824 bus_dmamap_unload(wq->tag, pd->map);
825 return EBUSY;
826 }
827
828 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
829 pd->mbuf = m;
830 wq->packets_out = out;
831
832 nichdr =
833 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
834 nichdr->u0.dw[0] = 0;
835 nichdr->u0.dw[1] = 0;
836 nichdr->u0.dw[2] = 0;
837 nichdr->u0.dw[3] = 0;
838
839 nichdr->u0.s.complete = 1;
840 nichdr->u0.s.event = 1;
841 nichdr->u0.s.crc = 1;
842 nichdr->u0.s.forward = 0;
843 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
844 nichdr->u0.s.udpcs =
845 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
846 nichdr->u0.s.tcpcs =
847 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
848 nichdr->u0.s.num_wqe = num_wqes;
849 nichdr->u0.s.total_length = m->m_pkthdr.len;
850 if (m->m_flags & M_VLANTAG) {
851 nichdr->u0.s.vlan = 1; /* VLAN tag present */
852 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
853 }
854 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
855 if (m->m_pkthdr.tso_segsz) {
856 nichdr->u0.s.lso = 1;
857 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
858 }
859 if (!IS_BE(sc))
860 nichdr->u0.s.ipcs = 1;
861 }
862
863 RING_PUT(wq->ring, 1);
864 wq->ring->num_used++;
865
866 for (i = 0; i < pd->nsegs; i++) {
867 nicfrag =
868 RING_GET_PRODUCER_ITEM_VA(wq->ring,
869 struct oce_nic_frag_wqe);
870 nicfrag->u0.s.rsvd0 = 0;
871 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
872 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
873 nicfrag->u0.s.frag_len = segs[i].ds_len;
874 pd->wqe_idx = wq->ring->pidx;
875 RING_PUT(wq->ring, 1);
876 wq->ring->num_used++;
877 }
878 if (num_wqes > (pd->nsegs + 1)) {
879 nicfrag =
880 RING_GET_PRODUCER_ITEM_VA(wq->ring,
881 struct oce_nic_frag_wqe);
882 nicfrag->u0.dw[0] = 0;
883 nicfrag->u0.dw[1] = 0;
884 nicfrag->u0.dw[2] = 0;
885 nicfrag->u0.dw[3] = 0;
886 pd->wqe_idx = wq->ring->pidx;
887 RING_PUT(wq->ring, 1);
888 wq->ring->num_used++;
889 pd->nsegs++;
890 }
891
892 sc->ifp->if_opackets++;
893 wq->tx_stats.tx_reqs++;
894 wq->tx_stats.tx_wrbs += num_wqes;
895 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
896 wq->tx_stats.tx_pkts++;
897
898 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
899 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
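		/* Ring the TX doorbell: number of WQEs posted in bits 31:16,
		 * work queue id in bits 15:0. */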
900 reg_value = (num_wqes << 16) | wq->wq_id;
901 OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);
902
903 } else if (rc == EFBIG) {
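		/* Too many DMA segments: defragment the mbuf chain once and
		 * retry the load before dropping the packet. */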
904 if (retry_cnt == 0) {
905 m_temp = m_defrag(m, M_DONTWAIT);
906 if (m_temp == NULL)
907 goto free_ret;
908 m = m_temp;
909 *mpp = m_temp;
910 retry_cnt = retry_cnt + 1;
911 goto retry;
912 } else
913 goto free_ret;
914 } else if (rc == ENOMEM)
915 return rc;
916 else
917 goto free_ret;
918
919 return 0;
920
921free_ret:
922 m_freem(*mpp);
923 *mpp = NULL;
924 return rc;
925}
926
927
928static void
929oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
930{
931 uint32_t in;
932 struct oce_packet_desc *pd;
933 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
934 struct mbuf *m;
935
936 if (wq->packets_out == wq->packets_in)
937 device_printf(sc->dev, "WQ transmit descriptor missing\n");
938
939 in = wq->packets_in + 1;
940 if (in == OCE_WQ_PACKET_ARRAY_SIZE)
941 in = 0;
942
943 pd = &wq->pckts[wq->packets_in];
944 wq->packets_in = in;
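	/* nsegs + 1 accounts for the NIC header WQE that oce_tx() posts
	 * ahead of the fragment WQEs. */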
945 wq->ring->num_used -= (pd->nsegs + 1);
946 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
947 bus_dmamap_unload(wq->tag, pd->map);
948
949 m = pd->mbuf;
950 m_freem(m);
951 pd->mbuf = NULL;
952
953 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
954 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
955 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
956 oce_tx_restart(sc, wq);
957 }
958 }
959}
960
961
962static void
963oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
964{
965
966 if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
967 return;
968
969#if __FreeBSD_version >= 800000
970 if (!drbr_empty(sc->ifp, wq->br))
971#else
972 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
973#endif
974 taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
975
976}
977
978
1015#if defined(INET6) || defined(INET)
1016static struct mbuf *
979#if defined(INET6) || defined(INET)
980static struct mbuf *
1017oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp, uint16_t *mss)
981oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1018{
1019 struct mbuf *m;
1020#ifdef INET
1021 struct ip *ip;
1022#endif
1023#ifdef INET6
1024 struct ip6_hdr *ip6;
1025#endif
1026 struct ether_vlan_header *eh;
1027 struct tcphdr *th;
982{
983 struct mbuf *m;
984#ifdef INET
985 struct ip *ip;
986#endif
987#ifdef INET6
988 struct ip6_hdr *ip6;
989#endif
990 struct ether_vlan_header *eh;
991 struct tcphdr *th;
1028 int total_len = 0;
1029 uint16_t etype;
992 uint16_t etype;
1030 int ehdrlen = 0;
993 int total_len = 0, ehdrlen = 0;
1031
1032 m = *mpp;
994
995 m = *mpp;
1033 *mss = m->m_pkthdr.tso_segsz;
1034
1035 if (M_WRITABLE(m) == 0) {
1036 m = m_dup(*mpp, M_DONTWAIT);
1037 if (!m)
1038 return NULL;
1039 m_freem(*mpp);
1040 *mpp = m;
1041 }
1042
1043 eh = mtod(m, struct ether_vlan_header *);
1044 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1045 etype = ntohs(eh->evl_proto);
1046 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1047 } else {
1048 etype = ntohs(eh->evl_encap_proto);
1049 ehdrlen = ETHER_HDR_LEN;
1050 }
1051
996
997 if (M_WRITABLE(m) == 0) {
998 m = m_dup(*mpp, M_DONTWAIT);
999 if (!m)
1000 return NULL;
1001 m_freem(*mpp);
1002 *mpp = m;
1003 }
1004
1005 eh = mtod(m, struct ether_vlan_header *);
1006 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1007 etype = ntohs(eh->evl_proto);
1008 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1009 } else {
1010 etype = ntohs(eh->evl_encap_proto);
1011 ehdrlen = ETHER_HDR_LEN;
1012 }
1013
1052
1053 switch (etype) {
1054#ifdef INET
1055 case ETHERTYPE_IP:
1056 ip = (struct ip *)(m->m_data + ehdrlen);
1057 if (ip->ip_p != IPPROTO_TCP)
1058 return NULL;
1059 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1060
1061 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1062 break;
1063#endif
1064#ifdef INET6
1065 case ETHERTYPE_IPV6:
1066 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1067 if (ip6->ip6_nxt != IPPROTO_TCP)
1068 return NULL;
1069 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1070
1071 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1072 break;
1073#endif
1074 default:
1075 return NULL;
1076 }
1077
1078 m = m_pullup(m, total_len);
1079 if (!m)
1080 return NULL;
1081 *mpp = m;
1082 return m;
1083
1084}
1085#endif /* INET6 || INET */
1086
1014 switch (etype) {
1015#ifdef INET
1016 case ETHERTYPE_IP:
1017 ip = (struct ip *)(m->m_data + ehdrlen);
1018 if (ip->ip_p != IPPROTO_TCP)
1019 return NULL;
1020 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1021
1022 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1023 break;
1024#endif
1025#ifdef INET6
1026 case ETHERTYPE_IPV6:
1027 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1028 if (ip6->ip6_nxt != IPPROTO_TCP)
1029 return NULL;
1030 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1031
1032 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1033 break;
1034#endif
1035 default:
1036 return NULL;
1037 }
1038
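	/* Pull the full Ethernet/IP/TCP header into the first mbuf so the
	 * headers are contiguous and can be parsed in place. */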
1039 m = m_pullup(m, total_len);
1040 if (!m)
1041 return NULL;
1042 *mpp = m;
1043 return m;
1044
1045}
1046#endif /* INET6 || INET */
1047
1087
1088void
1089oce_tx_task(void *arg, int npending)
1090{
1091 struct oce_wq *wq = arg;
1092 POCE_SOFTC sc = wq->parent;
1093 struct ifnet *ifp = sc->ifp;
1094 int rc = 0;
1095
1096#if __FreeBSD_version >= 800000
1097 if (TRY_LOCK(&wq->tx_lock)) {
1098 rc = oce_multiq_transmit(ifp, NULL, wq);
1099 if (rc) {
1100 device_printf(sc->dev,
1101 "TX[%d] restart failed\n", wq->queue_index);
1102 }
1103 UNLOCK(&wq->tx_lock);
1104 }
1105#else
1106 oce_start(ifp);
1107#endif
1108
1109}
1110
1111
1112void
1113oce_start(struct ifnet *ifp)
1114{
1115 POCE_SOFTC sc = ifp->if_softc;
1116 struct mbuf *m;
1117 int rc = 0;
1048void
1049oce_tx_task(void *arg, int npending)
1050{
1051 struct oce_wq *wq = arg;
1052 POCE_SOFTC sc = wq->parent;
1053 struct ifnet *ifp = sc->ifp;
1054 int rc = 0;
1055
1056#if __FreeBSD_version >= 800000
1057 if (TRY_LOCK(&wq->tx_lock)) {
1058 rc = oce_multiq_transmit(ifp, NULL, wq);
1059 if (rc) {
1060 device_printf(sc->dev,
1061 "TX[%d] restart failed\n", wq->queue_index);
1062 }
1063 UNLOCK(&wq->tx_lock);
1064 }
1065#else
1066 oce_start(ifp);
1067#endif
1068
1069}
1070
1071
1072void
1073oce_start(struct ifnet *ifp)
1074{
1075 POCE_SOFTC sc = ifp->if_softc;
1076 struct mbuf *m;
1077 int rc = 0;
1078 int def_q = 0; /* Default tx queue is 0 */
1118
1119 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1120 IFF_DRV_RUNNING)
1121 return;
1122
1123 do {
1124 IF_DEQUEUE(&sc->ifp->if_snd, m);
1125 if (m == NULL)
1126 break;
1079
1080 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1081 IFF_DRV_RUNNING)
1082 return;
1083
1084 do {
1085 IF_DEQUEUE(&sc->ifp->if_snd, m);
1086 if (m == NULL)
1087 break;
1127 /* oce_start always uses default TX queue 0 */
1128 LOCK(&sc->wq[0]->tx_lock);
1129 rc = oce_tx(sc, &m, 0);
1130 UNLOCK(&sc->wq[0]->tx_lock);
1088
1089 LOCK(&sc->wq[def_q]->tx_lock);
1090 rc = oce_tx(sc, &m, def_q);
1091 UNLOCK(&sc->wq[def_q]->tx_lock);
1131 if (rc) {
1132 if (m != NULL) {
1092 if (rc) {
1093 if (m != NULL) {
1133 sc->wq[0]->tx_stats.tx_stops ++;
1094 sc->wq[def_q]->tx_stats.tx_stops ++;
1134 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1135 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1136 m = NULL;
1137 }
1138 break;
1139 }
1140 if (m != NULL)
1141 ETHER_BPF_MTAP(ifp, m);
1142
1095 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1096 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1097 m = NULL;
1098 }
1099 break;
1100 }
1101 if (m != NULL)
1102 ETHER_BPF_MTAP(ifp, m);
1103
1143 } while (1);
1104 } while (TRUE);
1105
1106 return;
1107}
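/*
 * Note: oce_start() services the default TX queue only. When oce_tx()
 * fails (presumably a full ring), the mbuf is prepended back onto if_snd
 * and IFF_DRV_OACTIVE is set, so the stack stops handing us packets
 * until TX completions free up WQ entries.
 */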
1108
1109
1110/* Handle the Completion Queue for transmit */
1111uint16_t
1112oce_wq_handler(void *arg)
1113{
1114 struct oce_wq *wq = (struct oce_wq *)arg;
1115 POCE_SOFTC sc = wq->parent;
1116 struct oce_cq *cq = wq->cq;
1117 struct oce_nic_tx_cqe *cqe;
1118 int num_cqes = 0;
1119
1120 LOCK(&wq->tx_lock);
1121 bus_dmamap_sync(cq->ring->dma.tag,
1122 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1123 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1124 while (cqe->u0.dw[3]) {
1125 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1126
1127 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1128 if (wq->ring->cidx >= wq->ring->num_items)
1129 wq->ring->cidx -= wq->ring->num_items;
1130
1131 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1132 wq->tx_stats.tx_compl++;
1133 cqe->u0.dw[3] = 0;
1134 RING_GET(cq->ring, 1);
1135 bus_dmamap_sync(cq->ring->dma.tag,
1136 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1137 cqe =
1138 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1139 num_cqes++;
1140 }
1141
1142 if (num_cqes)
1143 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1144 UNLOCK(&wq->tx_lock);
1145
1146 return 0;
1147}
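/*
 * Note: the completion's wqe_index advances the WQ consumer index with a
 * manual wrap (cidx -= num_items), and dw[3] is zeroed after processing
 * so a stale entry is not mistaken for a valid completion on the next pass.
 */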
1148
1149
1150static int
1151oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1152{
1153 POCE_SOFTC sc = ifp->if_softc;
1154 int status = 0, queue_index = 0;
1155 struct mbuf *next = NULL;
1156 struct buf_ring *br = NULL;
1157
1158 br = wq->br;
1159 queue_index = wq->queue_index;
1160
1161 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1162 IFF_DRV_RUNNING) {
1163 if (m != NULL)
1164 status = drbr_enqueue(ifp, br, m);
1165 return status;
1166 }
1167
1168 if (m == NULL)
1169 next = drbr_dequeue(ifp, br);
1170 else if (drbr_needs_enqueue(ifp, br)) {
1171 if ((status = drbr_enqueue(ifp, br, m)) != 0)
1172 return status;
1173 next = drbr_dequeue(ifp, br);
1174 } else
1175 next = m;
1176
1177 while (next != NULL) {
1178 if (oce_tx(sc, &next, queue_index)) {
1179 if (next != NULL) {
1180 wq->tx_stats.tx_stops ++;
1181 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1182 status = drbr_enqueue(ifp, br, next);
1183 }
1184 break;
1185 }
1186 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
1187 ETHER_BPF_MTAP(ifp, next);
1188 next = drbr_dequeue(ifp, br);
1189 }
1190
1191 return status;
1192}
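/*
 * Note: each WQ owns its buf_ring (drbr), so multiqueue transmit keeps
 * per-queue ordering without a shared software queue; when oce_tx()
 * fails, the leftover mbuf is re-enqueued and OACTIVE throttles the
 * stack, mirroring the oce_start() path above.
 */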
1193
1194
1195
1196
1197/*****************************************************************************
1198 * Receive routines                                                        *
1199 *****************************************************************************/
1200
1201static void
1202oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1203{
1204 uint32_t out;
1205 struct oce_packet_desc *pd;
1206 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1207 int i, len, frag_len;
1208 struct mbuf *m = NULL, *tail = NULL;
1209 uint16_t vtag;
1210
1211 len = cqe->u0.s.pkt_size;
1251 vtag = cqe->u0.s.vlan_tag;
1252 if (!len) {
1253 /*partial DMA workaround for Lancer*/
1254 oce_discard_rx_comp(rq, cqe);
1255 goto exit;
1256 }
1257
1212 if (!len) {
1213 /*partial DMA workaround for Lancer*/
1214 oce_discard_rx_comp(rq, cqe);
1215 goto exit;
1216 }
1217
1218 /* Get vlan_tag value */
1219 if(IS_BE(sc))
1220 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1221 else
1222 vtag = cqe->u0.s.vlan_tag;
1223
1224
1225 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1226
1227 if (rq->packets_out == rq->packets_in) {
1228 device_printf(sc->dev,
1229 "RQ transmit descriptor missing\n");
1230 }
1231 out = rq->packets_out + 1;
1232 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1233 out = 0;
1234 pd = &rq->pckts[rq->packets_out];
1235 rq->packets_out = out;
1236
1237 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1238 bus_dmamap_unload(rq->tag, pd->map);
1239 rq->pending--;
1240
1241 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1242 pd->mbuf->m_len = frag_len;
1243
1244 if (tail != NULL) {
1245 /* additional fragments */
1246 pd->mbuf->m_flags &= ~M_PKTHDR;
1247 tail->m_next = pd->mbuf;
1248 tail = pd->mbuf;
1249 } else {
1250 /* first fragment, fill out much of the packet header */
1251 pd->mbuf->m_pkthdr.len = len;
1252 pd->mbuf->m_pkthdr.csum_flags = 0;
1253 if (IF_CSUM_ENABLED(sc)) {
1254 if (cqe->u0.s.l4_cksum_pass) {
1255 pd->mbuf->m_pkthdr.csum_flags |=
1256 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1257 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1258 }
1259 if (cqe->u0.s.ip_cksum_pass) {
1293 if (!cqe->u0.s.ip_ver) { //IPV4
1260 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1261 pd->mbuf->m_pkthdr.csum_flags |=
1262 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1263 }
1264 }
1265 }
1266 m = tail = pd->mbuf;
1267 }
1268 pd->mbuf = NULL;
1269 len -= frag_len;
1270 }
1271
1272 if (m) {
1273 if (!oce_cqe_portid_valid(sc, cqe)) {
1274 m_freem(m);
1275 goto exit;
1276 }
1277
1278 m->m_pkthdr.rcvif = sc->ifp;
1279#if __FreeBSD_version >= 800000
1280 m->m_pkthdr.flowid = rq->queue_index;
1281 m->m_flags |= M_FLOWID;
1282#endif
1316 //This deternies if vlan tag is present
1283		/* This determines if the vlan tag is valid */
1284 if (oce_cqe_vtp_valid(sc, cqe)) {
1285 if (sc->function_mode & FNM_FLEX10_MODE) {
1319 /* FLEX10 */
1286 /* FLEX10. If QnQ is not set, neglect VLAN */
1287 if (cqe->u0.s.qnq) {
1321 /* If QnQ is not set, neglect VLAN */
1322 if (IS_BE(sc))
1323 m->m_pkthdr.ether_vtag =
1324 BSWAP_16(vtag);
1325 else
1326 m->m_pkthdr.ether_vtag = vtag;
1288 m->m_pkthdr.ether_vtag = vtag;
1289 m->m_flags |= M_VLANTAG;
1290 }
1329 } else {
1330 if (IS_BE(sc))
1331 m->m_pkthdr.ether_vtag = BSWAP_16(vtag);
1332 else
1333 m->m_pkthdr.ether_vtag = vtag;
1291 } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
1292			/* In UMC mode the pvid is generally stripped by
1293			   hw, but in some cases we have seen it come
1294			   through with the pvid. So if pvid == vlan, neglect the vlan.
1295			*/
1296 m->m_pkthdr.ether_vtag = vtag;
1297 m->m_flags |= M_VLANTAG;
1298 }
1299 }
1300
1301 sc->ifp->if_ipackets++;
1302#if defined(INET6) || defined(INET)
1303 /* Try to queue to LRO */
1304 if (IF_LRO_ENABLED(sc) &&
1305 !(m->m_flags & M_VLANTAG) &&
1306 (cqe->u0.s.ip_cksum_pass) &&
1307 (cqe->u0.s.l4_cksum_pass) &&
1308 (!cqe->u0.s.ip_ver) &&
1309 (rq->lro.lro_cnt != 0)) {
1310
1311 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1312 rq->lro_pkts_queued ++;
1313 goto post_done;
1314 }
1315 /* If LRO posting fails then try to post to STACK */
1316 }
1317#endif
1318
1319 (*sc->ifp->if_input) (sc->ifp, m);
1320#if defined(INET6) || defined(INET)
1321post_done:
1322#endif
1323 /* Update rx stats per queue */
1324 rq->rx_stats.rx_pkts++;
1325 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1326 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1327 if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1328 rq->rx_stats.rx_mcast_pkts++;
1329 if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1330 rq->rx_stats.rx_ucast_pkts++;
1331 }
1332exit:
1333 return;
1334}
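/*
 * Note: a frame larger than rq->cfg.frag_size arrives as num_fragments
 * receive buffers; the loop above chains them into a single mbuf chain,
 * with only the head keeping M_PKTHDR, before LRO or stack delivery.
 */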
1335
1336
1337static void
1338oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1339{
1340 uint32_t out, i = 0;
1341 struct oce_packet_desc *pd;
1342 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1343 int num_frags = cqe->u0.s.num_fragments;
1344
1345 if (IS_XE201(sc) && cqe->u0.s.error) {
1346 /* Lancer A0 workaround
1347 * num_frags will be 1 more than actual in case of error
1348 */
1349 if (num_frags)
1350 num_frags -= 1;
1351 }
1352 for (i = 0; i < num_frags; i++) {
1353 if (rq->packets_out == rq->packets_in) {
1354 device_printf(sc->dev,
1355 "RQ transmit descriptor missing\n");
1356 }
1357 out = rq->packets_out + 1;
1358 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1359 out = 0;
1360 pd = &rq->pckts[rq->packets_out];
1361 rq->packets_out = out;
1362
1363 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1364 bus_dmamap_unload(rq->tag, pd->map);
1365 rq->pending--;
1366 m_freem(pd->mbuf);
1367 }
1368
1369}
1370
1371
1372static int
1373oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1374{
1375 struct oce_nic_rx_cqe_v1 *cqe_v1;
1376 int vtp = 0;
1377
1378 if (sc->be3_native) {
1379 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1380 vtp = cqe_v1->u0.s.vlan_tag_present;
1418 } else {
1381 } else
1419 vtp = cqe->u0.s.vlan_tag_present;
1382 vtp = cqe->u0.s.vlan_tag_present;
1420 }
1383
1384 return vtp;
1385
1386}
1387
1388
1389static int
1390oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1391{
1392 struct oce_nic_rx_cqe_v1 *cqe_v1;
1393 int port_id = 0;
1394
1395 if (sc->be3_native && IS_BE(sc)) {
1396 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1397 port_id = cqe_v1->u0.s.port;
1398 if (sc->port_id != port_id)
1399 return 0;
1400 } else
1401 ;/* For BE3 legacy and Lancer this is dummy */
1402
1403 return 1;
1404
1405}
1406
1407#if defined(INET6) || defined(INET)
1408static void
1409oce_rx_flush_lro(struct oce_rq *rq)
1410{
1411 struct lro_ctrl *lro = &rq->lro;
1412 struct lro_entry *queued;
1413 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1414
1415 if (!IF_LRO_ENABLED(sc))
1416 return;
1417
1418 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1419 SLIST_REMOVE_HEAD(&lro->lro_active, next);
1420 tcp_lro_flush(lro, queued);
1421 }
1422 rq->lro_pkts_queued = 0;
1423
1424 return;
1425}
1426
1427
1428static int
1429oce_init_lro(POCE_SOFTC sc)
1430{
1431 struct lro_ctrl *lro = NULL;
1432 int i = 0, rc = 0;
1433
1434 for (i = 0; i < sc->nrqs; i++) {
1435 lro = &sc->rq[i]->lro;
1436 rc = tcp_lro_init(lro);
1437 if (rc != 0) {
1438 device_printf(sc->dev, "LRO init failed\n");
1439 return rc;
1440 }
1441 lro->ifp = sc->ifp;
1442 }
1443
1444 return rc;
1445}
1485#endif /* INET6 || INET */
1446
1447
1448void
1449oce_free_lro(POCE_SOFTC sc)
1450{
1490#if defined(INET6) || defined(INET)
1451 struct lro_ctrl *lro = NULL;
1452 int i = 0;
1453
1454 for (i = 0; i < sc->nrqs; i++) {
1455 lro = &sc->rq[i]->lro;
1456 if (lro)
1457 tcp_lro_free(lro);
1458 }
1499#endif
1500}
1459}
1460#endif /* INET6 || INET */
1501
1461
1462int
1463oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1464{
1465 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1466 int i, in, rc;
1467 struct oce_packet_desc *pd;
1468 bus_dma_segment_t segs[6];
1469 int nsegs, added = 0;
1470 struct oce_nic_rqe *rqe;
1471 pd_rxulp_db_t rxdb_reg;
1472
1473
1474 for (i = 0; i < count; i++) {
1475 in = rq->packets_in + 1;
1476 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1477 in = 0;
1478 if (in == rq->packets_out)
1479 break; /* no more room */
1480
1481 pd = &rq->pckts[rq->packets_in];
1482 pd->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1483 if (pd->mbuf == NULL)
1484 break;
1485
1486 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1487 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1488 pd->map,
1489 pd->mbuf,
1490 segs, &nsegs, BUS_DMA_NOWAIT);
1491 if (rc) {
1492 m_free(pd->mbuf);
1493 break;
1494 }
1495
1496 if (nsegs != 1) {
1497 i--;
1498 continue;
1499 }
1500
1501 rq->packets_in = in;
1502 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1503
1504 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1505 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1506 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1507 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1508 RING_PUT(rq->ring, 1);
1509 added++;
1510 rq->pending++;
1511 }
1512 if (added != 0) {
1513 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1514 DELAY(1);
1515 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1516 rxdb_reg.bits.qid = rq->rq_id;
1517 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1518 added -= OCE_MAX_RQ_POSTS;
1519 }
1520 if (added > 0) {
1521 DELAY(1);
1522 rxdb_reg.bits.qid = rq->rq_id;
1523 rxdb_reg.bits.num_posted = added;
1524 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1525 }
1526 }
1527
1528 return 0;
1529}
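/*
 * Note: the RXULP doorbell can advertise at most OCE_MAX_RQ_POSTS
 * buffers per write, so the refill above is rung in full-sized batches
 * first and any remainder in a final doorbell write.
 */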
1530
1531
1532/* Handle the Completion Queue for receive */
1533uint16_t
1534oce_rq_handler(void *arg)
1535{
1536 struct oce_rq *rq = (struct oce_rq *)arg;
1537 struct oce_cq *cq = rq->cq;
1538 POCE_SOFTC sc = rq->parent;
1539 struct oce_nic_rx_cqe *cqe;
1540 int num_cqes = 0, rq_buffers_used = 0;
1541
1542
1543 LOCK(&rq->rx_lock);
1544 bus_dmamap_sync(cq->ring->dma.tag,
1545 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1546 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1547 while (cqe->u0.dw[2]) {
1548 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1549
1550 RING_GET(rq->ring, 1);
1551 if (cqe->u0.s.error == 0) {
1552 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1553 } else {
1554 rq->rx_stats.rxcp_err++;
1555 sc->ifp->if_ierrors++;
1556 if (IS_XE201(sc))
1557 /* Lancer A0 no buffer workaround */
1558 oce_discard_rx_comp(rq, cqe);
1559 else
1560 /* Post L3/L4 errors to stack.*/
1561 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1562
1563 }
1564 rq->rx_stats.rx_compl++;
1565 cqe->u0.dw[2] = 0;
1566
1567#if defined(INET6) || defined(INET)
1568 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1569 oce_rx_flush_lro(rq);
1570 }
1571#endif
1572
1573 RING_GET(cq->ring, 1);
1574 bus_dmamap_sync(cq->ring->dma.tag,
1575 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1576 cqe =
1577 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1578 num_cqes++;
1579 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1580 break;
1581 }
1582
1583#if defined(INET6) || defined(INET)
1584 if (IF_LRO_ENABLED(sc))
1585 oce_rx_flush_lro(rq);
1586#endif
1587
1588 if (num_cqes) {
1589 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1590 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1591 if (rq_buffers_used > 1)
1592 oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1593 }
1594
1595 UNLOCK(&rq->rx_lock);
1596
1597 return 0;
1598
1599}
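/*
 * Note: the handler bounds the CQEs processed per call (8 on XE201,
 * otherwise oce_max_rsp_handled), re-arms the CQ, and then refills all
 * but one of the currently free RQ slots.
 */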
1600
1601
1602
1603
1604/*****************************************************************************
1605 * Helper function prototypes in this file *
1606 *****************************************************************************/
1607
1608static int
1609oce_attach_ifp(POCE_SOFTC sc)
1610{
1611
1612 sc->ifp = if_alloc(IFT_ETHER);
1613 if (!sc->ifp)
1614 return ENOMEM;
1615
1616 ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1617 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1618 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1619
1620 sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1621 sc->ifp->if_ioctl = oce_ioctl;
1622 sc->ifp->if_start = oce_start;
1623 sc->ifp->if_init = oce_init;
1624 sc->ifp->if_mtu = ETHERMTU;
1625 sc->ifp->if_softc = sc;
1626#if __FreeBSD_version >= 800000
1627 sc->ifp->if_transmit = oce_multiq_start;
1628 sc->ifp->if_qflush = oce_multiq_flush;
1629#endif
1630
1631 if_initname(sc->ifp,
1632 device_get_name(sc->dev), device_get_unit(sc->dev));
1633
1634 sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
1635 IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
1636 IFQ_SET_READY(&sc->ifp->if_snd);
1637
1638 sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1639 sc->ifp->if_hwassist |= CSUM_TSO;
1640 sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1641
1642 sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1643 sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1644 sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1645
1646#if defined(INET6) || defined(INET)
1647 sc->ifp->if_capabilities |= IFCAP_TSO;
1648 sc->ifp->if_capabilities |= IFCAP_LRO;
1649 sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1650#endif
1651
1652 sc->ifp->if_capenable = sc->ifp->if_capabilities;
1653 sc->ifp->if_baudrate = IF_Gbps(10UL);
1654
1655 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1656
1657 return 0;
1658}
1659
1660
1661static void
1662oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1663{
1664 POCE_SOFTC sc = ifp->if_softc;
1665
1666 if (ifp->if_softc != arg)
1667 return;
1668 if ((vtag == 0) || (vtag > 4095))
1669 return;
1670
1671 sc->vlan_tag[vtag] = 1;
1672 sc->vlans_added++;
1673 oce_vid_config(sc);
1674}
1675
1676
1677static void
1678oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1679{
1680 POCE_SOFTC sc = ifp->if_softc;
1681
1682 if (ifp->if_softc != arg)
1683 return;
1684 if ((vtag == 0) || (vtag > 4095))
1685 return;
1686
1687 sc->vlan_tag[vtag] = 0;
1688 sc->vlans_added--;
1689 oce_vid_config(sc);
1690}
1691
1692
1693/*
1694 * A max of 64 vlans can be configured in BE. If the user configures
1695 * more, place the card in vlan promiscuous mode.
1696 */
1697static int
1698oce_vid_config(POCE_SOFTC sc)
1699{
1700 struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1701 uint16_t ntags = 0, i;
1702 int status = 0;
1703
1704 if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
1705 (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1706 for (i = 0; i < MAX_VLANS; i++) {
1707 if (sc->vlan_tag[i]) {
1708 vtags[ntags].vtag = i;
1709 ntags++;
1710 }
1711 }
1712 if (ntags)
1713 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1714 vtags, ntags, 1, 0);
1715 } else
1716 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1717 NULL, 0, 1, 1);
1718 return status;
1719}
1720
1721
1722static void
1723oce_mac_addr_set(POCE_SOFTC sc)
1724{
1725 uint32_t old_pmac_id = sc->pmac_id;
1726 int status = 0;
1727
1728
1729 status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1730 sc->macaddr.size_of_struct);
1731 if (!status)
1732 return;
1733
1734 status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1735 sc->if_id, &sc->pmac_id);
1736 if (!status) {
1737 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
1738 bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1739 sc->macaddr.size_of_struct);
1740 }
1741 if (status)
1742		device_printf(sc->dev, "Failed to update MAC address\n");
1743
1744}
1745
1746
1747static int
1748oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1749{
1750 POCE_SOFTC sc = ifp->if_softc;
1751 struct ifreq *ifr = (struct ifreq *)data;
1752 int rc = ENXIO;
1753 char cookie[32] = {0};
1754 void *priv_data = (void *)ifr->ifr_data;
1755 void *ioctl_ptr;
1756 uint32_t req_size;
1757 struct mbx_hdr req;
1758 OCE_DMA_MEM dma_mem;
1759
1760
1761 if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1762 return EFAULT;
1763
1764 if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1765 return EINVAL;
1766
1767 ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1768 if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1769 return EFAULT;
1770
1771 req_size = le32toh(req.u0.req.request_length);
1772 if (req_size > 65536)
1773 return EINVAL;
1774
1775 req_size += sizeof(struct mbx_hdr);
1776 rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1777 if (rc)
1778 return ENOMEM;
1779
1780 if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1781 rc = EFAULT;
1782 goto dma_free;
1783 }
1784
1785 rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1786 if (rc) {
1787 rc = EIO;
1788 goto dma_free;
1789 }
1790
1791 if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1792 rc = EFAULT;
1793
1794dma_free:
1795 oce_dma_free(sc, &dma_mem);
1796 return rc;
1797
1798}
1799
1800
1801static void
1802oce_local_timer(void *arg)
1803{
1804 POCE_SOFTC sc = arg;
1805 int i = 0;
1806
1807 oce_refresh_nic_stats(sc);
1808 oce_refresh_queue_stats(sc);
1809 oce_mac_addr_set(sc);
1810
1811 /* TX Watch Dog*/
1812 for (i = 0; i < sc->nwqs; i++)
1813 oce_tx_restart(sc, sc->wq[i]);
1814
1815 callout_reset(&sc->timer, hz, oce_local_timer, sc);
1816}
1817
1818
1819static void
1820oce_if_deactivate(POCE_SOFTC sc)
1821{
1822 int i, mtime = 0;
1823 int wait_req = 0;
1824 struct oce_rq *rq;
1825 struct oce_wq *wq;
1826 struct oce_eq *eq;
1827
1828 sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1829
1830	/* Wait a max of 400ms for TX completions to be done */
1831 while (mtime < 400) {
1832 wait_req = 0;
1833 for_all_wq_queues(sc, wq, i) {
1834 if (wq->ring->num_used) {
1835 wait_req = 1;
1836 DELAY(1);
1837 break;
1838 }
1839 }
1840 mtime += 1;
1841 if (!wait_req)
1842 break;
1843 }
1844
1845 /* Stop intrs and finish any bottom halves pending */
1846 oce_hw_intr_disable(sc);
1847
1848 for (i = 0; i < sc->intr_count; i++) {
1849 if (sc->intrs[i].tq != NULL) {
1850 taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
1851 }
1852 }
1853
1854 /* Delete RX queue in card with flush param */
1855 oce_stop_rx(sc);
1856
1857 /* Invalidate any pending cq and eq entries*/
1858 for_all_evnt_queues(sc, eq, i)
1859 oce_drain_eq(eq);
1860 for_all_rq_queues(sc, rq, i)
1861 oce_drain_rq_cq(rq);
1862 for_all_wq_queues(sc, wq, i)
1863 oce_drain_wq_cq(wq);
1864
1865	/* But we still need to get MCC async events.
1866	   So enable intrs and also arm the first EQ.
1867	*/
1868 oce_hw_intr_enable(sc);
1869 oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
1870
1871 DELAY(10);
1872}
1873
1874
1875static void
1876oce_if_activate(POCE_SOFTC sc)
1877{
1878 struct oce_eq *eq;
1879 struct oce_rq *rq;
1880 struct oce_wq *wq;
1881 int i, rc = 0;
1882
1883 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1884
1885 oce_hw_intr_disable(sc);
1886
1887 oce_start_rx(sc);
1888
1889 for_all_rq_queues(sc, rq, i) {
1890 rc = oce_start_rq(rq);
1891 if (rc)
1892 device_printf(sc->dev, "Unable to start RX\n");
1893 }
1894
1895 for_all_wq_queues(sc, wq, i) {
1896 rc = oce_start_wq(wq);
1897 if (rc)
1898 device_printf(sc->dev, "Unable to start TX\n");
1899 }
1900
1901
1902 for_all_evnt_queues(sc, eq, i)
1903 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
1904
1905 oce_hw_intr_enable(sc);
1906
1907}
1908
1909static void
1910process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
1911{
1912 /* Update Link status */
1913 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1914 ASYNC_EVENT_LINK_UP) {
1915 sc->link_status = ASYNC_EVENT_LINK_UP;
1916 if_link_state_change(sc->ifp, LINK_STATE_UP);
1917 } else {
1918 sc->link_status = ASYNC_EVENT_LINK_DOWN;
1919 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
1920 }
1921
1922 /* Update speed */
1923 sc->link_speed = acqe->u0.s.speed;
1924 sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
1925
1926}
1927
1928
1929/* Handle the Completion Queue for the Mailbox/Async notifications */
1930uint16_t
1931oce_mq_handler(void *arg)
1932{
1933 struct oce_mq *mq = (struct oce_mq *)arg;
1934 POCE_SOFTC sc = mq->parent;
1935 struct oce_cq *cq = mq->cq;
1954 int num_cqes = 0;
1936 int num_cqes = 0, evt_type = 0, optype = 0;
1937 struct oce_mq_cqe *cqe;
1938 struct oce_async_cqe_link_state *acqe;
1939 struct oce_async_event_grp5_pvid_state *gcqe;
1940
1941
1942 bus_dmamap_sync(cq->ring->dma.tag,
1943 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1944 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
1945
1946 while (cqe->u0.dw[3]) {
1947 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
1948 if (cqe->u0.s.async_event) {
1964 acqe = (struct oce_async_cqe_link_state *)cqe;
1965 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1966 ASYNC_EVENT_LINK_UP) {
1967 sc->link_status = ASYNC_EVENT_LINK_UP;
1968 if_link_state_change(sc->ifp, LINK_STATE_UP);
1969 } else {
1970 sc->link_status = ASYNC_EVENT_LINK_DOWN;
1971 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
1949 evt_type = cqe->u0.s.event_type;
1950 optype = cqe->u0.s.async_type;
1951 if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
1952 /* Link status evt */
1953 acqe = (struct oce_async_cqe_link_state *)cqe;
1954 process_link_state(sc, acqe);
1955 } else if ((evt_type == ASYNC_EVENT_GRP5) &&
1956 (optype == ASYNC_EVENT_PVID_STATE)) {
1957 /* GRP5 PVID */
1958 gcqe =
1959 (struct oce_async_event_grp5_pvid_state *)cqe;
1960 if (gcqe->enabled)
1961 sc->pvid = gcqe->tag & VLAN_VID_MASK;
1962 else
1963 sc->pvid = 0;
1964
1965 }
1973
1974 if (acqe->u0.s.event_code ==
1975 ASYNC_EVENT_CODE_LINK_STATE) {
1976 sc->link_speed = acqe->u0.s.speed;
1977 sc->qos_link_speed =
1978 (uint32_t )acqe->u0.s.qos_link_speed * 10;
1979 }
1980 }
1966 }
1967 cqe->u0.dw[3] = 0;
1968 RING_GET(cq->ring, 1);
1983 RING_GET(mq->ring, 1);
1969 bus_dmamap_sync(cq->ring->dma.tag,
1970 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1971 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
1972 num_cqes++;
1973 }
1974
1975 if (num_cqes)
1976 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1977
1978 return 0;
1979}
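/*
 * Note: GRP5 PVID async events record the port VLAN id in sc->pvid;
 * oce_rx() compares it against the received tag so a PVID the hardware
 * failed to strip (UMC mode) is not passed up as a VLAN tag.
 */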
1980
1981
1982static void
1983setup_max_queues_want(POCE_SOFTC sc)
1984{
1985 int max_rss = 0;
1986
1987	/* Check if it is a FLEX machine. If so, don't use RSS */
1988 if ((sc->function_mode & FNM_FLEX10_MODE) ||
2004 (!sc->rss_enable) ||
2005 (sc->flags & OCE_FLAGS_BE2)) {
1989 (sc->function_mode & FNM_UMC_MODE) ||
1990 (sc->function_mode & FNM_VNIC_MODE) ||
1991 (!sc->rss_enable) ||
1992 (sc->flags & OCE_FLAGS_BE2)) {
1993 sc->nrqs = 1;
1994 sc->nwqs = 1;
1995 sc->rss_enable = 0;
1996 } else {
1997		/* For multiq, our design is to have TX rings equal to
1998		   RSS rings, so that we can pair up one RSS ring and TX
1999		   to a single intr, which improves CPU cache efficiency.
2000		 */
2001 if (IS_BE(sc) && (!sc->be3_native))
2002 max_rss = OCE_LEGACY_MODE_RSS;
2003 else
2004 max_rss = OCE_MAX_RSS;
2005
2006 sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for def RX */
2007 sc->nwqs = MIN(OCE_NCPUS, max_rss);
2021
2022 /*Hardware issue. Turn off multi TX for be2 */
2023 if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
2024 sc->nwqs = 1;
2025
2008 }
2009
2010}
2011
2012
2013static void
2014update_queues_got(POCE_SOFTC sc)
2015{
2016 if (sc->rss_enable) {
2017 sc->nrqs = sc->intr_count + 1;
2018 sc->nwqs = sc->intr_count;
2037 if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
2038 sc->nwqs = 1;
2019 } else {
2020 sc->nrqs = 1;
2021 sc->nwqs = 1;
2022 }
2023}
2024
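/*
 * Note: with RSS enabled the driver ends up with one RQ per interrupt
 * vector plus one default (non-RSS) RQ, matching the "1 for def RX"
 * sizing in setup_max_queues_want() above.
 */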